path: root/lib/Target
author    Dimitry Andric <dim@FreeBSD.org>  2017-05-08 17:12:57 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-05-08 17:12:57 +0000
commit    c46e6a5940c50058e00c0c5f9123fd82e338d29a (patch)
tree      89a719d723035c54a190b1f81d329834f1f93336 /lib/Target
parent    148779df305667b6942fee7e758fdf81a6498f38 (diff)
Diffstat (limited to 'lib/Target')
-rw-r--r-- lib/Target/AArch64/AArch64.h | 2
-rw-r--r-- lib/Target/AArch64/AArch64.td | 1
-rw-r--r-- lib/Target/AArch64/AArch64AddressTypePromotion.cpp | 493
-rw-r--r-- lib/Target/AArch64/AArch64ISelLowering.cpp | 6
-rw-r--r-- lib/Target/AArch64/AArch64InstrInfo.td | 56
-rw-r--r-- lib/Target/AArch64/AArch64RegisterBankInfo.cpp | 60
-rw-r--r-- lib/Target/AArch64/AArch64RegisterBankInfo.h | 7
-rw-r--r-- lib/Target/AArch64/AArch64TargetMachine.cpp | 9
-rw-r--r-- lib/Target/AArch64/CMakeLists.txt | 1
-rw-r--r-- lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 3
-rw-r--r-- lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 2
-rw-r--r-- lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 91
-rw-r--r-- lib/Target/AMDGPU/AMDGPURegisterBankInfo.h | 5
-rw-r--r-- lib/Target/AMDGPU/SIFrameLowering.cpp | 3
-rw-r--r-- lib/Target/AMDGPU/SIISelLowering.cpp | 14
-rw-r--r-- lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 10
-rw-r--r-- lib/Target/AMDGPU/SIMachineFunctionInfo.cpp | 8
-rw-r--r-- lib/Target/ARM/ARMBaseRegisterInfo.cpp | 15
-rw-r--r-- lib/Target/ARM/ARMISelLowering.cpp | 53
-rw-r--r-- lib/Target/ARM/ARMISelLowering.h | 6
-rw-r--r-- lib/Target/ARM/ARMInstrInfo.td | 215
-rw-r--r-- lib/Target/ARM/ARMInstrNEON.td | 26
-rw-r--r-- lib/Target/ARM/ARMInstrThumb2.td | 233
-rw-r--r-- lib/Target/ARM/ARMRegisterBankInfo.cpp | 14
-rw-r--r-- lib/Target/ARM/ARMRegisterBankInfo.h | 3
-rw-r--r-- lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp | 1
-rw-r--r-- lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp | 2
-rw-r--r-- lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp | 2
-rw-r--r-- lib/Target/Hexagon/HexagonDepIICHVX.td | 1143
-rw-r--r-- lib/Target/Hexagon/HexagonDepIICScalar.td | 2504
-rw-r--r-- lib/Target/Hexagon/HexagonDepITypes.h | 56
-rw-r--r-- lib/Target/Hexagon/HexagonDepITypes.td | 56
-rw-r--r-- lib/Target/Hexagon/HexagonDepInstrFormats.td | 5327
-rw-r--r-- lib/Target/Hexagon/HexagonDepInstrInfo.td | 6429
-rw-r--r-- lib/Target/Hexagon/HexagonDepTimingClasses.h | 132
-rw-r--r-- lib/Target/Hexagon/HexagonIICHVX.td | 100
-rw-r--r-- lib/Target/Hexagon/HexagonIICScalar.td | 164
-rw-r--r-- lib/Target/Hexagon/HexagonInstrFormats.td | 164
-rw-r--r-- lib/Target/Hexagon/HexagonInstrFormatsV4.td | 63
-rw-r--r-- lib/Target/Hexagon/HexagonInstrFormatsV60.td | 180
-rw-r--r-- lib/Target/Hexagon/HexagonInstrInfo.cpp | 152
-rw-r--r-- lib/Target/Hexagon/HexagonInstrInfo.h | 15
-rw-r--r-- lib/Target/Hexagon/HexagonMachineScheduler.cpp | 2
-rw-r--r-- lib/Target/Hexagon/HexagonPatterns.td | 8
-rw-r--r-- lib/Target/Hexagon/HexagonPseudo.td | 272
-rw-r--r-- lib/Target/Hexagon/HexagonRegisterInfo.td | 16
-rw-r--r-- lib/Target/Hexagon/HexagonSchedule.td | 51
-rw-r--r-- lib/Target/Hexagon/HexagonScheduleV4.td | 213
-rw-r--r-- lib/Target/Hexagon/HexagonScheduleV55.td | 207
-rw-r--r-- lib/Target/Hexagon/HexagonScheduleV60.td | 253
-rw-r--r-- lib/Target/Hexagon/HexagonScheduleV62.td | 112
-rw-r--r-- lib/Target/Hexagon/HexagonSubtarget.cpp | 302
-rw-r--r-- lib/Target/Hexagon/HexagonSubtarget.h | 10
-rw-r--r-- lib/Target/Hexagon/HexagonVLIWPacketizer.cpp | 110
-rw-r--r-- lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h | 2
-rw-r--r-- lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp | 8
-rw-r--r-- lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp | 9
-rw-r--r-- lib/Target/Hexagon/RDFLiveness.cpp | 33
-rw-r--r-- lib/Target/Hexagon/RDFRegisters.cpp | 15
-rw-r--r-- lib/Target/Hexagon/RDFRegisters.h | 1
-rw-r--r-- lib/Target/Mips/MipsAsmPrinter.cpp | 35
-rw-r--r-- lib/Target/NVPTX/NVPTXISelLowering.cpp | 9323
-rw-r--r-- lib/Target/NVPTX/NVPTXInstrInfo.td | 6329
-rw-r--r-- lib/Target/PowerPC/PPCFrameLowering.cpp | 3
-rw-r--r-- lib/Target/PowerPC/PPCISelLowering.cpp | 2
-rw-r--r-- lib/Target/Sparc/SparcISelLowering.cpp | 2
-rw-r--r-- lib/Target/SystemZ/SystemZInstrInfo.cpp | 8
-rw-r--r-- lib/Target/X86/AsmParser/X86AsmParser.cpp | 94
-rw-r--r-- lib/Target/X86/AsmParser/X86Operand.h | 14
-rw-r--r-- lib/Target/X86/X86AsmPrinter.h | 1
-rw-r--r-- lib/Target/X86/X86FrameLowering.cpp | 4
-rw-r--r-- lib/Target/X86/X86ISelLowering.cpp | 273
-rw-r--r-- lib/Target/X86/X86InstrAVX512.td | 39
-rw-r--r-- lib/Target/X86/X86InstrInfo.td | 6
-rw-r--r-- lib/Target/X86/X86InstrSSE.td | 9
-rw-r--r-- lib/Target/X86/X86InstructionSelector.cpp | 30
-rw-r--r-- lib/Target/X86/X86LegalizerInfo.cpp | 102
-rw-r--r-- lib/Target/X86/X86LegalizerInfo.h | 5
-rw-r--r-- lib/Target/X86/X86MCInstLower.cpp | 80
-rw-r--r-- lib/Target/X86/X86OptimizeLEAs.cpp | 8
-rw-r--r-- lib/Target/X86/X86RegisterBankInfo.cpp | 23
-rw-r--r-- lib/Target/X86/X86RegisterBankInfo.h | 7
-rw-r--r-- lib/Target/X86/X86Subtarget.cpp | 10
-rw-r--r-- lib/Target/X86/X86TargetTransformInfo.cpp | 32
-rw-r--r-- lib/Target/XCore/XCoreISelLowering.cpp | 2
85 files changed, 18857 insertions, 17034 deletions
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index b44b13e36e15..3e0e3978b90b 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -41,7 +41,6 @@ FunctionPass *createAArch64LoadStoreOptimizationPass();
FunctionPass *createAArch64VectorByElementOptPass();
ModulePass *createAArch64PromoteConstantPass();
FunctionPass *createAArch64ConditionOptimizerPass();
-FunctionPass *createAArch64AddressTypePromotionPass();
FunctionPass *createAArch64A57FPLoadBalancing();
FunctionPass *createAArch64A53Fix835769();
@@ -54,7 +53,6 @@ createAArch64InstructionSelector(const AArch64TargetMachine &,
void initializeAArch64A53Fix835769Pass(PassRegistry&);
void initializeAArch64A57FPLoadBalancingPass(PassRegistry&);
-void initializeAArch64AddressTypePromotionPass(PassRegistry&);
void initializeAArch64AdvSIMDScalarPass(PassRegistry&);
void initializeAArch64CollectLOHPass(PassRegistry&);
void initializeAArch64ConditionalComparesPass(PassRegistry&);
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index 519ca2894683..73f2b6a25f66 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -358,7 +358,6 @@ def ProcThunderXT83 : SubtargetFeature<"thunderxt83", "ARMProcFamily",
FeatureNEON]>;
def : ProcessorModel<"generic", NoSchedModel, [
- FeatureCRC,
FeatureFPARMv8,
FeatureNEON,
FeaturePerfMon,
diff --git a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
deleted file mode 100644
index e1b8ee6d03c3..000000000000
--- a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ /dev/null
@@ -1,493 +0,0 @@
-//===-- AArch64AddressTypePromotion.cpp --- Promote type for addr accesses -==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass tries to promote the computations use to obtained a sign extended
-// value used into memory accesses.
-// E.g.
-// a = add nsw i32 b, 3
-// d = sext i32 a to i64
-// e = getelementptr ..., i64 d
-//
-// =>
-// f = sext i32 b to i64
-// a = add nsw i64 f, 3
-// e = getelementptr ..., i64 a
-//
-// This is legal to do if the computations are marked with either nsw or nuw
-// markers. Moreover, the current heuristic is simple: it does not create new
-// sext operations, i.e., it gives up when a sext would have forked (e.g., if a
-// = add i32 b, c, two sexts are required to promote the computation).
-//
-// FIXME: This pass may be useful for other targets too.
-// ===---------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/InstrTypes.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cassert>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "aarch64-type-promotion"
-
-static cl::opt<bool>
-EnableMerge("aarch64-type-promotion-merge", cl::Hidden,
- cl::desc("Enable merging of redundant sexts when one is dominating"
- " the other."),
- cl::init(true));
-
-#define AARCH64_TYPE_PROMO_NAME "AArch64 Address Type Promotion"
-
-//===----------------------------------------------------------------------===//
-// AArch64AddressTypePromotion
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AArch64AddressTypePromotion : public FunctionPass {
-public:
- static char ID;
-
- AArch64AddressTypePromotion() : FunctionPass(ID) {
- initializeAArch64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
- }
-
- StringRef getPassName() const override { return AARCH64_TYPE_PROMO_NAME; }
-
- /// Iterate over the functions and promote the computation of interesting
- // sext instructions.
- bool runOnFunction(Function &F) override;
-
-private:
- /// The current function.
- Function *Func = nullptr;
-
- /// Filter out all sexts that does not have this type.
- /// Currently initialized with Int64Ty.
- Type *ConsideredSExtType = nullptr;
-
- // This transformation requires dominator info.
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- FunctionPass::getAnalysisUsage(AU);
- }
-
- typedef SmallPtrSet<Instruction *, 32> SetOfInstructions;
- typedef SmallVector<Instruction *, 16> Instructions;
- typedef DenseMap<Value *, Instructions> ValueToInsts;
-
- /// Check if it is profitable to move a sext through this instruction.
- /// Currently, we consider it is profitable if:
- /// - Inst is used only once (no need to insert truncate).
- /// - Inst has only one operand that will require a sext operation (we do
- /// do not create new sext operation).
- bool shouldGetThrough(const Instruction *Inst);
-
- /// Check if it is possible and legal to move a sext through this
- /// instruction.
- /// Current heuristic considers that we can get through:
- /// - Arithmetic operation marked with the nsw or nuw flag.
- /// - Other sext operation.
- /// - Truncate operation if it was just dropping sign extended bits.
- bool canGetThrough(const Instruction *Inst);
-
- /// Move sext operations through safe to sext instructions.
- bool propagateSignExtension(Instructions &SExtInsts);
-
- /// Is this sext should be considered for code motion.
- /// We look for sext with ConsideredSExtType and uses in at least one
- // GetElementPtrInst.
- bool shouldConsiderSExt(const Instruction *SExt) const;
-
- /// Collect all interesting sext operations, i.e., the ones with the right
- /// type and used in memory accesses.
- /// More precisely, a sext instruction is considered as interesting if it
- /// is used in a "complex" getelementptr or it exits at least another
- /// sext instruction that sign extended the same initial value.
- /// A getelementptr is considered as "complex" if it has more than 2
- // operands.
- void analyzeSExtension(Instructions &SExtInsts);
-
- /// Merge redundant sign extension operations in common dominator.
- void mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove);
-};
-
-} // end anonymous namespace
-
-char AArch64AddressTypePromotion::ID = 0;
-
-INITIALIZE_PASS_BEGIN(AArch64AddressTypePromotion, "aarch64-type-promotion",
- AARCH64_TYPE_PROMO_NAME, false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(AArch64AddressTypePromotion, "aarch64-type-promotion",
- AARCH64_TYPE_PROMO_NAME, false, false)
-
-FunctionPass *llvm::createAArch64AddressTypePromotionPass() {
- return new AArch64AddressTypePromotion();
-}
-
-bool AArch64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
- if (isa<SExtInst>(Inst))
- return true;
-
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
- (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
- return true;
-
- // sext(trunc(sext)) --> sext
- if (isa<TruncInst>(Inst) && isa<SExtInst>(Inst->getOperand(0))) {
- const Instruction *Opnd = cast<Instruction>(Inst->getOperand(0));
- // Check that the truncate just drop sign extended bits.
- if (Inst->getType()->getIntegerBitWidth() >=
- Opnd->getOperand(0)->getType()->getIntegerBitWidth() &&
- Inst->getOperand(0)->getType()->getIntegerBitWidth() <=
- ConsideredSExtType->getIntegerBitWidth())
- return true;
- }
-
- return false;
-}
-
-bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
- // If the type of the sext is the same as the considered one, this sext
- // will become useless.
- // Otherwise, we will have to do something to preserve the original value,
- // unless it is used once.
- if (isa<SExtInst>(Inst) &&
- (Inst->getType() == ConsideredSExtType || Inst->hasOneUse()))
- return true;
-
- // If the Inst is used more that once, we may need to insert truncate
- // operations and we don't do that at the moment.
- if (!Inst->hasOneUse())
- return false;
-
- // This truncate is used only once, thus if we can get thourgh, it will become
- // useless.
- if (isa<TruncInst>(Inst))
- return true;
-
- // If both operands are not constant, a new sext will be created here.
- // Current heuristic is: each step should be profitable.
- // Therefore we don't allow to increase the number of sext even if it may
- // be profitable later on.
- if (isa<BinaryOperator>(Inst) && isa<ConstantInt>(Inst->getOperand(1)))
- return true;
-
- return false;
-}
-
-static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
- return !(isa<SelectInst>(Inst) && OpIdx == 0);
-}
-
-bool
-AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
- if (SExt->getType() != ConsideredSExtType)
- return false;
-
- for (const User *U : SExt->users()) {
- if (isa<GetElementPtrInst>(U))
- return true;
- }
-
- return false;
-}
-
-// Input:
-// - SExtInsts contains all the sext instructions that are used directly in
-// GetElementPtrInst, i.e., access to memory.
-// Algorithm:
-// - For each sext operation in SExtInsts:
-// Let var be the operand of sext.
-// while it is profitable (see shouldGetThrough), legal, and safe
-// (see canGetThrough) to move sext through var's definition:
-// * promote the type of var's definition.
-// * fold var into sext uses.
-// * move sext above var's definition.
-// * update sext operand to use the operand of var that should be sign
-// extended (by construction there is only one).
-//
-// E.g.,
-// a = ... i32 c, 3
-// b = sext i32 a to i64 <- is it legal/safe/profitable to get through 'a'
-// ...
-// = b
-// => Yes, update the code
-// b = sext i32 c to i64
-// a = ... i64 b, 3
-// ...
-// = a
-// Iterate on 'c'.
-bool
-AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
- DEBUG(dbgs() << "*** Propagate Sign Extension ***\n");
-
- bool LocalChange = false;
- SetOfInstructions ToRemove;
- ValueToInsts ValToSExtendedUses;
- while (!SExtInsts.empty()) {
- // Get through simple chain.
- Instruction *SExt = SExtInsts.pop_back_val();
-
- DEBUG(dbgs() << "Consider:\n" << *SExt << '\n');
-
- // If this SExt has already been merged continue.
- if (SExt->use_empty() && ToRemove.count(SExt)) {
- DEBUG(dbgs() << "No uses => marked as delete\n");
- continue;
- }
-
- // Now try to get through the chain of definitions.
- while (auto *Inst = dyn_cast<Instruction>(SExt->getOperand(0))) {
- DEBUG(dbgs() << "Try to get through:\n" << *Inst << '\n');
- if (!canGetThrough(Inst) || !shouldGetThrough(Inst)) {
- // We cannot get through something that is not an Instruction
- // or not safe to SExt.
- DEBUG(dbgs() << "Cannot get through\n");
- break;
- }
-
- LocalChange = true;
- // If this is a sign extend, it becomes useless.
- if (isa<SExtInst>(Inst) || isa<TruncInst>(Inst)) {
- DEBUG(dbgs() << "SExt or trunc, mark it as to remove\n");
- // We cannot use replaceAllUsesWith here because we may trigger some
- // assertion on the type as all involved sext operation may have not
- // been moved yet.
- while (!Inst->use_empty()) {
- Use &U = *Inst->use_begin();
- Instruction *User = dyn_cast<Instruction>(U.getUser());
- assert(User && "User of sext is not an Instruction!");
- User->setOperand(U.getOperandNo(), SExt);
- }
- ToRemove.insert(Inst);
- SExt->setOperand(0, Inst->getOperand(0));
- SExt->moveBefore(Inst);
- continue;
- }
-
- // Get through the Instruction:
- // 1. Update its type.
- // 2. Replace the uses of SExt by Inst.
- // 3. Sign extend each operand that needs to be sign extended.
-
- // Step #1.
- Inst->mutateType(SExt->getType());
- // Step #2.
- SExt->replaceAllUsesWith(Inst);
- // Step #3.
- Instruction *SExtForOpnd = SExt;
-
- DEBUG(dbgs() << "Propagate SExt to operands\n");
- for (int OpIdx = 0, EndOpIdx = Inst->getNumOperands(); OpIdx != EndOpIdx;
- ++OpIdx) {
- DEBUG(dbgs() << "Operand:\n" << *(Inst->getOperand(OpIdx)) << '\n');
- if (Inst->getOperand(OpIdx)->getType() == SExt->getType() ||
- !shouldSExtOperand(Inst, OpIdx)) {
- DEBUG(dbgs() << "No need to propagate\n");
- continue;
- }
- // Check if we can statically sign extend the operand.
- Value *Opnd = Inst->getOperand(OpIdx);
- if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
- DEBUG(dbgs() << "Statically sign extend\n");
- Inst->setOperand(OpIdx, ConstantInt::getSigned(SExt->getType(),
- Cst->getSExtValue()));
- continue;
- }
- // UndefValue are typed, so we have to statically sign extend them.
- if (isa<UndefValue>(Opnd)) {
- DEBUG(dbgs() << "Statically sign extend\n");
- Inst->setOperand(OpIdx, UndefValue::get(SExt->getType()));
- continue;
- }
-
- // Otherwise we have to explicity sign extend it.
- assert(SExtForOpnd &&
- "Only one operand should have been sign extended");
-
- SExtForOpnd->setOperand(0, Opnd);
-
- DEBUG(dbgs() << "Move before:\n" << *Inst << "\nSign extend\n");
- // Move the sign extension before the insertion point.
- SExtForOpnd->moveBefore(Inst);
- Inst->setOperand(OpIdx, SExtForOpnd);
- // If more sext are required, new instructions will have to be created.
- SExtForOpnd = nullptr;
- }
- if (SExtForOpnd == SExt) {
- DEBUG(dbgs() << "Sign extension is useless now\n");
- ToRemove.insert(SExt);
- break;
- }
- }
-
- // If the use is already of the right type, connect its uses to its argument
- // and delete it.
- // This can happen for an Instruction all uses of which are sign extended.
- if (!ToRemove.count(SExt) &&
- SExt->getType() == SExt->getOperand(0)->getType()) {
- DEBUG(dbgs() << "Sign extension is useless, attach its use to "
- "its argument\n");
- SExt->replaceAllUsesWith(SExt->getOperand(0));
- ToRemove.insert(SExt);
- } else
- ValToSExtendedUses[SExt->getOperand(0)].push_back(SExt);
- }
-
- if (EnableMerge)
- mergeSExts(ValToSExtendedUses, ToRemove);
-
- // Remove all instructions marked as ToRemove.
- for (Instruction *I: ToRemove)
- I->eraseFromParent();
- return LocalChange;
-}
-
-void AArch64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove) {
- DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-
- for (auto &Entry : ValToSExtendedUses) {
- Instructions &Insts = Entry.second;
- Instructions CurPts;
- for (Instruction *Inst : Insts) {
- if (ToRemove.count(Inst))
- continue;
- bool inserted = false;
- for (auto &Pt : CurPts) {
- if (DT.dominates(Inst, Pt)) {
- DEBUG(dbgs() << "Replace all uses of:\n" << *Pt << "\nwith:\n"
- << *Inst << '\n');
- Pt->replaceAllUsesWith(Inst);
- ToRemove.insert(Pt);
- Pt = Inst;
- inserted = true;
- break;
- }
- if (!DT.dominates(Pt, Inst))
- // Give up if we need to merge in a common dominator as the
- // expermients show it is not profitable.
- continue;
-
- DEBUG(dbgs() << "Replace all uses of:\n" << *Inst << "\nwith:\n"
- << *Pt << '\n');
- Inst->replaceAllUsesWith(Pt);
- ToRemove.insert(Inst);
- inserted = true;
- break;
- }
- if (!inserted)
- CurPts.push_back(Inst);
- }
- }
-}
-
-void AArch64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
- DEBUG(dbgs() << "*** Analyze Sign Extensions ***\n");
-
- DenseMap<Value *, Instruction *> SeenChains;
-
- for (auto &BB : *Func) {
- for (auto &II : BB) {
- Instruction *SExt = &II;
-
- // Collect all sext operation per type.
- if (!isa<SExtInst>(SExt) || !shouldConsiderSExt(SExt))
- continue;
-
- DEBUG(dbgs() << "Found:\n" << (*SExt) << '\n');
-
- // Cases where we actually perform the optimization:
- // 1. SExt is used in a getelementptr with more than 2 operand =>
- // likely we can merge some computation if they are done on 64 bits.
- // 2. The beginning of the SExt chain is SExt several time. =>
- // code sharing is possible.
-
- bool insert = false;
- // #1.
- for (const User *U : SExt->users()) {
- const Instruction *Inst = dyn_cast<GetElementPtrInst>(U);
- if (Inst && Inst->getNumOperands() > 2) {
- DEBUG(dbgs() << "Interesting use in GetElementPtrInst\n" << *Inst
- << '\n');
- insert = true;
- break;
- }
- }
-
- // #2.
- // Check the head of the chain.
- Instruction *Inst = SExt;
- Value *Last;
- do {
- int OpdIdx = 0;
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (BinOp && isa<ConstantInt>(BinOp->getOperand(0)))
- OpdIdx = 1;
- Last = Inst->getOperand(OpdIdx);
- Inst = dyn_cast<Instruction>(Last);
- } while (Inst && canGetThrough(Inst) && shouldGetThrough(Inst));
-
- DEBUG(dbgs() << "Head of the chain:\n" << *Last << '\n');
- DenseMap<Value *, Instruction *>::iterator AlreadySeen =
- SeenChains.find(Last);
- if (insert || AlreadySeen != SeenChains.end()) {
- DEBUG(dbgs() << "Insert\n");
- SExtInsts.push_back(SExt);
- if (AlreadySeen != SeenChains.end() && AlreadySeen->second != nullptr) {
- DEBUG(dbgs() << "Insert chain member\n");
- SExtInsts.push_back(AlreadySeen->second);
- SeenChains[Last] = nullptr;
- }
- } else {
- DEBUG(dbgs() << "Record its chain membership\n");
- SeenChains[Last] = SExt;
- }
- }
- }
-}
-
-bool AArch64AddressTypePromotion::runOnFunction(Function &F) {
- if (skipFunction(F))
- return false;
-
- if (F.isDeclaration())
- return false;
- Func = &F;
- ConsideredSExtType = Type::getInt64Ty(Func->getContext());
-
- DEBUG(dbgs() << "*** " << getPassName() << ": " << Func->getName() << '\n');
-
- Instructions SExtInsts;
- analyzeSExtension(SExtInsts);
- return propagateSignExtension(SExtInsts);
-}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index eb1bbcafe6e6..4b1bb27dce73 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -758,6 +758,9 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
setOperationAction(ISD::FP_TO_SINT, VT, Custom);
setOperationAction(ISD::FP_TO_UINT, VT, Custom);
+ if (!VT.isFloatingPoint())
+ setOperationAction(ISD::ABS, VT, Legal);
+
// [SU][MIN|MAX] are available for all NEON types apart from i64.
if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
@@ -2482,6 +2485,9 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
+ case Intrinsic::aarch64_neon_abs:
+ return DAG.getNode(ISD::ABS, dl, Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::aarch64_neon_smax:
return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index ce401206e517..902b08844216 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2734,60 +2734,36 @@ defm FMOV : FPMoveImmediate<"fmov">;
defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
int_aarch64_neon_uabd>;
// Match UABDL in log2-shuffle patterns.
+def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
+ (zext (v8i8 V64:$opB))))),
+ (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
(v8i16 (add (sub (zext (v8i8 V64:$opA)),
(zext (v8i8 V64:$opB))),
(AArch64vashr v8i16:$src, (i32 15))))),
(UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
+def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
+ (zext (extract_high_v16i8 V128:$opB))))),
+ (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
(v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
(zext (extract_high_v16i8 V128:$opB))),
(AArch64vashr v8i16:$src, (i32 15))))),
(UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
- (v4i32 (add (sub (zext (v4i16 V64:$opA)),
- (zext (v4i16 V64:$opB))),
- (AArch64vashr v4i32:$src, (i32 31))))),
+def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
+ (zext (v4i16 V64:$opB))))),
(UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
- (v4i32 (add (sub (zext (extract_high_v8i16 V128:$opA)),
- (zext (extract_high_v8i16 V128:$opB))),
- (AArch64vashr v4i32:$src, (i32 31))))),
+def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
+ (zext (extract_high_v8i16 V128:$opB))))),
(UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
- (v2i64 (add (sub (zext (v2i32 V64:$opA)),
- (zext (v2i32 V64:$opB))),
- (AArch64vashr v2i64:$src, (i32 63))))),
+def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
+ (zext (v2i32 V64:$opB))))),
(UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
- (v2i64 (add (sub (zext (extract_high_v4i32 V128:$opA)),
- (zext (extract_high_v4i32 V128:$opB))),
- (AArch64vashr v2i64:$src, (i32 63))))),
+def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
+ (zext (extract_high_v4i32 V128:$opB))))),
(UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
-defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
-def : Pat<(xor (v8i8 (AArch64vashr V64:$src, (i32 7))),
- (v8i8 (add V64:$src, (AArch64vashr V64:$src, (i32 7))))),
- (ABSv8i8 V64:$src)>;
-def : Pat<(xor (v4i16 (AArch64vashr V64:$src, (i32 15))),
- (v4i16 (add V64:$src, (AArch64vashr V64:$src, (i32 15))))),
- (ABSv4i16 V64:$src)>;
-def : Pat<(xor (v2i32 (AArch64vashr V64:$src, (i32 31))),
- (v2i32 (add V64:$src, (AArch64vashr V64:$src, (i32 31))))),
- (ABSv2i32 V64:$src)>;
-def : Pat<(xor (v16i8 (AArch64vashr V128:$src, (i32 7))),
- (v16i8 (add V128:$src, (AArch64vashr V128:$src, (i32 7))))),
- (ABSv16i8 V128:$src)>;
-def : Pat<(xor (v8i16 (AArch64vashr V128:$src, (i32 15))),
- (v8i16 (add V128:$src, (AArch64vashr V128:$src, (i32 15))))),
- (ABSv8i16 V128:$src)>;
-def : Pat<(xor (v4i32 (AArch64vashr V128:$src, (i32 31))),
- (v4i32 (add V128:$src, (AArch64vashr V128:$src, (i32 31))))),
- (ABSv4i32 V128:$src)>;
-def : Pat<(xor (v2i64 (AArch64vashr V128:$src, (i32 63))),
- (v2i64 (add V128:$src, (AArch64vashr V128:$src, (i32 63))))),
- (ABSv2i64 V128:$src)>;
-
+defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
@@ -3359,7 +3335,7 @@ def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//
-defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_aarch64_neon_abs>;
+defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", abs>;
defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index 6f9021c4a030..5f895903da6f 100644
--- a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -260,15 +260,15 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
if (MI.getNumOperands() != 3)
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
/*NumOperands*/ 3);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
/*NumOperands*/ 3);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
return AltMappings;
}
case TargetOpcode::G_BITCAST: {
@@ -282,29 +282,29 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1,
getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1,
getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping GPRToFPRMapping(
+ const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping FPRToGPRMapping(
+ const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
- AltMappings.emplace_back(std::move(GPRToFPRMapping));
- AltMappings.emplace_back(std::move(FPRToGPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
+ AltMappings.push_back(&GPRToFPRMapping);
+ AltMappings.push_back(&FPRToGPRMapping);
return AltMappings;
}
case TargetOpcode::G_LOAD: {
@@ -318,21 +318,21 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1,
getOperandsMapping({getValueMapping(PMI_FirstGPR, Size),
// Addresses are GPR 64-bit.
getValueMapping(PMI_FirstGPR, 64)}),
/*NumOperands*/ 2);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1,
getOperandsMapping({getValueMapping(PMI_FirstFPR, Size),
// Addresses are GPR 64-bit.
getValueMapping(PMI_FirstGPR, 64)}),
/*NumOperands*/ 2);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
return AltMappings;
}
default:
@@ -373,8 +373,9 @@ static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
return false;
}
-RegisterBankInfo::InstructionMapping
-AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
+const RegisterBankInfo::InstructionMapping &
+AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
+ const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -411,11 +412,11 @@ AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
}
#endif // End NDEBUG.
- return InstructionMapping{DefaultMappingID, 1, getValueMapping(RBIdx, Size),
- NumOperands};
+ return getInstructionMapping(DefaultMappingID, 1,
+ getValueMapping(RBIdx, Size), NumOperands);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
@@ -424,7 +425,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping =
+ getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -462,15 +464,15 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
const RegisterBank &SrcRB =
SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
- return InstructionMapping{
+ return getInstructionMapping(
DefaultMappingID, copyCost(DstRB, SrcRB, Size),
getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
- /*NumOperands*/ 2};
+ /*NumOperands*/ 2);
}
case TargetOpcode::G_SEQUENCE:
// FIXME: support this, but the generic code is really not going to do
// anything sane.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
default:
break;
}
@@ -533,19 +535,17 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
// Finally construct the computed mapping.
- RegisterBankInfo::InstructionMapping Mapping =
- InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
if (!Mapping->isValid())
- return InstructionMapping();
+ return getInvalidInstructionMapping();
OpdsMapping[Idx] = Mapping;
}
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ return getInstructionMapping(DefaultMappingID, Cost,
+ getOperandsMapping(OpdsMapping), NumOperands);
}
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.h b/lib/Target/AArch64/AArch64RegisterBankInfo.h
index 0a795a42c0b1..6d74a47095a9 100644
--- a/lib/Target/AArch64/AArch64RegisterBankInfo.h
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.h
@@ -98,8 +98,8 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
///
/// \return An InstructionMappings with a statically allocated
/// OperandsMapping.
- static InstructionMapping
- getSameKindOfOperandsMapping(const MachineInstr &MI);
+ const InstructionMapping &
+ getSameKindOfOperandsMapping(const MachineInstr &MI) const;
public:
AArch64RegisterBankInfo(const TargetRegisterInfo &TRI);
@@ -113,7 +113,8 @@ public:
InstructionMappings
getInstrAlternativeMappings(const MachineInstr &MI) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index de7108d302dd..5a90fd1eb1ba 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -109,11 +109,6 @@ EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
cl::init(false));
static cl::opt<bool>
- EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden,
- cl::desc("Enable the type promotion pass"),
- cl::init(false));
-
-static cl::opt<bool>
EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
cl::desc("Enable optimizations on complex GEPs"),
cl::init(false));
@@ -146,7 +141,6 @@ extern "C" void LLVMInitializeAArch64Target() {
initializeGlobalISel(*PR);
initializeAArch64A53Fix835769Pass(*PR);
initializeAArch64A57FPLoadBalancingPass(*PR);
- initializeAArch64AddressTypePromotionPass(*PR);
initializeAArch64AdvSIMDScalarPass(*PR);
initializeAArch64CollectLOHPass(*PR);
initializeAArch64ConditionalComparesPass(*PR);
@@ -382,9 +376,6 @@ bool AArch64PassConfig::addPreISel() {
addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
}
- if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion)
- addPass(createAArch64AddressTypePromotionPass());
-
return false;
}
diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt
index 6d0930c358f1..f0f50f29be0f 100644
--- a/lib/Target/AArch64/CMakeLists.txt
+++ b/lib/Target/AArch64/CMakeLists.txt
@@ -39,7 +39,6 @@ endif()
add_llvm_target(AArch64CodeGen
AArch64A57FPLoadBalancing.cpp
- AArch64AddressTypePromotion.cpp
AArch64AdvSIMDScalarPass.cpp
AArch64AsmPrinter.cpp
AArch64CleanupLocalDynamicTLSPass.cpp
diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 2ce23dbf08e6..f473944cd528 100644
--- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -713,7 +713,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
S_00B84C_EXCP_EN_MSB(0) |
- S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) |
+ // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP.
+ S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) |
S_00B84C_EXCP_EN(0);
}
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 64e1b8f0d7f0..915d1d9e0e68 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3580,7 +3580,7 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op, KnownBits &Known,
const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits(); // Don't know anything.
+ Known.resetAll(); // Don't know anything.
KnownBits Known2;
unsigned Opc = Op.getOpcode();
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index a5edc0c3b937..623b2c88ab8f 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -82,25 +82,28 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
switch (MI.getOpcode()) {
case TargetOpcode::G_LOAD: {
// FIXME: Should we be hard coding the size for these mappings?
- InstructionMapping SSMapping(1, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(SSMapping));
-
- InstructionMapping VVMapping(2, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(VVMapping));
+ const InstructionMapping &SSMapping = getInstructionMapping(
+ 1, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&SSMapping);
+
+ const InstructionMapping &VVMapping = getInstructionMapping(
+ 2, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&VVMapping);
// FIXME: Should this be the pointer-size (64-bits) or the size of the
// register that will hold the bufffer resourc (128-bits).
- InstructionMapping VSMapping(3, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(VSMapping));
+ const InstructionMapping &VSMapping = getInstructionMapping(
+ 3, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&VSMapping);
return AltMappings;
@@ -124,13 +127,11 @@ static bool isInstrUniform(const MachineInstr &MI) {
return AMDGPU::isUniformMMO(MMO);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- RegisterBankInfo::InstructionMapping Mapping =
- InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
@@ -150,32 +151,34 @@ AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
OpdsMapping[0] = ValMapping;
OpdsMapping[1] = PtrMapping;
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
+ 1, 1, getOperandsMapping(OpdsMapping), MI.getNumOperands());
return Mapping;
// FIXME: Do we want to add a mapping for FLAT load, or should we just
// handle that during instruction selection?
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- Mapping = InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
+ bool IsComplete = true;
switch (MI.getOpcode()) {
- default: break;
+ default:
+ IsComplete = false;
+ break;
case AMDGPU::G_CONSTANT: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_GEP: {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -185,8 +188,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned Size = MRI.getType(MI.getOperand(i).getReg()).getSizeInBits();
OpdsMapping[i] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_STORE: {
assert(MI.getOperand(0).isReg());
@@ -203,28 +205,27 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[0] = ValMapping;
OpdsMapping[1] = PtrMapping;
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_LOAD:
return getInstrMappingForLoad(MI);
}
- unsigned BankID = AMDGPU::SGPRRegBankID;
-
- Mapping = InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
- unsigned Size = 0;
- for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
- // If the operand is not a register default to the size of the previous
- // operand.
- // FIXME: Can't we pull the types from the MachineInstr rather than the
- // operands.
- if (MI.getOperand(Idx).isReg())
- Size = getSizeInBits(MI.getOperand(Idx).getReg(), MRI, *TRI);
- OpdsMapping.push_back(AMDGPU::getValueMapping(BankID, Size));
+ if (!IsComplete) {
+ unsigned BankID = AMDGPU::SGPRRegBankID;
+
+ unsigned Size = 0;
+ for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
+ // If the operand is not a register default to the size of the previous
+ // operand.
+ // FIXME: Can't we pull the types from the MachineInstr rather than the
+ // operands.
+ if (MI.getOperand(Idx).isReg())
+ Size = getSizeInBits(MI.getOperand(Idx).getReg(), MRI, *TRI);
+ OpdsMapping.push_back(AMDGPU::getValueMapping(BankID, Size));
+ }
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
-
- return Mapping;
+ return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
+ MI.getNumOperands());
}
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index f13bde87ef2d..7c198a1b8a3f 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -44,7 +44,7 @@ class AMDGPURegisterBankInfo : public AMDGPUGenRegisterBankInfo {
/// See RegisterBankInfo::applyMapping.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
- RegisterBankInfo::InstructionMapping
+ const RegisterBankInfo::InstructionMapping &
getInstrMappingForLoad(const MachineInstr &MI) const;
public:
@@ -59,7 +59,8 @@ public:
InstructionMappings
getInstrAlternativeMappings(const MachineInstr &MI) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp
index 86e3b37b09e9..1279f845de0e 100644
--- a/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -353,7 +353,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
if (OffsetRegUsed &&
PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
- .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
+ .addReg(PreloadedScratchWaveOffsetReg,
+ MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
}
if (CopyBuffer && !CopyBufferFirst) {
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 853c8737b464..cc93c27731ff 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1042,6 +1042,7 @@ static void allocateHSAUserSGPRs(CCState &CCInfo,
static void allocateSystemSGPRs(CCState &CCInfo,
MachineFunction &MF,
SIMachineFunctionInfo &Info,
+ CallingConv::ID CallConv,
bool IsShader) {
if (Info.hasWorkGroupIDX()) {
unsigned Reg = Info.addWorkGroupIDX();
@@ -1072,8 +1073,15 @@ static void allocateSystemSGPRs(CCState &CCInfo,
unsigned PrivateSegmentWaveByteOffsetReg;
if (IsShader) {
- PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
- Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ PrivateSegmentWaveByteOffsetReg =
+ Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
+
+ // This is true if the scratch wave byte offset doesn't have a fixed
+ // location.
+ if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
+ PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
+ Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ }
} else
PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
@@ -1310,7 +1318,7 @@ SDValue SITargetLowering::LowerFormalArguments(
// Start adding system SGPRs.
if (IsEntryFunc)
- allocateSystemSGPRs(CCInfo, MF, *Info, IsShader);
+ allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
diff --git a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 9122cd72d323..b5e3ce3dfe3e 100644
--- a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1087,7 +1087,7 @@ MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
(CntVal[LGKM_CNT] & AMDGPU::getLgkmcntBitMask(IV)))) {
MachineLoop *ContainingLoop = MLI->getLoopFor(MI.getParent());
if (ContainingLoop) {
- MachineBasicBlock *TBB = ContainingLoop->getTopBlock();
+ MachineBasicBlock *TBB = ContainingLoop->getHeader();
BlockWaitcntBrackets *ScoreBracket =
BlockWaitcntBracketsMap[TBB].get();
if (!ScoreBracket) {
@@ -1097,7 +1097,7 @@ MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
}
ScoreBracket->setRevisitLoop(true);
DEBUG(dbgs() << "set-revisit: block"
- << ContainingLoop->getTopBlock()->getNumber() << '\n';);
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
}
@@ -1758,12 +1758,12 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
// If we are walking into the block from before the loop, then guarantee
// at least 1 re-walk over the loop to propagate the information, even if
// no S_WAITCNT instructions were generated.
- if (ContainingLoop && ContainingLoop->getTopBlock() == &MBB && J < I &&
+ if (ContainingLoop && ContainingLoop->getHeader() == &MBB && J < I &&
(BlockWaitcntProcessedSet.find(&MBB) ==
BlockWaitcntProcessedSet.end())) {
BlockWaitcntBracketsMap[&MBB]->setRevisitLoop(true);
DEBUG(dbgs() << "set-revisit: block"
- << ContainingLoop->getTopBlock()->getNumber() << '\n';);
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
// Walk over the instructions.
@@ -1774,7 +1774,7 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
// See if we want to revisit the loop.
if (ContainingLoop && loopBottom(ContainingLoop) == &MBB) {
- MachineBasicBlock *EntryBB = ContainingLoop->getTopBlock();
+ MachineBasicBlock *EntryBB = ContainingLoop->getHeader();
BlockWaitcntBrackets *EntrySB = BlockWaitcntBracketsMap[EntryBB].get();
if (EntrySB && EntrySB->getRevisitLoop()) {
EntrySB->setRevisitLoop(false);
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index b6a982aee6be..adebb8c4a1c5 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -122,9 +122,15 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
bool MaySpill = ST.isVGPRSpillingEnabled(*F);
bool HasStackObjects = FrameInfo.hasStackObjects();
- if (HasStackObjects || MaySpill)
+ if (HasStackObjects || MaySpill) {
PrivateSegmentWaveByteOffset = true;
+ // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
+ if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
+ (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
+ PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+ }
+
if (ST.isAmdCodeObjectV2(MF)) {
if (HasStackObjects || MaySpill)
PrivateSegmentBuffer = true;
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index a20887564f44..b18ed509ed23 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -245,11 +245,18 @@ ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
switch (RC->getID()) {
default:
return 0;
- case ARM::tGPRRegClassID:
- return TFI->hasFP(MF) ? 4 : 5;
+ case ARM::tGPRRegClassID: {
+ // hasFP ends up calling getMaxCallFrameComputed() which may not be
+ // available when getPressureLimit() is called as part of
+ // ScheduleDAGRRList.
+ bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
+ ? TFI->hasFP(MF) : true;
+ return 5 - HasFP;
+ }
case ARM::GPRRegClassID: {
- unsigned FP = TFI->hasFP(MF) ? 1 : 0;
- return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
+ bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
+ ? TFI->hasFP(MF) : true;
+ return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
}
case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
case ARM::DPRRegClassID:
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 9f7e60a848d9..e64582402fe1 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -202,7 +202,7 @@ void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
if (!VT.isFloatingPoint() &&
VT != MVT::v2i64 && VT != MVT::v1i64)
- for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
+ for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
setOperationAction(Opcode, VT, Legal);
}
@@ -822,6 +822,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL, MVT::i64, Custom);
setOperationAction(ISD::SRA, MVT::i64, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::ADDC, MVT::i32, Custom);
setOperationAction(ISD::ADDE, MVT::i32, Custom);
@@ -1344,6 +1345,10 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
case ARMISD::SMULWB: return "ARMISD::SMULWB";
case ARMISD::SMULWT: return "ARMISD::SMULWT";
+ case ARMISD::SMLALD: return "ARMISD::SMLALD";
+ case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
+ case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
+ case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::BFI: return "ARMISD::BFI";
case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
@@ -3311,6 +3316,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
}
return Result;
}
+ case Intrinsic::arm_neon_vabs:
+ return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::arm_neon_vmulls:
case Intrinsic::arm_neon_vmullu: {
unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
@@ -7722,6 +7730,37 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
}
}
+static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned Opc = 0;
+ if (IntNo == Intrinsic::arm_smlald)
+ Opc = ARMISD::SMLALD;
+ else if (IntNo == Intrinsic::arm_smlaldx)
+ Opc = ARMISD::SMLALDX;
+ else if (IntNo == Intrinsic::arm_smlsld)
+ Opc = ARMISD::SMLSLD;
+ else if (IntNo == Intrinsic::arm_smlsldx)
+ Opc = ARMISD::SMLSLDX;
+ else
+ return;
+
+ SDLoc dl(N);
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+ N->getOperand(3),
+ DAG.getConstant(0, dl, MVT::i32));
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+ N->getOperand(3),
+ DAG.getConstant(1, dl, MVT::i32));
+
+ SDValue LongMul = DAG.getNode(Opc, dl,
+ DAG.getVTList(MVT::i32, MVT::i32),
+ N->getOperand(1), N->getOperand(2),
+ Lo, Hi);
+ Results.push_back(LongMul.getValue(0));
+ Results.push_back(LongMul.getValue(1));
+}
+
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
@@ -7763,6 +7802,8 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::ATOMIC_CMP_SWAP:
ReplaceCMP_SWAP_64Results(N, Results, DAG);
return;
+ case ISD::INTRINSIC_WO_CHAIN:
+ return ReplaceLongIntrinsic(N, Results, DAG);
}
if (Res.getNode())
Results.push_back(Res);
@@ -12602,7 +12643,7 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = Known.getBitWidth();
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case ARMISD::ADDC:
@@ -12617,7 +12658,8 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
- if (Known.Zero == 0 && Known.One == 0) return;
+ if (Known.isUnknown())
+ return;
KnownBits KnownRHS;
DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
@@ -14015,3 +14057,8 @@ void ARMTargetLowering::insertCopiesSplitCSR(
.addReg(NewVR);
}
}
+
+void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
+ MF.getFrameInfo().computeMaxCallFrameSize(MF);
+ TargetLoweringBase::finalizeLowering(MF);
+}
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 76e4b60e01fb..08c51b66dfe7 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -184,6 +184,10 @@ class InstrItineraryData;
SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
SMLALTT, // 64-bit signed accumulate multiply top, top 16
+ SMLALD, // Signed multiply accumulate long dual
+ SMLALDX, // Signed multiply accumulate long dual exchange
+ SMLSLD, // Signed multiply subtract long dual
+ SMLSLDX, // Signed multiply subtract long dual exchange
// Operands of the standard BUILD_VECTOR node are not legalized, which
// is fine if BUILD_VECTORs are always lowered to shuffles or other
@@ -540,6 +544,8 @@ class InstrItineraryData;
unsigned getNumInterleavedAccesses(VectorType *VecTy,
const DataLayout &DL) const;
+ void finalizeLowering(MachineFunction &MF) const override;
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 28eb5fc30864..a94d6048f02d 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -99,6 +99,11 @@ def SDT_LongMac : SDTypeProfile<2, 4, [SDTCisVT<0, i32>,
SDTCisSameAs<0, 4>,
SDTCisSameAs<0, 5>]>;
+def ARMSmlald : SDNode<"ARMISD::SMLALD", SDT_LongMac>;
+def ARMSmlaldx : SDNode<"ARMISD::SMLALDX", SDT_LongMac>;
+def ARMSmlsld : SDNode<"ARMISD::SMLSLD", SDT_LongMac>;
+def ARMSmlsldx : SDNode<"ARMISD::SMLSLDX", SDT_LongMac>;
+
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
@@ -870,7 +875,9 @@ def imm1_16_XFORM: SDNodeXForm<imm, [{
MVT::i32);
}]>;
def Imm1_16AsmOperand: ImmAsmOperandMinusOne<1,16> { let Name = "Imm1_16"; }
-def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }],
+def imm1_16 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm > 0 && Imm <= 16;
+ }],
imm1_16_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_16AsmOperand;
@@ -1983,7 +1990,9 @@ def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
def : InstAlias<"esb$p", (HINT 16, pred:$p)>, Requires<[IsARM, HasRAS]>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
- "\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
+ "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_sel GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
@@ -3472,8 +3481,12 @@ def : ARMV6Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, imm8_or_16:$rot),
(SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
def SXTB16 : AI_ext_rrot_np<0b01101000, "sxtb16">;
+def : ARMV6Pat<(int_arm_sxtb16 GPR:$Src),
+ (SXTB16 GPR:$Src, 0)>;
def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
+def : ARMV6Pat<(int_arm_sxtab16 GPR:$LHS, GPR:$RHS),
+ (SXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
// Zero extenders
@@ -3493,6 +3506,8 @@ def UXTB16 : AI_ext_rrot<0b01101100,
// (UXTB16r_rot GPR:$Src, 3)>;
def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
(UXTB16 GPR:$Src, 1)>;
+def : ARMV6Pat<(int_arm_uxtb16 GPR:$Src),
+ (UXTB16 GPR:$Src, 0)>;
def UXTAB : AI_exta_rrot<0b01101110, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
@@ -3507,6 +3522,8 @@ def : ARMV6Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot), 0xFFFF)),
// This isn't safe in general, the add is two 16-bit units, not a 32-bit add.
def UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
+def : ARMV6Pat<(int_arm_uxtab16 GPR:$LHS, GPR:$RHS),
+ (UXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
def SBFX : I<(outs GPRnopc:$Rd),
@@ -3633,71 +3650,85 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
let Unpredictable{11-8} = 0b1111;
}
-// Saturating add/subtract
+// Wrappers around the AAI class
+class AAIRevOpr<bits<8> op27_20, bits<8> op11_4, string opc,
+ list<dag> pattern = []>
+ : AAI<op27_20, op11_4, opc,
+ pattern,
+ (ins GPRnopc:$Rm, GPRnopc:$Rn),
+ "\t$Rd, $Rm, $Rn">;
+class AAIIntrinsic<bits<8> op27_20, bits<8> op11_4, string opc,
+ Intrinsic intrinsic>
+ : AAI<op27_20, op11_4, opc,
+ [(set GPRnopc:$Rd, (intrinsic GPRnopc:$Rn, GPRnopc:$Rm))]>;
+
+// Saturating add/subtract
+let hasSideEffects = 1 in {
+def QADD8 : AAIIntrinsic<0b01100010, 0b11111001, "qadd8", int_arm_qadd8>;
+def QADD16 : AAIIntrinsic<0b01100010, 0b11110001, "qadd16", int_arm_qadd16>;
+def QSUB16 : AAIIntrinsic<0b01100010, 0b11110111, "qsub16", int_arm_qsub16>;
+def QSUB8 : AAIIntrinsic<0b01100010, 0b11111111, "qsub8", int_arm_qsub8>;
+
+def QDADD : AAIRevOpr<0b00010100, 0b00000101, "qdadd",
+ [(set GPRnopc:$Rd, (int_arm_qadd (int_arm_qadd GPRnopc:$Rm,
+ GPRnopc:$Rm),
+ GPRnopc:$Rn))]>;
+def QDSUB : AAIRevOpr<0b00010110, 0b00000101, "qdsub",
+ [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm,
+ (int_arm_qadd GPRnopc:$Rn, GPRnopc:$Rn)))]>;
+def QSUB : AAIRevOpr<0b00010010, 0b00000101, "qsub",
+ [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))]>;
let DecoderMethod = "DecodeQADDInstruction" in
-def QADD : AAI<0b00010000, 0b00000101, "qadd",
- [(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))],
- (ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
-
-def QSUB : AAI<0b00010010, 0b00000101, "qsub",
- [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))],
- (ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
-def QDADD : AAI<0b00010100, 0b00000101, "qdadd", [],
- (ins GPRnopc:$Rm, GPRnopc:$Rn),
- "\t$Rd, $Rm, $Rn">;
-def QDSUB : AAI<0b00010110, 0b00000101, "qdsub", [],
- (ins GPRnopc:$Rm, GPRnopc:$Rn),
- "\t$Rd, $Rm, $Rn">;
-
-def QADD16 : AAI<0b01100010, 0b11110001, "qadd16">;
-def QADD8 : AAI<0b01100010, 0b11111001, "qadd8">;
-def QASX : AAI<0b01100010, 0b11110011, "qasx">;
-def QSAX : AAI<0b01100010, 0b11110101, "qsax">;
-def QSUB16 : AAI<0b01100010, 0b11110111, "qsub16">;
-def QSUB8 : AAI<0b01100010, 0b11111111, "qsub8">;
-def UQADD16 : AAI<0b01100110, 0b11110001, "uqadd16">;
-def UQADD8 : AAI<0b01100110, 0b11111001, "uqadd8">;
-def UQASX : AAI<0b01100110, 0b11110011, "uqasx">;
-def UQSAX : AAI<0b01100110, 0b11110101, "uqsax">;
-def UQSUB16 : AAI<0b01100110, 0b11110111, "uqsub16">;
-def UQSUB8 : AAI<0b01100110, 0b11111111, "uqsub8">;
+ def QADD : AAIRevOpr<0b00010000, 0b00000101, "qadd",
+ [(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))]>;
+}
+
+def UQADD16 : AAIIntrinsic<0b01100110, 0b11110001, "uqadd16", int_arm_uqadd16>;
+def UQADD8 : AAIIntrinsic<0b01100110, 0b11111001, "uqadd8", int_arm_uqadd8>;
+def UQSUB16 : AAIIntrinsic<0b01100110, 0b11110111, "uqsub16", int_arm_uqsub16>;
+def UQSUB8 : AAIIntrinsic<0b01100110, 0b11111111, "uqsub8", int_arm_uqsub8>;
+def QASX : AAIIntrinsic<0b01100010, 0b11110011, "qasx", int_arm_qasx>;
+def QSAX : AAIIntrinsic<0b01100010, 0b11110101, "qsax", int_arm_qsax>;
+def UQASX : AAIIntrinsic<0b01100110, 0b11110011, "uqasx", int_arm_uqasx>;
+def UQSAX : AAIIntrinsic<0b01100110, 0b11110101, "uqsax", int_arm_uqsax>;
// Signed/Unsigned add/subtract
-def SASX : AAI<0b01100001, 0b11110011, "sasx">;
-def SADD16 : AAI<0b01100001, 0b11110001, "sadd16">;
-def SADD8 : AAI<0b01100001, 0b11111001, "sadd8">;
-def SSAX : AAI<0b01100001, 0b11110101, "ssax">;
-def SSUB16 : AAI<0b01100001, 0b11110111, "ssub16">;
-def SSUB8 : AAI<0b01100001, 0b11111111, "ssub8">;
-def UASX : AAI<0b01100101, 0b11110011, "uasx">;
-def UADD16 : AAI<0b01100101, 0b11110001, "uadd16">;
-def UADD8 : AAI<0b01100101, 0b11111001, "uadd8">;
-def USAX : AAI<0b01100101, 0b11110101, "usax">;
-def USUB16 : AAI<0b01100101, 0b11110111, "usub16">;
-def USUB8 : AAI<0b01100101, 0b11111111, "usub8">;
+def SASX : AAIIntrinsic<0b01100001, 0b11110011, "sasx", int_arm_sasx>;
+def SADD16 : AAIIntrinsic<0b01100001, 0b11110001, "sadd16", int_arm_sadd16>;
+def SADD8 : AAIIntrinsic<0b01100001, 0b11111001, "sadd8", int_arm_sadd8>;
+def SSAX : AAIIntrinsic<0b01100001, 0b11110101, "ssax", int_arm_ssax>;
+def SSUB16 : AAIIntrinsic<0b01100001, 0b11110111, "ssub16", int_arm_ssub16>;
+def SSUB8 : AAIIntrinsic<0b01100001, 0b11111111, "ssub8", int_arm_ssub8>;
+def UASX : AAIIntrinsic<0b01100101, 0b11110011, "uasx", int_arm_uasx>;
+def UADD16 : AAIIntrinsic<0b01100101, 0b11110001, "uadd16", int_arm_uadd16>;
+def UADD8 : AAIIntrinsic<0b01100101, 0b11111001, "uadd8", int_arm_uadd8>;
+def USAX : AAIIntrinsic<0b01100101, 0b11110101, "usax", int_arm_usax>;
+def USUB16 : AAIIntrinsic<0b01100101, 0b11110111, "usub16", int_arm_usub16>;
+def USUB8 : AAIIntrinsic<0b01100101, 0b11111111, "usub8", int_arm_usub8>;
// Signed/Unsigned halving add/subtract
-def SHASX : AAI<0b01100011, 0b11110011, "shasx">;
-def SHADD16 : AAI<0b01100011, 0b11110001, "shadd16">;
-def SHADD8 : AAI<0b01100011, 0b11111001, "shadd8">;
-def SHSAX : AAI<0b01100011, 0b11110101, "shsax">;
-def SHSUB16 : AAI<0b01100011, 0b11110111, "shsub16">;
-def SHSUB8 : AAI<0b01100011, 0b11111111, "shsub8">;
-def UHASX : AAI<0b01100111, 0b11110011, "uhasx">;
-def UHADD16 : AAI<0b01100111, 0b11110001, "uhadd16">;
-def UHADD8 : AAI<0b01100111, 0b11111001, "uhadd8">;
-def UHSAX : AAI<0b01100111, 0b11110101, "uhsax">;
-def UHSUB16 : AAI<0b01100111, 0b11110111, "uhsub16">;
-def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
+def SHASX : AAIIntrinsic<0b01100011, 0b11110011, "shasx", int_arm_shasx>;
+def SHADD16 : AAIIntrinsic<0b01100011, 0b11110001, "shadd16", int_arm_shadd16>;
+def SHADD8 : AAIIntrinsic<0b01100011, 0b11111001, "shadd8", int_arm_shadd8>;
+def SHSAX : AAIIntrinsic<0b01100011, 0b11110101, "shsax", int_arm_shsax>;
+def SHSUB16 : AAIIntrinsic<0b01100011, 0b11110111, "shsub16", int_arm_shsub16>;
+def SHSUB8 : AAIIntrinsic<0b01100011, 0b11111111, "shsub8", int_arm_shsub8>;
+def UHASX : AAIIntrinsic<0b01100111, 0b11110011, "uhasx", int_arm_uhasx>;
+def UHADD16 : AAIIntrinsic<0b01100111, 0b11110001, "uhadd16", int_arm_uhadd16>;
+def UHADD8 : AAIIntrinsic<0b01100111, 0b11111001, "uhadd8", int_arm_uhadd8>;
+def UHSAX : AAIIntrinsic<0b01100111, 0b11110101, "uhsax", int_arm_uhsax>;
+def UHSUB16 : AAIIntrinsic<0b01100111, 0b11110111, "uhsub16", int_arm_uhsub16>;
+def UHSUB8 : AAIIntrinsic<0b01100111, 0b11111111, "uhsub8", int_arm_uhsub8>;
// Unsigned Sum of Absolute Differences [and Accumulate].
def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
- "\t$Rd, $Rn, $Rm", []>,
+ "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_usad8 GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
@@ -3711,7 +3742,8 @@ def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
}
def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
- "\t$Rd, $Rn, $Rm, $Ra", []>,
+ "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (int_arm_usada8 GPR:$Rn, GPR:$Rm, GPR:$Ra))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]>{
bits<4> Rd;
bits<4> Rn;
@@ -3726,7 +3758,6 @@ def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
}
// Signed/Unsigned saturate
-
def SSAT : AI<(outs GPRnopc:$Rd),
(ins imm1_32:$sat_imm, GPRnopc:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh", []>,
@@ -3795,6 +3826,10 @@ def : ARMV6Pat<(int_arm_usat GPRnopc:$a, imm0_31:$pos),
(USAT imm0_31:$pos, GPRnopc:$a, 0)>;
def : ARMPat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
(SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
+def : ARMV6Pat<(int_arm_ssat16 GPRnopc:$a, imm1_16:$pos),
+ (SSAT16 imm1_16:$pos, GPRnopc:$a)>;
+def : ARMV6Pat<(int_arm_usat16 GPRnopc:$a, imm0_15:$pos),
+ (USAT16 imm0_15:$pos, GPRnopc:$a)>;
//===----------------------------------------------------------------------===//
// Bitwise Instructions.
@@ -4220,8 +4255,8 @@ multiclass AI_smla<string opc> {
IIC_iMAC16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (ARMsmulwt GPRnopc:$Rn, GPRnopc:$Rm)))]>,
- Requires<[IsARM, HasV5TE, UseMulOps]>,
- Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>,
+ Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
}
}
@@ -4255,7 +4290,8 @@ def : ARMV5TEPat<(ARMsmlaltt GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
// Helper class for AI_smld.
class AMulDualIbase<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
- : AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
+ : AI<oops, iops, MulFrm, itin, opc, asm, []>,
+ Requires<[IsARM, HasV6]> {
bits<4> Rn;
bits<4> Rm;
let Inst{27-23} = 0b01110;
@@ -4305,20 +4341,40 @@ multiclass AI_smld<bit sub, string opc> {
Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]>;
def LD: AMulDualI64<1, sub, 0, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
- (ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ NoItinerary,
!strconcat(opc, "ld"), "\t$RdLo, $RdHi, $Rn, $Rm">,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Sched<[WriteMAC64Lo, WriteMAC64Hi, ReadMUL, ReadMUL, ReadMAC, ReadMAC]>;
def LDX : AMulDualI64<1, sub, 1, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
- (ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ NoItinerary,
!strconcat(opc, "ldx"),"\t$RdLo, $RdHi, $Rn, $Rm">,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Sched<[WriteMUL64Lo, WriteMUL64Hi, ReadMUL, ReadMUL]>;
-
}
defm SMLA : AI_smld<0, "smla">;
defm SMLS : AI_smld<1, "smls">;
+def : ARMV6Pat<(int_arm_smlad GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLAD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smladx GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLADX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smlsd GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLSD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smlsdx GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLSDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(ARMSmlald GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLALD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlaldx GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLALDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlsld GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLSLD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlsldx GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLSLDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+
multiclass AI_sdml<bit sub, string opc> {
def D:AMulDualI<0, sub, 0, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm),
@@ -4332,6 +4388,15 @@ multiclass AI_sdml<bit sub, string opc> {
defm SMUA : AI_sdml<0, "smua">;
defm SMUS : AI_sdml<1, "smus">;
+def : ARMV6Pat<(int_arm_smuad GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUAD GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smuadx GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUADX GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smusd GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUSD GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smusdx GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUSDX GPRnopc:$Rn, GPRnopc:$Rm)>;
+
//===----------------------------------------------------------------------===//
// Division Instructions (ARMv7-A with virtualization extension)
//
@@ -5648,6 +5713,32 @@ def : ARMV5MOPat<(add GPR:$acc,
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>,
Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
+def : ARMV5TEPat<(int_arm_smulbb GPR:$a, GPR:$b),
+ (SMULBB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulbt GPR:$a, GPR:$b),
+ (SMULBT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smultb GPR:$a, GPR:$b),
+ (SMULTB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smultt GPR:$a, GPR:$b),
+ (SMULTT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulwb GPR:$a, GPR:$b),
+ (SMULWB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulwt GPR:$a, GPR:$b),
+ (SMULWT GPR:$a, GPR:$b)>;
+
+def : ARMV5TEPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlabt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlatb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlatt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLATT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlawb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlawt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLAWT GPR:$a, GPR:$b, GPR:$acc)>;
+
// Pre-v7 uses MCR for synchronization barriers.
def : ARMPat<(ARMMemBarrierMCR GPR:$zero), (MCR 15, 0, GPR:$zero, 7, 10, 5)>,
Requires<[IsARM, HasV6]>;
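The QDADD/QDSUB definitions earlier in this file are selected from a composition of the plain saturating intrinsics: qadd(qadd(Rm, Rm), Rn) and qsub(Rm, qadd(Rn, Rn)). A self-contained C++ sketch of that composition over 32-bit signed saturating arithmetic (illustration only, not the backend code):

#include <cstdint>
#include <limits>

// 32-bit signed saturating add, computed in 64 bits and clamped.
static int32_t qadd32(int32_t A, int32_t B) {
  int64_t Sum = static_cast<int64_t>(A) + B;
  if (Sum > std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
  if (Sum < std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(Sum);
}

// 32-bit signed saturating subtract.
static int32_t qsub32(int32_t A, int32_t B) {
  int64_t Diff = static_cast<int64_t>(A) - B;
  if (Diff > std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
  if (Diff < std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(Diff);
}

// The compositions matched by the QDADD/QDSUB patterns above.
static int32_t qdadd32(int32_t Rm, int32_t Rn) { return qadd32(qadd32(Rm, Rm), Rn); }
static int32_t qdsub32(int32_t Rm, int32_t Rn) { return qsub32(Rm, qadd32(Rn, Rn)); }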
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 9b08c612e16b..51290e5a5b93 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -5558,8 +5558,7 @@ defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
// VABS : Vector Absolute Value
defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
- IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
- int_arm_neon_vabs>;
+ IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s", abs>;
def VABSfd : N2VD<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
"vabs", "f32",
v2f32, v2f32, fabs>;
@@ -5575,29 +5574,6 @@ def VABShq : N2VQ<0b11, 0b11, 0b01, 0b01, 0b01110, 0,
v8f16, v8f16, fabs>,
Requires<[HasNEON, HasFullFP16]>;
-def : Pat<(xor (v2i32 (bitconvert (v8i8 (NEONvshrs DPR:$src, (i32 7))))),
- (v2i32 (bitconvert (v8i8 (add DPR:$src,
- (NEONvshrs DPR:$src, (i32 7))))))),
- (VABSv8i8 DPR:$src)>;
-def : Pat<(xor (v2i32 (bitconvert (v4i16 (NEONvshrs DPR:$src, (i32 15))))),
- (v2i32 (bitconvert (v4i16 (add DPR:$src,
- (NEONvshrs DPR:$src, (i32 15))))))),
- (VABSv4i16 DPR:$src)>;
-def : Pat<(xor (v2i32 (NEONvshrs DPR:$src, (i32 31))),
- (v2i32 (add DPR:$src, (NEONvshrs DPR:$src, (i32 31))))),
- (VABSv2i32 DPR:$src)>;
-def : Pat<(xor (v4i32 (bitconvert (v16i8 (NEONvshrs QPR:$src, (i32 7))))),
- (v4i32 (bitconvert (v16i8 (add QPR:$src,
- (NEONvshrs QPR:$src, (i32 7))))))),
- (VABSv16i8 QPR:$src)>;
-def : Pat<(xor (v4i32 (bitconvert (v8i16 (NEONvshrs QPR:$src, (i32 15))))),
- (v4i32 (bitconvert (v8i16 (add QPR:$src,
- (NEONvshrs QPR:$src, (i32 15))))))),
- (VABSv8i16 QPR:$src)>;
-def : Pat<(xor (v4i32 (NEONvshrs QPR:$src, (i32 31))),
- (v4i32 (add QPR:$src, (NEONvshrs QPR:$src, (i32 31))))),
- (VABSv4i32 QPR:$src)>;
-
// VQABS : Vector Saturating Absolute Value
defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
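The six deleted NEON patterns above all matched the branch-free absolute-value idiom (x + (x >> N)) ^ (x >> N) at the various element widths; with VABS now selected from the generic abs node, that idiom can be folded in target-independent code instead. A scalar C++ sketch of the identity (wrap-around arithmetic, matching vector semantics; illustration only):

#include <cstdint>

static int32_t absViaShift(int32_t X) {
  uint32_t Mask = static_cast<uint32_t>(X >> 31); // 0 or all-ones (arithmetic shift assumed)
  // (X + Mask) ^ Mask negates X when Mask is all-ones; INT32_MIN maps to itself.
  return static_cast<int32_t>((static_cast<uint32_t>(X) + Mask) ^ Mask);
}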
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index f710ee6a7e77..bf3d820e7b7d 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -1993,6 +1993,10 @@ def : Thumb2DSPPat<(add rGPR:$Rn,
def : Thumb2DSPPat<(add rGPR:$Rn,
(sext_inreg (rotr rGPR:$Rm, rot_imm:$rot), i16)),
(t2SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_sxtb16 rGPR:$Rn),
+ (t2SXTB16 rGPR:$Rn, 0)>;
+def : Thumb2DSPPat<(int_arm_sxtab16 rGPR:$Rn, rGPR:$Rm),
+ (t2SXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
// A simple right-shift can also be used in most cases (the exception is the
@@ -2026,6 +2030,9 @@ def : Thumb2DSPPat<(and (rotr rGPR:$Rm, rot_imm:$rot), 0x0000FFFF),
def : Thumb2DSPPat<(and (rotr rGPR:$Rm, rot_imm:$rot), 0x00FF00FF),
(t2UXTB16 rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_uxtb16 rGPR:$Rm),
+ (t2UXTB16 rGPR:$Rm, 0)>;
+
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
// The transformation should probably be done as a combiner action
// instead so we can include a check for masking back in the upper
@@ -2053,6 +2060,8 @@ def : Thumb2DSPPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, rot_imm:$rot),
def : Thumb2DSPPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot),
0xFFFF)),
(t2UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_uxtab16 rGPR:$Rn, rGPR:$Rm),
+ (t2UXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
}
@@ -2137,10 +2146,9 @@ def : T2Pat<(ARMadde rGPR:$src, t2_so_imm_not:$imm, CPSR),
def : T2Pat<(ARMadde rGPR:$src, imm0_65535_neg:$imm, CPSR),
(t2SBCrr rGPR:$src, (t2MOVi16 (imm_not_XFORM imm:$imm)))>;
-// Select Bytes -- for disassembly only
-
def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
- NoItinerary, "sel", "\t$Rd, $Rn, $Rm", []>,
+ NoItinerary, "sel", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_sel GPR:$Rn, GPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]> {
let Inst{31-27} = 0b11111;
let Inst{26-24} = 0b010;
@@ -2154,9 +2162,7 @@ def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
// A6.3.13, A6.3.14, A6.3.15 Parallel addition and subtraction (signed/unsigned)
// And Miscellaneous operations -- for disassembly only
class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
- list<dag> pat = [/* For disassembly only; pattern left blank */],
- dag iops = (ins rGPR:$Rn, rGPR:$Rm),
- string asm = "\t$Rd, $Rn, $Rm">
+ list<dag> pat, dag iops, string asm>
: T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, pat>,
Requires<[IsThumb2, HasDSP]> {
let Inst{31-27} = 0b11111;
@@ -2174,60 +2180,72 @@ class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
let Inst{3-0} = Rm;
}
-// Saturating add/subtract -- for disassembly only
-
-def t2QADD : T2I_pam<0b000, 0b1000, "qadd",
- [(set rGPR:$Rd, (int_arm_qadd rGPR:$Rn, rGPR:$Rm))],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QADD16 : T2I_pam<0b001, 0b0001, "qadd16">;
-def t2QADD8 : T2I_pam<0b000, 0b0001, "qadd8">;
-def t2QASX : T2I_pam<0b010, 0b0001, "qasx">;
-def t2QDADD : T2I_pam<0b000, 0b1001, "qdadd", [],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QDSUB : T2I_pam<0b000, 0b1011, "qdsub", [],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QSAX : T2I_pam<0b110, 0b0001, "qsax">;
-def t2QSUB : T2I_pam<0b000, 0b1010, "qsub",
- [(set rGPR:$Rd, (int_arm_qsub rGPR:$Rn, rGPR:$Rm))],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QSUB16 : T2I_pam<0b101, 0b0001, "qsub16">;
-def t2QSUB8 : T2I_pam<0b100, 0b0001, "qsub8">;
-def t2UQADD16 : T2I_pam<0b001, 0b0101, "uqadd16">;
-def t2UQADD8 : T2I_pam<0b000, 0b0101, "uqadd8">;
-def t2UQASX : T2I_pam<0b010, 0b0101, "uqasx">;
-def t2UQSAX : T2I_pam<0b110, 0b0101, "uqsax">;
-def t2UQSUB16 : T2I_pam<0b101, 0b0101, "uqsub16">;
-def t2UQSUB8 : T2I_pam<0b100, 0b0101, "uqsub8">;
-
-// Signed/Unsigned add/subtract -- for disassembly only
-
-def t2SASX : T2I_pam<0b010, 0b0000, "sasx">;
-def t2SADD16 : T2I_pam<0b001, 0b0000, "sadd16">;
-def t2SADD8 : T2I_pam<0b000, 0b0000, "sadd8">;
-def t2SSAX : T2I_pam<0b110, 0b0000, "ssax">;
-def t2SSUB16 : T2I_pam<0b101, 0b0000, "ssub16">;
-def t2SSUB8 : T2I_pam<0b100, 0b0000, "ssub8">;
-def t2UASX : T2I_pam<0b010, 0b0100, "uasx">;
-def t2UADD16 : T2I_pam<0b001, 0b0100, "uadd16">;
-def t2UADD8 : T2I_pam<0b000, 0b0100, "uadd8">;
-def t2USAX : T2I_pam<0b110, 0b0100, "usax">;
-def t2USUB16 : T2I_pam<0b101, 0b0100, "usub16">;
-def t2USUB8 : T2I_pam<0b100, 0b0100, "usub8">;
-
-// Signed/Unsigned halving add/subtract -- for disassembly only
-
-def t2SHASX : T2I_pam<0b010, 0b0010, "shasx">;
-def t2SHADD16 : T2I_pam<0b001, 0b0010, "shadd16">;
-def t2SHADD8 : T2I_pam<0b000, 0b0010, "shadd8">;
-def t2SHSAX : T2I_pam<0b110, 0b0010, "shsax">;
-def t2SHSUB16 : T2I_pam<0b101, 0b0010, "shsub16">;
-def t2SHSUB8 : T2I_pam<0b100, 0b0010, "shsub8">;
-def t2UHASX : T2I_pam<0b010, 0b0110, "uhasx">;
-def t2UHADD16 : T2I_pam<0b001, 0b0110, "uhadd16">;
-def t2UHADD8 : T2I_pam<0b000, 0b0110, "uhadd8">;
-def t2UHSAX : T2I_pam<0b110, 0b0110, "uhsax">;
-def t2UHSUB16 : T2I_pam<0b101, 0b0110, "uhsub16">;
-def t2UHSUB8 : T2I_pam<0b100, 0b0110, "uhsub8">;
+class T2I_pam_intrinsics<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
+ : T2I_pam<op22_20, op7_4, opc,
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm))],
+ (ins rGPR:$Rn, rGPR:$Rm), "\t$Rd, $Rn, $Rm">;
+
+class T2I_pam_intrinsics_rev<bits<3> op22_20, bits<4> op7_4, string opc>
+ : T2I_pam<op22_20, op7_4, opc, [],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
+
+// Saturating add/subtract
+def t2QADD16 : T2I_pam_intrinsics<0b001, 0b0001, "qadd16", int_arm_qadd16>;
+def t2QADD8 : T2I_pam_intrinsics<0b000, 0b0001, "qadd8", int_arm_qadd8>;
+def t2QASX : T2I_pam_intrinsics<0b010, 0b0001, "qasx", int_arm_qasx>;
+def t2UQSUB8 : T2I_pam_intrinsics<0b100, 0b0101, "uqsub8", int_arm_uqsub8>;
+def t2QSAX : T2I_pam_intrinsics<0b110, 0b0001, "qsax", int_arm_qsax>;
+def t2QSUB16 : T2I_pam_intrinsics<0b101, 0b0001, "qsub16", int_arm_qsub16>;
+def t2QSUB8 : T2I_pam_intrinsics<0b100, 0b0001, "qsub8", int_arm_qsub8>;
+def t2UQADD16 : T2I_pam_intrinsics<0b001, 0b0101, "uqadd16", int_arm_uqadd16>;
+def t2UQADD8 : T2I_pam_intrinsics<0b000, 0b0101, "uqadd8", int_arm_uqadd8>;
+def t2UQASX : T2I_pam_intrinsics<0b010, 0b0101, "uqasx", int_arm_uqasx>;
+def t2UQSAX : T2I_pam_intrinsics<0b110, 0b0101, "uqsax", int_arm_uqsax>;
+def t2UQSUB16 : T2I_pam_intrinsics<0b101, 0b0101, "uqsub16", int_arm_uqsub16>;
+def t2QADD : T2I_pam_intrinsics_rev<0b000, 0b1000, "qadd">;
+def t2QSUB : T2I_pam_intrinsics_rev<0b000, 0b1010, "qsub">;
+def t2QDADD : T2I_pam_intrinsics_rev<0b000, 0b1001, "qdadd">;
+def t2QDSUB : T2I_pam_intrinsics_rev<0b000, 0b1011, "qdsub">;
+
+def : Thumb2DSPPat<(int_arm_qadd rGPR:$Rm, rGPR:$Rn),
+ (t2QADD rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qsub rGPR:$Rm, rGPR:$Rn),
+ (t2QSUB rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qadd(int_arm_qadd rGPR:$Rm, rGPR:$Rm), rGPR:$Rn),
+ (t2QDADD rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qsub rGPR:$Rm, (int_arm_qadd rGPR:$Rn, rGPR:$Rn)),
+ (t2QDSUB rGPR:$Rm, rGPR:$Rn)>;
+
+// Signed/Unsigned add/subtract
+
+def t2SASX : T2I_pam_intrinsics<0b010, 0b0000, "sasx", int_arm_sasx>;
+def t2SADD16 : T2I_pam_intrinsics<0b001, 0b0000, "sadd16", int_arm_sadd16>;
+def t2SADD8 : T2I_pam_intrinsics<0b000, 0b0000, "sadd8", int_arm_sadd8>;
+def t2SSAX : T2I_pam_intrinsics<0b110, 0b0000, "ssax", int_arm_ssax>;
+def t2SSUB16 : T2I_pam_intrinsics<0b101, 0b0000, "ssub16", int_arm_ssub16>;
+def t2SSUB8 : T2I_pam_intrinsics<0b100, 0b0000, "ssub8", int_arm_ssub8>;
+def t2UASX : T2I_pam_intrinsics<0b010, 0b0100, "uasx", int_arm_uasx>;
+def t2UADD16 : T2I_pam_intrinsics<0b001, 0b0100, "uadd16", int_arm_uadd16>;
+def t2UADD8 : T2I_pam_intrinsics<0b000, 0b0100, "uadd8", int_arm_uadd8>;
+def t2USAX : T2I_pam_intrinsics<0b110, 0b0100, "usax", int_arm_usax>;
+def t2USUB16 : T2I_pam_intrinsics<0b101, 0b0100, "usub16", int_arm_usub16>;
+def t2USUB8 : T2I_pam_intrinsics<0b100, 0b0100, "usub8", int_arm_usub8>;
+
+// Signed/Unsigned halving add/subtract
+
+def t2SHASX : T2I_pam_intrinsics<0b010, 0b0010, "shasx", int_arm_shasx>;
+def t2SHADD16 : T2I_pam_intrinsics<0b001, 0b0010, "shadd16", int_arm_shadd16>;
+def t2SHADD8 : T2I_pam_intrinsics<0b000, 0b0010, "shadd8", int_arm_shadd8>;
+def t2SHSAX : T2I_pam_intrinsics<0b110, 0b0010, "shsax", int_arm_shsax>;
+def t2SHSUB16 : T2I_pam_intrinsics<0b101, 0b0010, "shsub16", int_arm_shsub16>;
+def t2SHSUB8 : T2I_pam_intrinsics<0b100, 0b0010, "shsub8", int_arm_shsub8>;
+def t2UHASX : T2I_pam_intrinsics<0b010, 0b0110, "uhasx", int_arm_uhasx>;
+def t2UHADD16 : T2I_pam_intrinsics<0b001, 0b0110, "uhadd16", int_arm_uhadd16>;
+def t2UHADD8 : T2I_pam_intrinsics<0b000, 0b0110, "uhadd8", int_arm_uhadd8>;
+def t2UHSAX : T2I_pam_intrinsics<0b110, 0b0110, "uhsax", int_arm_uhsax>;
+def t2UHSUB16 : T2I_pam_intrinsics<0b101, 0b0110, "uhsub16", int_arm_uhsub16>;
+def t2UHSUB8 : T2I_pam_intrinsics<0b100, 0b0110, "uhsub8", int_arm_uhsub8>;
// Helper class for disassembly only
// A6.3.16 & A6.3.17
@@ -2255,16 +2273,19 @@ class T2FourReg_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops,
// Unsigned Sum of Absolute Differences [and Accumulate].
def t2USAD8 : T2ThreeReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm),
- NoItinerary, "usad8", "\t$Rd, $Rn, $Rm", []>,
+ NoItinerary, "usad8", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (int_arm_usad8 rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]> {
let Inst{15-12} = 0b1111;
}
def t2USADA8 : T2FourReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), NoItinerary,
- "usada8", "\t$Rd, $Rn, $Rm, $Ra", []>,
+ "usada8", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (int_arm_usada8 rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>,
Requires<[IsThumb2, HasDSP]>;
// Signed/Unsigned saturate.
+let hasSideEffects = 1 in
class T2SatI<dag iops, string opc, string asm>
: T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, []> {
bits<4> Rd;
@@ -2313,10 +2334,16 @@ def t2USAT16: T2SatI<(ins imm0_15:$sat_imm, rGPR:$Rn),
let Inst{4} = 0;
}
-def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos), (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
-def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos), (t2USAT imm0_31:$pos, GPR:$a, 0)>;
def : T2Pat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
(t2SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
+def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos),
+ (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
+def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos),
+ (t2USAT imm0_31:$pos, GPR:$a, 0)>;
+def : T2Pat<(int_arm_ssat16 GPR:$a, imm1_16:$pos),
+ (t2SSAT16 imm1_16:$pos, GPR:$a)>;
+def : T2Pat<(int_arm_usat16 GPR:$a, imm0_15:$pos),
+ (t2USAT16 imm0_15:$pos, GPR:$a)>;
//===----------------------------------------------------------------------===//
// Shift and rotate Instructions.
@@ -2689,6 +2716,18 @@ def : Thumb2DSPPat<(mul sext_16_node:$Rn, (sra rGPR:$Rm, (i32 16))),
(t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
def : Thumb2DSPPat<(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm),
(t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulbb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULBB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulbt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smultb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smultt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULTT rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulwb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULWB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulwt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULWT rGPR:$Rn, rGPR:$Rm)>;
class T2FourRegSMLA<bits<3> op22_20, bits<2> op5_4, string opc,
list<dag> pattern>
@@ -2730,6 +2769,19 @@ def : Thumb2DSPMulPat<(add rGPR:$Ra,
(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm)),
(t2SMLATB rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
+def : Thumb2DSPPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlabt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlatb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlatt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLATT GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlawb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlawt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLAWT GPR:$a, GPR:$b, GPR:$acc)>;
+
// Halfword multiple accumulate long: SMLAL<x><y>
def t2SMLALBB : T2MlaLong<0b100, 0b1000, "smlalbb">,
Requires<[IsThumb2, HasDSP]>;
@@ -2749,39 +2801,44 @@ def : Thumb2DSPPat<(ARMsmlaltb GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
def : Thumb2DSPPat<(ARMsmlaltt GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
(t2SMLALTT $Rn, $Rm, $RLo, $RHi)>;
-class T2DualHalfMul<bits<3> op22_20, bits<4> op7_4, string opc>
+class T2DualHalfMul<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
: T2ThreeReg_mac<0, op22_20, op7_4,
(outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm),
- IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm", []>,
+ IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]>,
Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]> {
let Inst{15-12} = 0b1111;
}
// Dual halfword multiple: SMUAD, SMUSD, SMLAD, SMLSD, SMLALD, SMLSLD
-def t2SMUAD: T2DualHalfMul<0b010, 0b0000, "smuad">;
-def t2SMUADX: T2DualHalfMul<0b010, 0b0001, "smuadx">;
-def t2SMUSD: T2DualHalfMul<0b100, 0b0000, "smusd">;
-def t2SMUSDX: T2DualHalfMul<0b100, 0b0001, "smusdx">;
+def t2SMUAD: T2DualHalfMul<0b010, 0b0000, "smuad", int_arm_smuad>;
+def t2SMUADX: T2DualHalfMul<0b010, 0b0001, "smuadx", int_arm_smuadx>;
+def t2SMUSD: T2DualHalfMul<0b100, 0b0000, "smusd", int_arm_smusd>;
+def t2SMUSDX: T2DualHalfMul<0b100, 0b0001, "smusdx", int_arm_smusdx>;
-class T2DualHalfMulAdd<bits<3> op22_20, bits<4> op7_4, string opc>
+class T2DualHalfMulAdd<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
: T2FourReg_mac<0, op22_20, op7_4,
(outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra),
- IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm, $Ra", []>,
+ IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>,
Requires<[IsThumb2, HasDSP]>;
-def t2SMLAD : T2DualHalfMulAdd<0b010, 0b0000, "smlad">;
-def t2SMLADX : T2DualHalfMulAdd<0b010, 0b0001, "smladx">;
-def t2SMLSD : T2DualHalfMulAdd<0b100, 0b0000, "smlsd">;
-def t2SMLSDX : T2DualHalfMulAdd<0b100, 0b0001, "smlsdx">;
+def t2SMLAD : T2DualHalfMulAdd<0b010, 0b0000, "smlad", int_arm_smlad>;
+def t2SMLADX : T2DualHalfMulAdd<0b010, 0b0001, "smladx", int_arm_smladx>;
+def t2SMLSD : T2DualHalfMulAdd<0b100, 0b0000, "smlsd", int_arm_smlsd>;
+def t2SMLSDX : T2DualHalfMulAdd<0b100, 0b0001, "smlsdx", int_arm_smlsdx>;
class T2DualHalfMulAddLong<bits<3> op22_20, bits<4> op7_4, string opc>
: T2FourReg_mac<1, op22_20, op7_4,
(outs rGPR:$Ra, rGPR:$Rd),
- (ins rGPR:$Rn, rGPR:$Rm),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
IIC_iMAC64, opc, "\t$Ra, $Rd, $Rn, $Rm", []>,
+ RegConstraint<"$Ra = $RLo, $Rd = $RHi">,
Requires<[IsThumb2, HasDSP]>,
Sched<[WriteMAC64Lo, WriteMAC64Hi, ReadMUL, ReadMUL, ReadMAC, ReadMAC]>;
@@ -2790,6 +2847,15 @@ def t2SMLALDX : T2DualHalfMulAddLong<0b100, 0b1101, "smlaldx">;
def t2SMLSLD : T2DualHalfMulAddLong<0b101, 0b1100, "smlsld">;
def t2SMLSLDX : T2DualHalfMulAddLong<0b101, 0b1101, "smlsldx">;
+def : Thumb2DSPPat<(ARMSmlald rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLALD rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlaldx rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLALDX rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlsld rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLSLD rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlsldx rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLSLDX rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+
//===----------------------------------------------------------------------===//
// Division Instructions.
// Signed and unsigned division on v7-M
@@ -4640,6 +4706,19 @@ def : t2InstSubst<"and${s}${p} $Rd, $Rn, $imm",
def : t2InstSubst<"and${s}${p} $Rdn, $imm",
(t2BICri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
+// And ORR <--> ORN
+def : t2InstSubst<"orn${s}${p} $Rd, $Rn, $imm",
+ (t2ORRri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orn${s}${p} $Rdn, $imm",
+ (t2ORRri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orr${s}${p} $Rd, $Rn, $imm",
+ (t2ORNri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orr${s}${p} $Rdn, $imm",
+ (t2ORNri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
// Likewise, "add Rd, t2_so_imm_neg" -> sub
def : t2InstSubst<"add${s}${p} $Rd, $Rn, $imm",
(t2SUBri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm_neg:$imm,
diff --git a/lib/Target/ARM/ARMRegisterBankInfo.cpp b/lib/Target/ARM/ARMRegisterBankInfo.cpp
index 7325817d446b..13a32211f88c 100644
--- a/lib/Target/ARM/ARMRegisterBankInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterBankInfo.cpp
@@ -196,14 +196,14 @@ const RegisterBank &ARMRegisterBankInfo::getRegBankFromRegClass(
llvm_unreachable("Switch should handle all register classes");
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
auto Opc = MI.getOpcode();
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -258,7 +258,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
if (Ty.getSizeInBits() != 64 || Ty1.getSizeInBits() != 32 ||
Ty2.getSizeInBits() != 32)
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
OperandsMapping =
getOperandsMapping({&ARM::ValueMappings[ARM::DPR3OpsIdx],
&ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr,
@@ -271,14 +271,14 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 64 ||
MI.getOperand(2).getImm() % 32 != 0)
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
OperandsMapping = getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx],
&ARM::ValueMappings[ARM::DPR3OpsIdx],
nullptr, nullptr});
break;
}
default:
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
}
#ifndef NDEBUG
@@ -292,6 +292,6 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
#endif
- return InstructionMapping{DefaultMappingID, /*Cost=*/1, OperandsMapping,
- NumOperands};
+ return getInstructionMapping(DefaultMappingID, /*Cost=*/1, OperandsMapping,
+ NumOperands);
}
diff --git a/lib/Target/ARM/ARMRegisterBankInfo.h b/lib/Target/ARM/ARMRegisterBankInfo.h
index 5222c1e6389f..9650b358f319 100644
--- a/lib/Target/ARM/ARMRegisterBankInfo.h
+++ b/lib/Target/ARM/ARMRegisterBankInfo.h
@@ -36,7 +36,8 @@ public:
const RegisterBank &
getRegBankFromRegClass(const TargetRegisterClass &RC) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
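The getInstrMapping signature change above, returning const InstructionMapping & and going through getInvalidInstructionMapping()/getInstructionMapping(), suggests the mappings are now owned and pooled by the RegisterBankInfo rather than constructed by value at every call. A generic C++ sketch of that ownership pattern, with entirely hypothetical types:

#include <map>
#include <tuple>

struct Mapping { unsigned ID, Cost, NumOperands; };

class MappingPool {
  std::map<std::tuple<unsigned, unsigned, unsigned>, Mapping> Pool;
  Mapping Invalid{}; // shared "invalid" instance, akin to getInvalidInstructionMapping()

public:
  const Mapping &getInvalid() const { return Invalid; }

  // Return a reference to a pooled mapping, creating it on first use.
  const Mapping &get(unsigned ID, unsigned Cost, unsigned NumOps) {
    auto Key = std::make_tuple(ID, Cost, NumOps);
    auto It = Pool.find(Key);
    if (It == Pool.end())
      It = Pool.emplace(Key, Mapping{ID, Cost, NumOps}).first;
    return It->second; // std::map nodes keep stable addresses across inserts
  }
};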
diff --git a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
index 2b0cd461df7a..4a943187ab6d 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
@@ -38,6 +38,7 @@ const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr, SMLoc
void ARMTargetStreamer::emitCurrentConstantPool() {
ConstantPools->emitForCurrentSection(Streamer);
+ ConstantPools->clearCacheForCurrentSection(Streamer);
}
// finish() - write out any non-empty assembler constant pools.
diff --git a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
index 1f355171ebd3..80357a63a4e1 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -70,7 +70,7 @@ void BPFAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned Size = Fixup.getKind() == FK_Data_4 ? 4 : 8;
for (unsigned i = 0; i != Size; ++i) {
- unsigned Idx = IsLittleEndian ? i : Size - i;
+ unsigned Idx = IsLittleEndian ? i : Size - i - 1;
Data[Fixup.getOffset() + Idx] = uint8_t(Value >> (i * 8));
}
} else {
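The BPF change above is an off-by-one fix: Value is emitted least-significant byte first, so on a big-endian target byte i must land at index Size - i - 1 (the previous Size - i wrote one byte past the intended field on the first iteration). A simplified sketch of the corrected loop, without the MCFixup offset handling (illustration only):

#include <cstddef>
#include <cstdint>

static void writeFixupBytes(uint8_t *Data, uint64_t Value, size_t Size, bool IsLittleEndian) {
  for (size_t i = 0; i != Size; ++i) {
    size_t Idx = IsLittleEndian ? i : Size - i - 1; // mirror the byte order for big-endian
    Data[Idx] = static_cast<uint8_t>(Value >> (i * 8));
  }
}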
diff --git a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
index 3396ddbe4fa6..87c212b6163f 100644
--- a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
+++ b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
@@ -553,7 +553,7 @@ static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
using namespace Hexagon;
static const MCPhysReg CtrlRegDecoderTable[] = {
/* 0 */ SA0, LC0, SA1, LC1,
- /* 4 */ P3_0, C5, C6, C7,
+ /* 4 */ P3_0, C5, M0, M1,
/* 8 */ USR, PC, UGP, GP,
/* 12 */ CS0, CS1, UPCYCLELO, UPCYCLEHI,
/* 16 */ FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI,
diff --git a/lib/Target/Hexagon/HexagonDepIICHVX.td b/lib/Target/Hexagon/HexagonDepIICHVX.td
new file mode 100644
index 000000000000..1c1788264c66
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonDepIICHVX.td
@@ -0,0 +1,1143 @@
+//===--- HexagonDepIICHVX.td ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def tc_0317c6ca : InstrItinClass;
+def tc_1b93bdc6 : InstrItinClass;
+def tc_2171ebae : InstrItinClass;
+def tc_28978789 : InstrItinClass;
+def tc_316c637c : InstrItinClass;
+def tc_354299ad : InstrItinClass;
+def tc_35e92f8e : InstrItinClass;
+def tc_38208312 : InstrItinClass;
+def tc_4105d6b5 : InstrItinClass;
+def tc_41f4b64e : InstrItinClass;
+def tc_41f99e1c : InstrItinClass;
+def tc_45453b98 : InstrItinClass;
+def tc_4e2a5159 : InstrItinClass;
+def tc_4fd8566e : InstrItinClass;
+def tc_51cd3aab : InstrItinClass;
+def tc_5a9fc4ec : InstrItinClass;
+def tc_5c120602 : InstrItinClass;
+def tc_5cbf490b : InstrItinClass;
+def tc_644584f8 : InstrItinClass;
+def tc_69b6dd20 : InstrItinClass;
+def tc_6b78cf13 : InstrItinClass;
+def tc_6fd9ad30 : InstrItinClass;
+def tc_71337255 : InstrItinClass;
+def tc_72ad7b54 : InstrItinClass;
+def tc_77a4c701 : InstrItinClass;
+def tc_7c3f55c4 : InstrItinClass;
+def tc_7e9f581b : InstrItinClass;
+def tc_7fa82b08 : InstrItinClass;
+def tc_7fa8b40f : InstrItinClass;
+def tc_85d237e3 : InstrItinClass;
+def tc_8b6a873f : InstrItinClass;
+def tc_908a4c8c : InstrItinClass;
+def tc_9311da3f : InstrItinClass;
+def tc_9777e6bf : InstrItinClass;
+def tc_97c165b9 : InstrItinClass;
+def tc_99093773 : InstrItinClass;
+def tc_9b9642a1 : InstrItinClass;
+def tc_9c267309 : InstrItinClass;
+def tc_a3127e12 : InstrItinClass;
+def tc_a4c9df3b : InstrItinClass;
+def tc_aedb9f9e : InstrItinClass;
+def tc_b06ab583 : InstrItinClass;
+def tc_b712833a : InstrItinClass;
+def tc_b77635b4 : InstrItinClass;
+def tc_bbaf280e : InstrItinClass;
+def tc_bf142ae2 : InstrItinClass;
+def tc_c00bf9c9 : InstrItinClass;
+def tc_c4b515c5 : InstrItinClass;
+def tc_cbf6d1dc : InstrItinClass;
+def tc_cedf314b : InstrItinClass;
+def tc_d2cb81ea : InstrItinClass;
+def tc_d5090f3e : InstrItinClass;
+def tc_d642eff3 : InstrItinClass;
+def tc_d725e5b0 : InstrItinClass;
+def tc_d7bea0ec : InstrItinClass;
+def tc_d98f4d63 : InstrItinClass;
+def tc_da979fb3 : InstrItinClass;
+def tc_db5b9e2f : InstrItinClass;
+def tc_e172d86a : InstrItinClass;
+def tc_e231aa4f : InstrItinClass;
+def tc_e3748cdf : InstrItinClass;
+def tc_e5053c8f : InstrItinClass;
+def tc_e6299d16 : InstrItinClass;
+def tc_eb669007 : InstrItinClass;
+def tc_eda67dcd : InstrItinClass;
+def tc_f3fc3f83 : InstrItinClass;
+
+class DepHVXItinV55 {
+ list<InstrItinData> DepHVXItinV55_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
+
+class DepHVXItinV60 {
+ list<InstrItinData> DepHVXItinV60_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
+
+class DepHVXItinV62 {
+ list<InstrItinData> DepHVXItinV62_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
diff --git a/lib/Target/Hexagon/HexagonDepIICScalar.td b/lib/Target/Hexagon/HexagonDepIICScalar.td
new file mode 100644
index 000000000000..261778bda724
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonDepIICScalar.td
@@ -0,0 +1,2504 @@
+//===--- HexagonDepIICScalar.td -------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def tc_049dfb74 : InstrItinClass;
+def tc_0767081f : InstrItinClass;
+def tc_07ac815d : InstrItinClass;
+def tc_090485bb : InstrItinClass;
+def tc_09c86199 : InstrItinClass;
+def tc_09faec3b : InstrItinClass;
+def tc_0cb867f2 : InstrItinClass;
+def tc_1000eb10 : InstrItinClass;
+def tc_128719e8 : InstrItinClass;
+def tc_136c4786 : InstrItinClass;
+def tc_14da557c : InstrItinClass;
+def tc_1b6011fb : InstrItinClass;
+def tc_1b834fe7 : InstrItinClass;
+def tc_1e062b18 : InstrItinClass;
+def tc_1e69aa99 : InstrItinClass;
+def tc_1f9668cc : InstrItinClass;
+def tc_1fe8323c : InstrItinClass;
+def tc_20a8e109 : InstrItinClass;
+def tc_210b2456 : InstrItinClass;
+def tc_251c87b2 : InstrItinClass;
+def tc_261d9b78 : InstrItinClass;
+def tc_28d296df : InstrItinClass;
+def tc_29c14515 : InstrItinClass;
+def tc_2aaab1e0 : InstrItinClass;
+def tc_2c8fe5ae : InstrItinClass;
+def tc_2d1e6f5c : InstrItinClass;
+def tc_2e55aa16 : InstrItinClass;
+def tc_30665cb0 : InstrItinClass;
+def tc_336e698c : InstrItinClass;
+def tc_34e882a4 : InstrItinClass;
+def tc_35fb9d13 : InstrItinClass;
+def tc_37326008 : InstrItinClass;
+def tc_3993c58b : InstrItinClass;
+def tc_3b4892c6 : InstrItinClass;
+def tc_3bea1824 : InstrItinClass;
+def tc_3c10f809 : InstrItinClass;
+def tc_3d905451 : InstrItinClass;
+def tc_3e61d314 : InstrItinClass;
+def tc_3eab77bd : InstrItinClass;
+def tc_43068634 : InstrItinClass;
+def tc_45631a8d : InstrItinClass;
+def tc_47ab9233 : InstrItinClass;
+def tc_47f0b7ad : InstrItinClass;
+def tc_485bb57c : InstrItinClass;
+def tc_4997da4a : InstrItinClass;
+def tc_511f28f6 : InstrItinClass;
+def tc_537e2013 : InstrItinClass;
+def tc_53ee6546 : InstrItinClass;
+def tc_548f402d : InstrItinClass;
+def tc_5625c6c1 : InstrItinClass;
+def tc_580a779c : InstrItinClass;
+def tc_583510c7 : InstrItinClass;
+def tc_5d806107 : InstrItinClass;
+def tc_5fa2857c : InstrItinClass;
+def tc_5fe9fcd0 : InstrItinClass;
+def tc_6264c5e0 : InstrItinClass;
+def tc_639d93ee : InstrItinClass;
+def tc_63cd9d2d : InstrItinClass;
+def tc_65dc7cc4 : InstrItinClass;
+def tc_69bb508b : InstrItinClass;
+def tc_6c52d277 : InstrItinClass;
+def tc_6c576d46 : InstrItinClass;
+def tc_70cabf66 : InstrItinClass;
+def tc_7639d4b0 : InstrItinClass;
+def tc_7675c0e9 : InstrItinClass;
+def tc_76c4c5ef : InstrItinClass;
+def tc_77781686 : InstrItinClass;
+def tc_78b3c689 : InstrItinClass;
+def tc_7986ba30 : InstrItinClass;
+def tc_7bc567a7 : InstrItinClass;
+def tc_7c2dcd4d : InstrItinClass;
+def tc_7ca2ea10 : InstrItinClass;
+def tc_7d01cbdc : InstrItinClass;
+def tc_7d9a56cd : InstrItinClass;
+def tc_81a23d44 : InstrItinClass;
+def tc_821c4233 : InstrItinClass;
+def tc_82f0f122 : InstrItinClass;
+def tc_84630363 : InstrItinClass;
+def tc_86442910 : InstrItinClass;
+def tc_87601822 : InstrItinClass;
+def tc_88fa2da6 : InstrItinClass;
+def tc_8c8041e6 : InstrItinClass;
+def tc_8cb685d9 : InstrItinClass;
+def tc_8def9c57 : InstrItinClass;
+def tc_8f0a6bad : InstrItinClass;
+def tc_8fab9ac3 : InstrItinClass;
+def tc_92d1833c : InstrItinClass;
+def tc_94e6ffd9 : InstrItinClass;
+def tc_95c54f8b : InstrItinClass;
+def tc_9a13af9d : InstrItinClass;
+def tc_9b73d261 : InstrItinClass;
+def tc_9c18c9a5 : InstrItinClass;
+def tc_9c68db63 : InstrItinClass;
+def tc_9ce7a5ab : InstrItinClass;
+def tc_9da3628f : InstrItinClass;
+def tc_9dafb7d3 : InstrItinClass;
+def tc_9df8b0dc : InstrItinClass;
+def tc_9e86015f : InstrItinClass;
+def tc_9f518242 : InstrItinClass;
+def tc_a12a5971 : InstrItinClass;
+def tc_a1fb80e1 : InstrItinClass;
+def tc_a333d2a9 : InstrItinClass;
+def tc_a4567c39 : InstrItinClass;
+def tc_a87879e8 : InstrItinClass;
+def tc_a9c993d9 : InstrItinClass;
+def tc_aad55963 : InstrItinClass;
+def tc_ab1b5e74 : InstrItinClass;
+def tc_ae0722f7 : InstrItinClass;
+def tc_ae2c2dc2 : InstrItinClass;
+def tc_ae762521 : InstrItinClass;
+def tc_b08b653e : InstrItinClass;
+def tc_b08be45e : InstrItinClass;
+def tc_b0f50e3c : InstrItinClass;
+def tc_b189ad4c : InstrItinClass;
+def tc_b324366f : InstrItinClass;
+def tc_b5bfaa60 : InstrItinClass;
+def tc_b5f5a094 : InstrItinClass;
+def tc_b86c7e8b : InstrItinClass;
+def tc_baccf077 : InstrItinClass;
+def tc_bc5561d8 : InstrItinClass;
+def tc_bcf0e36e : InstrItinClass;
+def tc_bd16579e : InstrItinClass;
+def tc_be995eaf : InstrItinClass;
+def tc_bf6fa601 : InstrItinClass;
+def tc_c0cd91a8 : InstrItinClass;
+def tc_c14739d5 : InstrItinClass;
+def tc_c1dbc916 : InstrItinClass;
+def tc_c58f771a : InstrItinClass;
+def tc_c85212ca : InstrItinClass;
+def tc_c8f9a6f6 : InstrItinClass;
+def tc_ca280e8b : InstrItinClass;
+def tc_cbe45117 : InstrItinClass;
+def tc_cd321066 : InstrItinClass;
+def tc_d108a090 : InstrItinClass;
+def tc_d1b5a4b6 : InstrItinClass;
+def tc_d2609065 : InstrItinClass;
+def tc_d267fa19 : InstrItinClass;
+def tc_d2a33af5 : InstrItinClass;
+def tc_d63b71d1 : InstrItinClass;
+def tc_d6a805a8 : InstrItinClass;
+def tc_d95f4e98 : InstrItinClass;
+def tc_da79106e : InstrItinClass;
+def tc_dbe218dd : InstrItinClass;
+def tc_dcfee7ae : InstrItinClass;
+def tc_e17ce9ad : InstrItinClass;
+def tc_e2480a7f : InstrItinClass;
+def tc_e2c08bb4 : InstrItinClass;
+def tc_e2c31426 : InstrItinClass;
+def tc_e578178f : InstrItinClass;
+def tc_e836c161 : InstrItinClass;
+def tc_e8c7a357 : InstrItinClass;
+def tc_eb07ef6f : InstrItinClass;
+def tc_ecfaae86 : InstrItinClass;
+def tc_ef0ebaaa : InstrItinClass;
+def tc_ef2676fd : InstrItinClass;
+def tc_f027ebe9 : InstrItinClass;
+def tc_f055fbb6 : InstrItinClass;
+def tc_f1240c08 : InstrItinClass;
+def tc_f16d5b17 : InstrItinClass;
+def tc_f1aa2cdb : InstrItinClass;
+def tc_f26aa619 : InstrItinClass;
+def tc_f4608adc : InstrItinClass;
+def tc_faab1248 : InstrItinClass;
+def tc_fcee8723 : InstrItinClass;
+def tc_feb4974b : InstrItinClass;
+
+class DepScalarItinV4 {
+ list<InstrItinData> DepScalarItinV4_list = [
+ InstrItinData <tc_049dfb74, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_0767081f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_07ac815d, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_090485bb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09c86199, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09faec3b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_0cb867f2, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_1000eb10, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_128719e8, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_136c4786, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_14da557c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1b6011fb, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1b834fe7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e062b18, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e69aa99, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1f9668cc, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_1fe8323c, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_20a8e109, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_210b2456, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_251c87b2, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_261d9b78, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_28d296df, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_29c14515, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2aaab1e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2c8fe5ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2d1e6f5c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2e55aa16, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_30665cb0, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_336e698c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_34e882a4, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_35fb9d13, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_37326008, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3993c58b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3b4892c6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_3bea1824, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3c10f809, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3d905451, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_3e61d314, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3eab77bd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_43068634, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_45631a8d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_47ab9233, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_47f0b7ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_485bb57c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_4997da4a, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_511f28f6, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_537e2013, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_53ee6546, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_548f402d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5625c6c1, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_580a779c, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_583510c7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5d806107, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fa2857c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fe9fcd0, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6264c5e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_639d93ee, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_63cd9d2d, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_65dc7cc4, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_69bb508b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6c52d277, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_6c576d46, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_70cabf66, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7639d4b0, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7675c0e9, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_76c4c5ef, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_77781686, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_78b3c689, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7986ba30, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7bc567a7, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7c2dcd4d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7ca2ea10, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7d01cbdc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7d9a56cd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_81a23d44, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_821c4233, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_82f0f122, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_84630363, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_86442910, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_87601822, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_88fa2da6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8c8041e6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8cb685d9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8def9c57, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_8f0a6bad, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_8fab9ac3, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_92d1833c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_94e6ffd9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_95c54f8b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9a13af9d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9b73d261, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9c18c9a5, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9c68db63, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9ce7a5ab, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9da3628f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9dafb7d3, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9df8b0dc, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9e86015f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9f518242, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a12a5971, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a1fb80e1, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_a333d2a9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a4567c39, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_a87879e8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a9c993d9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_aad55963, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ab1b5e74, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae0722f7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae2c2dc2, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae762521, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b08b653e, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b08be45e, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b0f50e3c, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b189ad4c, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b324366f, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_b5bfaa60, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b5f5a094, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b86c7e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_baccf077, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_bc5561d8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_bcf0e36e, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_bd16579e, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_be995eaf, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_bf6fa601, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c0cd91a8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c14739d5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c1dbc916, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c58f771a, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c85212ca, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c8f9a6f6, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ca280e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_cbe45117, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_cd321066, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d108a090, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d1b5a4b6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d2609065, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d267fa19, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_d2a33af5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d63b71d1, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d6a805a8, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_d95f4e98, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_da79106e, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dbe218dd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dcfee7ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e17ce9ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2480a7f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e2c08bb4, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2c31426, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e578178f, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_e836c161, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e8c7a357, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_eb07ef6f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ecfaae86, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_ef0ebaaa, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ef2676fd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f027ebe9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f055fbb6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_f1240c08, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f16d5b17, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f1aa2cdb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f26aa619, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_f4608adc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_faab1248, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_fcee8723, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_feb4974b, [InstrStage<1, [SLOT3]>]> ];
+}
+
+class DepScalarItinV5 {
+ list<InstrItinData> DepScalarItinV5_list = [
+ InstrItinData <tc_049dfb74, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_0767081f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_07ac815d, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_090485bb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09c86199, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09faec3b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_0cb867f2, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_1000eb10, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_128719e8, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_136c4786, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_14da557c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1b6011fb, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1b834fe7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e062b18, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e69aa99, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1f9668cc, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_1fe8323c, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_20a8e109, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_210b2456, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_251c87b2, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_261d9b78, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_28d296df, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_29c14515, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2aaab1e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2c8fe5ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2d1e6f5c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2e55aa16, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_30665cb0, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_336e698c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_34e882a4, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_35fb9d13, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_37326008, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3993c58b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3b4892c6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_3bea1824, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3c10f809, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3d905451, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_3e61d314, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3eab77bd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_43068634, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_45631a8d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_47ab9233, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_47f0b7ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_485bb57c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_4997da4a, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_511f28f6, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_537e2013, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_53ee6546, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_548f402d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5625c6c1, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_580a779c, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_583510c7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5d806107, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fa2857c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fe9fcd0, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6264c5e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_639d93ee, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_63cd9d2d, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_65dc7cc4, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_69bb508b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6c52d277, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_6c576d46, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_70cabf66, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7639d4b0, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7675c0e9, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_76c4c5ef, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_77781686, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_78b3c689, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7986ba30, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7bc567a7, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7c2dcd4d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7ca2ea10, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7d01cbdc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7d9a56cd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_81a23d44, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_821c4233, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_82f0f122, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_84630363, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_86442910, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_87601822, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_88fa2da6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8c8041e6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8cb685d9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8def9c57, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_8f0a6bad, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_8fab9ac3, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_92d1833c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_94e6ffd9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_95c54f8b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9a13af9d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9b73d261, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9c18c9a5, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9c68db63, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9ce7a5ab, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9da3628f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9dafb7d3, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9df8b0dc, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9e86015f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9f518242, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a12a5971, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a1fb80e1, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_a333d2a9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a4567c39, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_a87879e8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a9c993d9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_aad55963, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ab1b5e74, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae0722f7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae2c2dc2, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae762521, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b08b653e, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b08be45e, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b0f50e3c, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b189ad4c, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b324366f, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_b5bfaa60, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b5f5a094, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b86c7e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_baccf077, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_bc5561d8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_bcf0e36e, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_bd16579e, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_be995eaf, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_bf6fa601, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c0cd91a8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c14739d5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c1dbc916, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c58f771a, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c85212ca, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c8f9a6f6, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ca280e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_cbe45117, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_cd321066, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d108a090, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d1b5a4b6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d2609065, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d267fa19, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_d2a33af5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d63b71d1, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d6a805a8, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_d95f4e98, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_da79106e, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dbe218dd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dcfee7ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e17ce9ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2480a7f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e2c08bb4, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2c31426, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e578178f, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_e836c161, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e8c7a357, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_eb07ef6f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ecfaae86, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_ef0ebaaa, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ef2676fd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f027ebe9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f055fbb6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_f1240c08, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f16d5b17, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f1aa2cdb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f26aa619, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_f4608adc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_faab1248, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_fcee8723, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_feb4974b, [InstrStage<1, [SLOT3]>]> ];
+}
+
+class DepScalarItinV55 {
+ list<InstrItinData> DepScalarItinV55_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 3, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
+
+class DepScalarItinV60 {
+ list<InstrItinData> DepScalarItinV60_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [2, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_3stall*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
+
+class DepScalarItinV62 {
+ list<InstrItinData> DepScalarItinV62_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [2, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_3*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_3stall*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_3*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
diff --git a/lib/Target/Hexagon/HexagonDepITypes.h b/lib/Target/Hexagon/HexagonDepITypes.h
index 331edaf5831d..be831b9501ea 100644
--- a/lib/Target/Hexagon/HexagonDepITypes.h
+++ b/lib/Target/Hexagon/HexagonDepITypes.h
@@ -15,38 +15,38 @@ enum Type {
TypeALU32_ADDI = 2,
TypeALU64 = 3,
TypeCJ = 4,
- TypeCOPROC_VMEM = 5,
- TypeCR = 7,
+ TypeCR = 6,
TypeCVI_HIST = 10,
TypeCVI_VA = 16,
TypeCVI_VA_DV = 17,
TypeCVI_VINLANESAT = 18,
- TypeCVI_VM_LD = 20,
- TypeCVI_VM_NEW_ST = 21,
- TypeCVI_VM_ST = 22,
- TypeCVI_VM_STU = 23,
- TypeCVI_VM_TMP_LD = 24,
- TypeCVI_VM_VP_LDU = 25,
- TypeCVI_VP = 26,
- TypeCVI_VP_VS = 27,
- TypeCVI_VS = 28,
- TypeCVI_VX = 30,
- TypeCVI_VX_DV = 31,
- TypeDUPLEX = 32,
- TypeENDLOOP = 33,
- TypeEXTENDER = 34,
- TypeJ = 35,
- TypeLD = 36,
- TypeM = 37,
- TypeMAPPING = 38,
- TypeNCJ = 39,
- TypePSEUDO = 40,
- TypeST = 41,
- TypeSUBINSN = 42,
- TypeS_2op = 43,
- TypeS_3op = 44,
- TypeV2LDST = 47,
- TypeV4LDST = 48
+ TypeCVI_VM_LD = 19,
+ TypeCVI_VM_NEW_ST = 20,
+ TypeCVI_VM_ST = 21,
+ TypeCVI_VM_STU = 22,
+ TypeCVI_VM_TMP_LD = 23,
+ TypeCVI_VM_VP_LDU = 24,
+ TypeCVI_VP = 25,
+ TypeCVI_VP_VS = 26,
+ TypeCVI_VS = 27,
+ TypeCVI_VX = 29,
+ TypeCVI_VX_DV = 30,
+ TypeCVI_VX_LATE = 31,
+ TypeDUPLEX = 33,
+ TypeENDLOOP = 34,
+ TypeEXTENDER = 35,
+ TypeJ = 36,
+ TypeLD = 37,
+ TypeM = 38,
+ TypeMAPPING = 39,
+ TypeNCJ = 40,
+ TypePSEUDO = 41,
+ TypeST = 42,
+ TypeSUBINSN = 43,
+ TypeS_2op = 44,
+ TypeS_3op = 45,
+ TypeV2LDST = 48,
+ TypeV4LDST = 49
};
}
}
diff --git a/lib/Target/Hexagon/HexagonDepITypes.td b/lib/Target/Hexagon/HexagonDepITypes.td
index b35f7ba6d2ab..ac1989e4dd82 100644
--- a/lib/Target/Hexagon/HexagonDepITypes.td
+++ b/lib/Target/Hexagon/HexagonDepITypes.td
@@ -13,35 +13,35 @@ def TypeALU32_3op : IType<1>;
def TypeALU32_ADDI : IType<2>;
def TypeALU64 : IType<3>;
def TypeCJ : IType<4>;
-def TypeCOPROC_VMEM : IType<5>;
-def TypeCR : IType<7>;
+def TypeCR : IType<6>;
def TypeCVI_HIST : IType<10>;
def TypeCVI_VA : IType<16>;
def TypeCVI_VA_DV : IType<17>;
def TypeCVI_VINLANESAT : IType<18>;
-def TypeCVI_VM_LD : IType<20>;
-def TypeCVI_VM_NEW_ST : IType<21>;
-def TypeCVI_VM_ST : IType<22>;
-def TypeCVI_VM_STU : IType<23>;
-def TypeCVI_VM_TMP_LD : IType<24>;
-def TypeCVI_VM_VP_LDU : IType<25>;
-def TypeCVI_VP : IType<26>;
-def TypeCVI_VP_VS : IType<27>;
-def TypeCVI_VS : IType<28>;
-def TypeCVI_VX : IType<30>;
-def TypeCVI_VX_DV : IType<31>;
-def TypeDUPLEX : IType<32>;
-def TypeENDLOOP : IType<33>;
-def TypeEXTENDER : IType<34>;
-def TypeJ : IType<35>;
-def TypeLD : IType<36>;
-def TypeM : IType<37>;
-def TypeMAPPING : IType<38>;
-def TypeNCJ : IType<39>;
-def TypePSEUDO : IType<40>;
-def TypeST : IType<41>;
-def TypeSUBINSN : IType<42>;
-def TypeS_2op : IType<43>;
-def TypeS_3op : IType<44>;
-def TypeV2LDST : IType<47>;
-def TypeV4LDST : IType<48>;
+def TypeCVI_VM_LD : IType<19>;
+def TypeCVI_VM_NEW_ST : IType<20>;
+def TypeCVI_VM_ST : IType<21>;
+def TypeCVI_VM_STU : IType<22>;
+def TypeCVI_VM_TMP_LD : IType<23>;
+def TypeCVI_VM_VP_LDU : IType<24>;
+def TypeCVI_VP : IType<25>;
+def TypeCVI_VP_VS : IType<26>;
+def TypeCVI_VS : IType<27>;
+def TypeCVI_VX : IType<29>;
+def TypeCVI_VX_DV : IType<30>;
+def TypeCVI_VX_LATE : IType<31>;
+def TypeDUPLEX : IType<33>;
+def TypeENDLOOP : IType<34>;
+def TypeEXTENDER : IType<35>;
+def TypeJ : IType<36>;
+def TypeLD : IType<37>;
+def TypeM : IType<38>;
+def TypeMAPPING : IType<39>;
+def TypeNCJ : IType<40>;
+def TypePSEUDO : IType<41>;
+def TypeST : IType<42>;
+def TypeSUBINSN : IType<43>;
+def TypeS_2op : IType<44>;
+def TypeS_3op : IType<45>;
+def TypeV2LDST : IType<48>;
+def TypeV4LDST : IType<49>;
diff --git a/lib/Target/Hexagon/HexagonDepInstrFormats.td b/lib/Target/Hexagon/HexagonDepInstrFormats.td
index d7a99f48803b..1b24be477158 100644
--- a/lib/Target/Hexagon/HexagonDepInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonDepInstrFormats.td
@@ -7,233 +7,140 @@
//
//===----------------------------------------------------------------------===//
-class Enc_12122225 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <3> Qd8;
- let Inst{2-0} = Qd8{2-0};
-}
-class Enc_16626097 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_13397056 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_7315939 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{24-22} = n1{4-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_15275738 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-5} = Ii{9-1};
+class Enc_890909 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12822813 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
bits <2> Pe4;
let Inst{6-5} = Pe4{1-0};
}
-class Enc_10282127 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
+class Enc_527412 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_efaed8 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{8-8} = Ii{0-0};
+}
+class Enc_a568d4 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_14264243 : OpcodeHexagon {
+class Enc_27b757 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+}
+class Enc_5de85f : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{11-8} = Rt16{3-0};
-}
-class Enc_6778937 : OpcodeHexagon {
- bits <5> Rxx32;
- let Inst{20-16} = Rxx32{4-0};
- bits <0> sgp10;
-}
-class Enc_5480539 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
}
-class Enc_11422009 : OpcodeHexagon {
+class Enc_0e41fa : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_16357011 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{8-4} = Vv32{4-0};
- bits <5> Vt32;
- let Inst{13-9} = Vt32{4-0};
- bits <4> Vdd16;
- let Inst{3-0} = Vdd16{3-0};
-}
-class Enc_4975051 : OpcodeHexagon {
- bits <19> Ii;
- let Inst{26-25} = Ii{18-17};
- let Inst{20-16} = Ii{16-12};
- let Inst{13-5} = Ii{11-3};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_14786238 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_15472748 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_6773159 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_12535811 : OpcodeHexagon {
+class Enc_802dc0 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{8-8} = Ii{0-0};
bits <2> Qv4;
let Inst{23-22} = Qv4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
}
-class Enc_14007201 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <8> II;
- let Inst{22-16} = II{7-1};
- let Inst{13-13} = II{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_6b197f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2577026 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{2-0} = Qt8{2-0};
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_1f5d8f : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_7305764 : OpcodeHexagon {
- bits <5> II;
- let Inst{12-8} = II{4-0};
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
+class Enc_51436c : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{23-22} = Ii{15-14};
+ let Inst{13-0} = Ii{13-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_11682941 : OpcodeHexagon {
- bits <19> Ii;
- let Inst{26-25} = Ii{18-17};
- let Inst{20-16} = Ii{16-12};
- let Inst{13-13} = Ii{11-11};
- let Inst{7-0} = Ii{10-3};
+class Enc_c7a204 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_16376009 : OpcodeHexagon {
+class Enc_db40cd : OpcodeHexagon {
bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{6-3} = Ii{5-2};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13249928 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{13-5} = Ii{8-0};
+class Enc_a1e29d : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> II;
+ let Inst{22-21} = II{4-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_1971351 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
+class Enc_d15d19 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13715847 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_e90a15 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{22-22} = n1{0-0};
}
-class Enc_13303422 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
+class Enc_e0a47a : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
bits <5> Rd32;
@@ -241,29 +148,32 @@ class Enc_13303422 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14574598 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_140c83 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_13094118 : OpcodeHexagon {
- bits <5> Css32;
- let Inst{20-16} = Css32{4-0};
+class Enc_7eee72 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4231995 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_d7dc10 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_844699 : OpcodeHexagon {
+class Enc_736575 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
@@ -271,74 +181,87 @@ class Enc_844699 : OpcodeHexagon {
let Inst{19-16} = Rs16{3-0};
bits <4> n1;
let Inst{28-28} = n1{3-3};
- let Inst{24-22} = n1{2-0};
+ let Inst{25-23} = n1{2-0};
}
-class Enc_8752140 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
+class Enc_8dec2e : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_eaa9f8 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <2> Qx4;
+ let Inst{1-0} = Qx4{1-0};
+}
+class Enc_509701 : OpcodeHexagon {
+ bits <19> Ii;
+ let Inst{26-25} = Ii{18-17};
+ let Inst{20-16} = Ii{16-12};
+ let Inst{13-5} = Ii{11-3};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_7978128 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{8-8} = Ii{0-0};
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
+class Enc_830e5d : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <8> II;
+ let Inst{22-16} = II{7-1};
+ let Inst{13-13} = II{0-0};
+ bits <2> Pu4;
+ let Inst{24-23} = Pu4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_10492541 : OpcodeHexagon {
+class Enc_79b8c8 : OpcodeHexagon {
bits <6> Ii;
let Inst{6-3} = Ii{5-2};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_0 : OpcodeHexagon {
-}
-class Enc_15733946 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_738356 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
+class Enc_58a8bf : OpcodeHexagon {
+ bits <3> Ii;
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14400220 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{9-5} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+class Enc_041d7b : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-23} = n1{3-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_15194851 : OpcodeHexagon {
+class Enc_f44229 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{13-13} = Ii{6-6};
+ let Inst{7-3} = Ii{5-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_14172170 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{5-5} = Ii{0-0};
+class Enc_aad80c : OpcodeHexagon {
bits <5> Vuu32;
let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
@@ -346,413 +269,269 @@ class Enc_14172170 : OpcodeHexagon {
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_10065510 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
+class Enc_87c142 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-4} = Ii{6-2};
+ bits <4> Rt16;
+ let Inst{3-0} = Rt16{3-0};
+}
+class Enc_86a14b : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{7-3} = Ii{7-3};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
+}
+class Enc_9a33d5 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14998517 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <3> n1;
- let Inst{29-29} = n1{2-2};
- let Inst{26-25} = n1{1-0};
+class Enc_a56825 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_16657398 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+class Enc_9ea4cf : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_14620934 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_ee5ed0 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+ bits <2> n1;
+ let Inst{9-8} = n1{1-0};
}
-class Enc_10075393 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_935d9b : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
-}
-class Enc_8638014 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13261538 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+class Enc_61f0b0 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_8990840 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-5} = Ii{10-2};
+class Enc_bd6011 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_5974204 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_65d691 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_4711514 : OpcodeHexagon {
- bits <2> Qu4;
- let Inst{9-8} = Qu4{1-0};
+class Enc_e8c45e : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{13-13} = Ii{6-6};
+ let Inst{7-3} = Ii{5-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_11492529 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
+class Enc_ca3887 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_9277990 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_a94f3b : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_6690615 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-4} = Ii{6-2};
+class Enc_625deb : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{10-8} = Ii{3-1};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
bits <4> Rt16;
let Inst{3-0} = Rt16{3-0};
}
-class Enc_1220199 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_7785569 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-22} = n1{4-1};
- let Inst{8-8} = n1{0-0};
+class Enc_1f5ba6 : OpcodeHexagon {
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2880796 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
+class Enc_cd82bc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{21-21} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <6> II;
+ let Inst{13-8} = II{5-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_6858527 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{4-0} = Vv32{4-0};
-}
-class Enc_11863656 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_151014 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <2> Px4;
- let Inst{6-5} = Px4{1-0};
-}
-class Enc_10333841 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_399e12 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
}
-class Enc_14044877 : OpcodeHexagon {
+class Enc_d7a65e : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-13} = Ii{5-5};
- let Inst{7-3} = Ii{4-0};
+ let Inst{12-7} = Ii{5-0};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_13691337 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <2> Qx4;
- let Inst{6-5} = Qx4{1-0};
}
-class Enc_3817033 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <3> Qt8;
- let Inst{10-8} = Qt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_607661 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3540372 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_6a5972 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> Rt16;
+ let Inst{11-8} = Rt16{3-0};
}
-class Enc_5200852 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_53dca9 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{11-8} = Ii{5-2};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_15949334 : OpcodeHexagon {
+class Enc_27fd0e : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_3831744 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_8280533 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_10969213 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_3974695 : OpcodeHexagon {
+class Enc_93af4c : OpcodeHexagon {
bits <7> Ii;
let Inst{10-4} = Ii{6-0};
bits <4> Rx16;
let Inst{3-0} = Rx16{3-0};
}
-class Enc_7255914 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_5bdd42 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-5} = Ii{6-3};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7212930 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_71f1b4 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_12781442 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
-}
-class Enc_799555 : OpcodeHexagon {
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_11083408 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_900013 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_9487067 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{19-16} = Ii{11-8};
- let Inst{12-5} = Ii{7-0};
- bits <2> Pu4;
- let Inst{22-21} = Pu4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_16014536 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_12419313 : OpcodeHexagon {
+class Enc_14640c : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{24-23} = n1{2-1};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-22} = n1{3-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_5503430 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_14767681 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_9093094 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <8> II;
- let Inst{22-16} = II{7-1};
- let Inst{13-13} = II{0-0};
- bits <2> Pu4;
- let Inst{24-23} = Pu4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_11542684 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{27-21} = Ii{15-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_8877260 : OpcodeHexagon {
+class Enc_31db33 : OpcodeHexagon {
+ bits <2> Qt4;
+ let Inst{6-5} = Qt4{1-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_1737833 : OpcodeHexagon {
+class Enc_65f095 : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-13} = Ii{5-5};
- let Inst{7-3} = Ii{4-0};
+ let Inst{6-3} = Ii{5-2};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_255516 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_784502 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_10721363 : OpcodeHexagon {
+class Enc_6413b6 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{23-23} = n1{1-1};
+ let Inst{13-13} = n1{0-0};
+}
+class Enc_7a0ea6 : OpcodeHexagon {
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+ bits <1> n1;
+ let Inst{9-9} = n1{0-0};
+}
+class Enc_84bff1 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -760,90 +539,138 @@ class Enc_10721363 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_7076358 : OpcodeHexagon {
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
+class Enc_74aef2 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_11930928 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_78e566 : OpcodeHexagon {
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_2410156 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
+class Enc_437f33 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_6735062 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
+class Enc_0527db : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rx16;
+ let Inst{3-0} = Rx16{3-0};
+}
+class Enc_420cf3 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{12-8} = Rd32{4-0};
}
-class Enc_7965855 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_e39bb2 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{9-4} = Ii{5-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_5202340 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vyy32;
- let Inst{4-0} = Vyy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_1b64fb : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_10568534 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <2> Pu4;
- let Inst{22-21} = Pu4{1-0};
+class Enc_c6220b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Ru32;
+ let Inst{12-8} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{2-0} = Nt8{2-0};
}
-class Enc_16730127 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_322e1b : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
let Inst{7-5} = Ii{2-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_11224149 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{13-13} = Ii{7-7};
- let Inst{7-3} = Ii{6-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ bits <6> II;
+ let Inst{23-23} = II{5-5};
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{12-8} = Rd32{4-0};
+}
+class Enc_989021 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vy32;
+ let Inst{12-8} = Vy32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
+}
+class Enc_178717 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-23} = n1{4-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
+}
+class Enc_78cbf0 : OpcodeHexagon {
+ bits <18> Ii;
+ let Inst{26-25} = Ii{17-16};
+ let Inst{20-16} = Ii{15-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_9772987 : OpcodeHexagon {
+class Enc_052c7d : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_fcf7a7 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_55355c : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -854,342 +681,259 @@ class Enc_9772987 : OpcodeHexagon {
bits <5> Rtt32;
let Inst{4-0} = Rtt32{4-0};
}
-class Enc_9238139 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_2082775 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_5790679 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{12-8} = Ii{8-4};
- let Inst{4-3} = Ii{3-2};
+class Enc_211aaa : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_9305257 : OpcodeHexagon {
- bits <5> Zu8;
- let Inst{12-8} = Zu8{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_3735566 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_12654528 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
+class Enc_6185fe : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{4-0} = Vvv32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_15290236 : OpcodeHexagon {
+class Enc_cd4705 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_11139981 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_2ebe3b : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_3d5b28 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_15546666 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-8} = Ii{8-6};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_5ab2be : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_486163 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
+class Enc_fef969 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{5-5} = Ii{0-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_2079016 : OpcodeHexagon {
+class Enc_63eaeb : OpcodeHexagon {
bits <2> Ii;
let Inst{1-0} = Ii{1-0};
bits <4> Rs16;
let Inst{7-4} = Rs16{3-0};
}
-class Enc_10095813 : OpcodeHexagon {
+class Enc_95441f : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_13133322 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_9422954 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_10642833 : OpcodeHexagon {
+class Enc_372c9d : OpcodeHexagon {
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14989332 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+class Enc_4dff07 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{4-0} = Vv32{4-0};
-}
-class Enc_10263630 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13937564 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_04c959 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_7171569 : OpcodeHexagon {
+class Enc_b62ef7 : OpcodeHexagon {
bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ let Inst{10-8} = Ii{2-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2702036 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_2b518f : OpcodeHexagon {
+ bits <32> Ii;
+ let Inst{27-16} = Ii{31-20};
+ let Inst{13-0} = Ii{19-6};
}
-class Enc_1928953 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
+class Enc_b388cf : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> II;
+ let Inst{22-21} = II{4-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_5853469 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_ad1c74 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+}
+class Enc_74d4e5 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_7692963 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_c90aca : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_15140689 : OpcodeHexagon {
+class Enc_222336 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_5e87ce : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{23-22} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_f7ea77 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_748676 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-0} = Ii{8-1};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
-}
-class Enc_3372766 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_7900405 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_245865 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_11930027 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-5} = Ii{9-1};
+class Enc_88d4d9 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{9-8} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
}
-class Enc_971574 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <6> II;
- let Inst{23-23} = II{5-5};
- let Inst{4-0} = II{4-0};
+class Enc_c0cdde : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
-}
-class Enc_13453446 : OpcodeHexagon {
- bits <24> Ii;
- let Inst{24-16} = Ii{23-15};
- let Inst{13-1} = Ii{14-2};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_6356866 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
+class Enc_226535 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_16246706 : OpcodeHexagon {
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_5326450 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_31aa6a : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_11687333 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_2771456 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_11282123 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
+class Enc_397f23 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{13-13} = Ii{7-7};
+ let Inst{7-3} = Ii{6-2};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
-}
-class Enc_518319 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{5-5} = Ii{0-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
}
-class Enc_16104442 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_7912540 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
-}
-class Enc_15560488 : OpcodeHexagon {
+class Enc_865390 : OpcodeHexagon {
bits <3> Ii;
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7581852 : OpcodeHexagon {
+class Enc_98c0b8 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
@@ -1197,144 +941,139 @@ class Enc_7581852 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_10030031 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_3915770 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_bfbf03 : OpcodeHexagon {
+ bits <2> Qs4;
+ let Inst{9-8} = Qs4{1-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_4075554 : OpcodeHexagon {
+class Enc_ecbcc8 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+}
+class Enc_f5e933 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_11326438 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_3fc427 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_4050532 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+class Enc_01d3d0 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_14461004 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
+class Enc_b0e9d8 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_13344657 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{8-8} = Ii{0-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_13114546 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{5-5} = Ii{0-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+class Enc_3694bd : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{23-22} = n1{1-0};
}
-class Enc_14530015 : OpcodeHexagon {
+class Enc_a42857 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-23} = n1{4-2};
- let Inst{13-13} = n1{1-1};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-22} = n1{3-1};
let Inst{8-8} = n1{0-0};
}
-class Enc_5967898 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
+class Enc_b7fad3 : OpcodeHexagon {
bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
+ let Inst{9-8} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_223005 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{6-3} = Ii{5-2};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_15450971 : OpcodeHexagon {
+class Enc_9e4c3f : OpcodeHexagon {
+ bits <6> II;
+ let Inst{13-8} = II{5-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-22} = n1{4-1};
- let Inst{13-13} = n1{0-0};
+ bits <4> Rd16;
+ let Inst{19-16} = Rd16{3-0};
}
-class Enc_15536400 : OpcodeHexagon {
+class Enc_8b8d61 : OpcodeHexagon {
bits <6> Ii;
- let Inst{3-0} = Ii{5-2};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rd32;
+ let Inst{12-8} = Rd32{4-0};
}
-class Enc_1291652 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{8-8} = Ii{0-0};
+class Enc_88c16c : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_5636753 : OpcodeHexagon {
+class Enc_770858 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{6-5} = Ps4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_5757366 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+class Enc_bd811a : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Cd32;
+ let Inst{4-0} = Cd32{4-0};
}
-class Enc_9752128 : OpcodeHexagon {
+class Enc_b05839 : OpcodeHexagon {
bits <7> Ii;
let Inst{8-5} = Ii{6-3};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13618890 : OpcodeHexagon {
+class Enc_bc03e5 : OpcodeHexagon {
bits <17> Ii;
let Inst{26-25} = Ii{16-15};
let Inst{20-16} = Ii{14-10};
@@ -1343,33 +1082,7 @@ class Enc_13618890 : OpcodeHexagon {
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_5890213 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_5582416 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_13536408 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{3-0} = Ii{3-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
-}
-class Enc_9773189 : OpcodeHexagon {
+class Enc_412ff0 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Ru32;
@@ -1377,420 +1090,547 @@ class Enc_9773189 : OpcodeHexagon {
bits <5> Rxx32;
let Inst{12-8} = Rxx32{4-0};
}
-class Enc_2152247 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
+class Enc_c9a18e : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_be32a5 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_e6abcf : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_12848507 : OpcodeHexagon {
+class Enc_6339d5 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
+ let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ let Inst{12-8} = Ru32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_16279406 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
+class Enc_d6990d : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_1734121 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{10-8} = Ii{3-1};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{3-0} = Rt16{3-0};
+class Enc_6c9440 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_766909 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_0d8adb : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
-}
-class Enc_4527648 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_8849208 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
+class Enc_50e578 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_1cf4ca : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_48b75f : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_9894557 : OpcodeHexagon {
+class Enc_b97f71 : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <6> II;
- let Inst{23-21} = II{5-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
+ let Inst{8-5} = Ii{5-2};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_9d1247 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-5} = Ii{6-3};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4109168 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
+class Enc_f4413a : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14560494 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_f7430e : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+}
+class Enc_e7581c : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_9773167 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_2301d6 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{8-8} = Ii{0-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_1898420 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
+class Enc_c31910 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{23-21} = Ii{7-5};
+ let Inst{13-13} = Ii{4-4};
+ let Inst{7-5} = Ii{3-1};
+ let Inst{3-3} = Ii{0-0};
+ bits <5> II;
+ let Inst{12-8} = II{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_11498120 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_2f2f04 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{5-5} = Ii{0-0};
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_15459921 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_8d8a30 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10058269 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_10197700 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_12608570 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{26-25} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-5} = Ii{9-1};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4804090 : OpcodeHexagon {
- bits <6> Ss64;
- let Inst{21-16} = Ss64{5-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14973146 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_5718302 : OpcodeHexagon {
+class Enc_2d7491 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-5} = Ii{10-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_2103742 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
+class Enc_a803e0 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
}
-class Enc_7564330 : OpcodeHexagon {
+class Enc_45364e : OpcodeHexagon {
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_2176383 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{9-4} = Ii{5-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_b909d2 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <7> n1;
+ let Inst{28-28} = n1{6-6};
+ let Inst{25-22} = n1{5-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_7736768 : OpcodeHexagon {
+class Enc_e6c957 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_fa3ba4 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{26-25} = Ii{13-12};
+ let Inst{13-5} = Ii{11-3};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_0d8870 : OpcodeHexagon {
bits <12> Ii;
let Inst{26-25} = Ii{11-10};
let Inst{13-13} = Ii{9-9};
let Inst{7-0} = Ii{8-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_13189194 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{5-5} = Ii{0-0};
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+class Enc_9fae8a : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_5154851 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_18c338 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <8> II;
+ let Inst{22-16} = II{7-1};
+ let Inst{13-13} = II{0-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_5ccba9 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_1329520 : OpcodeHexagon {
+class Enc_0ed752 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Cdd32;
let Inst{4-0} = Cdd32{4-0};
}
-class Enc_14057553 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9223889 : OpcodeHexagon {
+class Enc_143445 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
+}
+class Enc_3a3d62 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_3e3989 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-22} = n1{4-1};
+ let Inst{8-8} = n1{0-0};
+}
+class Enc_152467 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_daea09 : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{23-22} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <2> Pu4;
+ let Inst{9-8} = Pu4{1-0};
}
-class Enc_10979813 : OpcodeHexagon {
+class Enc_f37377 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+}
+class Enc_a198f6 : OpcodeHexagon {
bits <7> Ii;
- let Inst{13-13} = Ii{6-6};
- let Inst{7-3} = Ii{5-1};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{10-5} = Ii{6-1};
+ bits <2> Pt4;
+ let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_13490067 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{2-0} = Qt8{2-0};
+class Enc_3dac0b : OpcodeHexagon {
+ bits <2> Qt4;
+ let Inst{6-5} = Qt4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_10076500 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_163381 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{26-25} = Ii{13-12};
- let Inst{13-5} = Ii{11-3};
+class Enc_e38e1f : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <2> Pu4;
+ let Inst{22-21} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_10328975 : OpcodeHexagon {
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_f8ecf9 : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Vvv32;
+ let Inst{20-16} = Vvv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_14939491 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
+class Enc_7f1a05 : OpcodeHexagon {
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ry32;
+ let Inst{12-8} = Ry32{4-0};
+}
+class Enc_2df31d : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{9-4} = Ii{7-2};
bits <4> Rd16;
let Inst{3-0} = Rd16{3-0};
}
-class Enc_8891794 : OpcodeHexagon {
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_7723767 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_25bef0 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_2639299 : OpcodeHexagon {
+class Enc_f82302 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{11-8} = Rd16{3-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{23-23} = n1{0-0};
}
-class Enc_11552785 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_83ee64 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_11849200 : OpcodeHexagon {
+class Enc_adf111 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <2> Qx4;
+ let Inst{1-0} = Qx4{1-0};
+}
+class Enc_46c951 : OpcodeHexagon {
bits <6> Ii;
let Inst{12-7} = Ii{5-0};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
}
-class Enc_14868535 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{23-22} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-1} = Ii{8-2};
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
+class Enc_5d6c34 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_4df4e9 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_48594 : OpcodeHexagon {
+class Enc_91b9fe : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6608821 : OpcodeHexagon {
- bits <4> Ii;
+class Enc_a7b8e8 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
-}
-class Enc_11049656 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{13-13} = Ii{8-8};
- let Inst{7-3} = Ii{7-3};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{7-5} = Ii{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_2b3f60 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <2> Px4;
+ let Inst{6-5} = Px4{1-0};
}
-class Enc_117962 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{23-21} = Ii{7-5};
- let Inst{13-13} = Ii{4-4};
- let Inst{7-5} = Ii{3-1};
- let Inst{3-3} = Ii{0-0};
- bits <5> II;
- let Inst{12-8} = II{4-0};
+class Enc_bd1cbc : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5900401 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_a30110 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_36641 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
+class Enc_f3f408 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_9626139 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
+class Enc_690862 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+}
+class Enc_2a3787 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-5} = Ii{10-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_11971407 : OpcodeHexagon {
+class Enc_d5c73f : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_3f97c8 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{6-3} = Ii{5-2};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_d50cd3 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_729ff7 : OpcodeHexagon {
bits <3> Ii;
let Inst{7-5} = Ii{2-0};
bits <5> Rtt32;
@@ -1800,37 +1640,32 @@ class Enc_11971407 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_9852473 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-5} = Ii{10-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_217147 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+}
+class Enc_b9c5fb : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_6495334 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
+class Enc_f394d3 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_1186018 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{26-25} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-0} = Ii{8-1};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_0cb018 : OpcodeHexagon {
+ bits <5> Cs32;
+ let Inst{20-16} = Cs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15999208 : OpcodeHexagon {
+class Enc_541f26 : OpcodeHexagon {
bits <18> Ii;
let Inst{26-25} = Ii{17-16};
let Inst{20-16} = Ii{15-11};
@@ -1839,446 +1674,302 @@ class Enc_15999208 : OpcodeHexagon {
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_11477246 : OpcodeHexagon {
+class Enc_724154 : OpcodeHexagon {
bits <6> II;
let Inst{5-0} = II{5-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Re32;
let Inst{20-16} = Re32{4-0};
}
-class Enc_7971062 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{23-22} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4327792 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+class Enc_179b35 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_10326434 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_585242 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-3} = Ii{4-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_1572239 : OpcodeHexagon {
- bits <2> Qt4;
- let Inst{6-5} = Qt4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
}
-class Enc_6372758 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+class Enc_cf1927 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_15793331 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
+class Enc_b84c4c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <6> II;
+ let Inst{23-21} = II{5-3};
+ let Inst{7-5} = II{2-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11424254 : OpcodeHexagon {
- bits <2> Qt4;
- let Inst{6-5} = Qt4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+class Enc_9ac432 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Pu4;
+ let Inst{7-6} = Pu4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_4983213 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{10-0} = Ii{13-3};
+class Enc_8203bb : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
}
-class Enc_16035138 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
-}
-class Enc_8225953 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{13-13} = Ii{7-7};
- let Inst{7-3} = Ii{6-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+class Enc_e66a97 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_4397470 : OpcodeHexagon {
- bits <5> II;
- let Inst{12-8} = II{4-0};
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
}
-class Enc_1004392 : OpcodeHexagon {
+class Enc_8c2412 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{6-5} = Ps4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
-}
-class Enc_16319737 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{26-25} = Ii{13-12};
- let Inst{13-13} = Ii{11-11};
- let Inst{7-0} = Ii{10-3};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
-}
-class Enc_2296022 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9664427 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <3> Qss8;
- let Inst{2-0} = Qss8{2-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_877823 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
+class Enc_284ebb : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_1589406 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+class Enc_733b27 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6900405 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+class Enc_22c845 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{10-0} = Ii{13-3};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14150875 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-22} = n1{3-0};
-}
-class Enc_15707793 : OpcodeHexagon {
+class Enc_9b0bc1 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Gd32;
- let Inst{4-0} = Gd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_14689096 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
+class Enc_ea4c54 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_9915754 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_b72622 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{5-5} = Ii{0-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_7470998 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <2> Qx4;
- let Inst{1-0} = Qx4{1-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_11471622 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_569cfe : OpcodeHexagon {
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_14363183 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_15816255 : OpcodeHexagon {
+class Enc_96ce4f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5321335 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <4> Vdd16;
- let Inst{7-4} = Vdd16{3-0};
-}
-class Enc_12702821 : OpcodeHexagon {
+class Enc_143a3c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <6> II;
+ let Inst{23-21} = II{5-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
bits <5> Rxx32;
let Inst{4-0} = Rxx32{4-0};
}
-class Enc_449439 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-5} = Ii{8-0};
+class Enc_57a33e : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-3} = Ii{7-3};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_2054304 : OpcodeHexagon {
+class Enc_311abd : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <6> Sd64;
- let Inst{5-0} = Sd64{5-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_236434 : OpcodeHexagon {
+class Enc_a1640c : OpcodeHexagon {
bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
-}
-class Enc_5598813 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_8409782 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
+class Enc_de0214 : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-5} = Ii{9-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15182416 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{8-8} = Ii{0-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_a90628 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_4501395 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_fda92c : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{26-25} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-0} = Ii{8-1};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_831a7d : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6039436 : OpcodeHexagon {
- bits <3> Qtt8;
- let Inst{2-0} = Qtt8{2-0};
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_476163 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_11a146 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_11281763 : OpcodeHexagon {
+class Enc_b15941 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9929262 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
-}
-class Enc_13174858 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8437395 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_16578332 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-8} = Ii{8-6};
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_12829314 : OpcodeHexagon {
+class Enc_b78edd : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-23} = n1{2-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_9744403 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{13-9} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{8-4} = Vv32{4-0};
- bits <4> Vdd16;
- let Inst{3-0} = Vdd16{3-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10968391 : OpcodeHexagon {
+class Enc_a27588 : OpcodeHexagon {
bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <7> n1;
- let Inst{28-28} = n1{6-6};
- let Inst{25-22} = n1{5-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_64199 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-4} = Ii{6-2};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_2a7b91 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{8-8} = Ii{0-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11039423 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
+class Enc_b43b67 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <2> Qx4;
+ let Inst{6-5} = Qx4{1-0};
}
-class Enc_6730375 : OpcodeHexagon {
+class Enc_4aca3a : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
+ bits <3> n1;
+ let Inst{29-29} = n1{2-2};
+ let Inst{26-25} = n1{1-0};
}
-class Enc_16213761 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_13204995 : OpcodeHexagon {
+class Enc_b38ffc : OpcodeHexagon {
bits <4> Ii;
let Inst{11-8} = Ii{3-0};
bits <4> Rs16;
@@ -2286,79 +1977,26 @@ class Enc_13204995 : OpcodeHexagon {
bits <4> Rt16;
let Inst{3-0} = Rt16{3-0};
}
-class Enc_13338314 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_9920336 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <5> Rtt32;
- let Inst{4-0} = Rtt32{4-0};
-}
-class Enc_15380240 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_cda00a : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{19-16} = Ii{11-8};
+ let Inst{12-5} = Ii{7-0};
+ bits <2> Pu4;
+ let Inst{22-21} = Pu4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3296020 : OpcodeHexagon {
+class Enc_2fbf3c : OpcodeHexagon {
bits <3> Ii;
let Inst{10-8} = Ii{2-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_2428539 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{24-23} = n1{2-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_10039393 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9372046 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2901241 : OpcodeHexagon {
+class Enc_70b24b : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
@@ -2366,424 +2004,294 @@ class Enc_2901241 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_16145290 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{6-5} = Ps4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_13783220 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_12261611 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6135183 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rx16;
- let Inst{3-0} = Rx16{3-0};
-}
-class Enc_5523416 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_2ae154 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_13472494 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_50b5ac : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_16303398 : OpcodeHexagon {
+class Enc_2ea740 : OpcodeHexagon {
bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_3494181 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+}
+class Enc_08d755 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_13983714 : OpcodeHexagon {
+class Enc_1178da : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_931653 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-5} = Ii{6-3};
+class Enc_8dbe85 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7622936 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_5a18b3 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{22-22} = n1{1-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_8773155 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
+class Enc_14d27a : OpcodeHexagon {
bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
-}
-class Enc_5401217 : OpcodeHexagon {
+ let Inst{12-8} = II{4-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <3> n1;
- let Inst{28-28} = n1{2-2};
- let Inst{24-23} = n1{1-0};
}
-class Enc_6736678 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
+class Enc_a05677 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3457570 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
+class Enc_f0cca7 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <6> II;
+ let Inst{20-16} = II{5-1};
+ let Inst{13-13} = II{0-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_500cb0 : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
bits <5> Vxx32;
let Inst{4-0} = Vxx32{4-0};
}
-class Enc_3813442 : OpcodeHexagon {
+class Enc_7e5a82 : OpcodeHexagon {
bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_3135259 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5486172 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{2-0} = Nt8{2-0};
+class Enc_12b6e9 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11081334 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
+class Enc_6f70ca : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{8-4} = Ii{7-3};
}
-class Enc_9470751 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_7222b7 : OpcodeHexagon {
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_2683366 : OpcodeHexagon {
- bits <3> Quu8;
- let Inst{10-8} = Quu8{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Qdd8;
- let Inst{5-3} = Qdd8{2-0};
+class Enc_e3b0c4 : OpcodeHexagon {
}
-class Enc_15830826 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{10-0} = Ii{13-3};
+class Enc_a255dc : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4967902 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
+class Enc_cb4b4e : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
-}
-class Enc_14287645 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_8324216 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_913538 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
-}
-class Enc_16311032 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_9864697 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <6> II;
- let Inst{20-16} = II{5-1};
- let Inst{13-13} = II{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11205051 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{11-8} = Ii{5-2};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{3-0} = Rt16{3-0};
-}
-class Enc_5611087 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-5} = Ii{6-3};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
+class Enc_9cdba7 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_10915758 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8943121 : OpcodeHexagon {
+class Enc_5cd7e9 : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-5} = Ii{9-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_1539665 : OpcodeHexagon {
- bits <5> Cs32;
- let Inst{20-16} = Cs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_454a26 : OpcodeHexagon {
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8479583 : OpcodeHexagon {
+class Enc_a6853f : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{23-23} = n1{1-1};
+ bits <6> n1;
+ let Inst{29-29} = n1{5-5};
+ let Inst{26-25} = n1{4-3};
+ let Inst{23-22} = n1{2-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_313333 : OpcodeHexagon {
+class Enc_c175d0 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+}
+class Enc_895bd9 : OpcodeHexagon {
+ bits <2> Qu4;
+ let Inst{9-8} = Qu4{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
bits <5> Vx32;
let Inst{4-0} = Vx32{4-0};
}
-class Enc_11544269 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{13-13} = n1{0-0};
+class Enc_ea23e4 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_9018141 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Cd32;
- let Inst{4-0} = Cd32{4-0};
+class Enc_4dc228 : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{12-8} = Ii{8-4};
+ let Inst{4-3} = Ii{3-2};
+ bits <10> II;
+ let Inst{20-16} = II{9-5};
+ let Inst{7-5} = II{4-2};
+ let Inst{1-0} = II{1-0};
+}
+class Enc_10bc21 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_1aaec1 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_6152036 : OpcodeHexagon {
+class Enc_329361 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Gdd32;
- let Inst{4-0} = Gdd32{4-0};
-}
-class Enc_1954437 : OpcodeHexagon {
- bits <6> Sss64;
- let Inst{21-16} = Sss64{5-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_3742184 : OpcodeHexagon {
+class Enc_d2c7f1 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_1835415 : OpcodeHexagon {
+class Enc_3680c2 : OpcodeHexagon {
bits <7> Ii;
- let Inst{10-5} = Ii{6-1};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{11-5} = Ii{6-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_1085466 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_1ef990 : OpcodeHexagon {
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13150110 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
+class Enc_e957fb : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-0} = Ii{8-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_6772177 : OpcodeHexagon {
- bits <5> Zu8;
- let Inst{12-8} = Zu8{4-0};
- bits <5> Zd8;
- let Inst{4-0} = Zd8{4-0};
-}
-class Enc_6616512 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
+class Enc_c9e3bc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_1886960 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_2835415 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{10-5} = Ii{7-2};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14024197 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_12297800 : OpcodeHexagon {
- bits <18> Ii;
- let Inst{26-25} = Ii{17-16};
- let Inst{20-16} = Ii{15-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
}
-class Enc_7254313 : OpcodeHexagon {
+class Enc_2e1979 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -2793,20 +2301,12 @@ class Enc_7254313 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_677558 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-5} = Ii{8-3};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_6223403 : OpcodeHexagon {
+class Enc_0b2e5b : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
@@ -2814,220 +2314,178 @@ class Enc_6223403 : OpcodeHexagon {
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_674613 : OpcodeHexagon {
+class Enc_d483b9 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{5-5} = Ii{0-0};
bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_16479122 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{7-3} = Ii{7-3};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
-}
-class Enc_11704059 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_9165078 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{8-3} = Ii{8-3};
- bits <3> Rtt8;
- let Inst{2-0} = Rtt8{2-0};
+class Enc_51635c : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-4} = Ii{6-2};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_15376009 : OpcodeHexagon {
+class Enc_e26546 : OpcodeHexagon {
bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{6-3} = Ii{4-1};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_8838398 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{21-21} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <6> II;
- let Inst{13-8} = II{5-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_2328527 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_1451363 : OpcodeHexagon {
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_4030179 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_70fb07 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_13770697 : OpcodeHexagon {
+class Enc_277737 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{22-21} = Ii{7-6};
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-5} = Ii{4-2};
bits <5> Ru32;
let Inst{4-0} = Ru32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ry32;
- let Inst{12-8} = Ry32{4-0};
-}
-class Enc_12212978 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ let Inst{12-8} = Rd32{4-0};
+}
+class Enc_5c124a : OpcodeHexagon {
+ bits <19> Ii;
+ let Inst{26-25} = Ii{18-17};
+ let Inst{20-16} = Ii{16-12};
+ let Inst{13-13} = Ii{11-11};
+ let Inst{7-0} = Ii{10-3};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_12665927 : OpcodeHexagon {
+class Enc_928ca1 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_2082956 : OpcodeHexagon {
- bits <32> Ii;
- let Inst{27-16} = Ii{31-20};
- let Inst{13-0} = Ii{19-6};
+class Enc_da664b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_7b7ba8 : OpcodeHexagon {
+ bits <2> Qu4;
+ let Inst{9-8} = Qu4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+}
+class Enc_47ee5e : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ru32;
+ let Inst{12-8} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{2-0} = Nt8{2-0};
}
-class Enc_220949 : OpcodeHexagon {
+class Enc_8bcba4 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
+}
+class Enc_3a2484 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-23} = n1{3-1};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-23} = n1{2-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_9939385 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{12-8} = Ii{8-4};
- let Inst{4-3} = Ii{3-2};
- bits <10> II;
- let Inst{20-16} = II{9-5};
- let Inst{7-5} = II{4-2};
- let Inst{1-0} = II{1-0};
-}
-class Enc_2117024 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-8} = Ii{7-3};
- let Inst{4-2} = Ii{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8390029 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
+class Enc_a5ed8a : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_10989558 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_cb9321 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{27-21} = Ii{15-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_668704 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{25-22} = n1{3-0};
}
-class Enc_5972412 : OpcodeHexagon {
+class Enc_a7341a : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_12851489 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_9554661 : OpcodeHexagon {
+class Enc_5eac98 : OpcodeHexagon {
bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4202401 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6091631 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{9-8} = Qs4{1-0};
- bits <2> Qt4;
- let Inst{23-22} = Qt4{1-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_10157519 : OpcodeHexagon {
+class Enc_02553a : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{11-5} = Ii{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_4835423 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{10-5} = Ii{5-0};
+class Enc_acd6ed : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{10-5} = Ii{8-3};
bits <2> Pt4;
let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14046916 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
-}
-class Enc_2921694 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_8732960 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-8} = Ii{7-3};
- let Inst{4-2} = Ii{2-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5338033 : OpcodeHexagon {
+class Enc_8e583a : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
@@ -3035,355 +2493,212 @@ class Enc_5338033 : OpcodeHexagon {
let Inst{19-16} = Rs16{3-0};
bits <5> n1;
let Inst{28-28} = n1{4-4};
- let Inst{24-22} = n1{3-1};
+ let Inst{25-23} = n1{3-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_6956613 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_b886fd : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_2153798 : OpcodeHexagon {
+class Enc_24a7dc : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_16210172 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{10-8} = Qt8{2-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
-}
-class Enc_5023792 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_1244745 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_10002182 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
+class Enc_2d829e : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{10-0} = Ii{13-3};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
}
-class Enc_12492533 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_4f4ed7 : OpcodeHexagon {
+ bits <18> Ii;
+ let Inst{26-25} = Ii{17-16};
+ let Inst{20-16} = Ii{15-11};
+ let Inst{13-5} = Ii{10-2};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_1774350 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
+class Enc_84b2cd : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+}
+class Enc_8dbdfe : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{13-13} = Ii{7-7};
+ let Inst{7-3} = Ii{6-2};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_2703240 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+class Enc_90cd8b : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_6975103 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_bd0b33 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_9789480 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_12244921 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+class Enc_c7cd90 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_8674673 : OpcodeHexagon {
+class Enc_405228 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{23-22} = n1{1-0};
-}
-class Enc_8514936 : OpcodeHexagon {
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_13455308 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <3> n1;
+ let Inst{28-28} = n1{2-2};
+ let Inst{24-23} = n1{1-0};
}
-class Enc_10188026 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_81ac1d : OpcodeHexagon {
+ bits <24> Ii;
+ let Inst{24-16} = Ii{23-15};
+ let Inst{13-1} = Ii{14-2};
}
-class Enc_3158657 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_395cc4 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10597934 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
- bits <2> n1;
- let Inst{9-8} = n1{1-0};
-}
-class Enc_10612292 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <2> Qx4;
- let Inst{1-0} = Qx4{1-0};
-}
-class Enc_5178985 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_3967902 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2462143 : OpcodeHexagon {
+class Enc_a51a9a : OpcodeHexagon {
bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{12-8} = Ii{7-3};
+ let Inst{4-2} = Ii{2-0};
}
-class Enc_9849208 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
+class Enc_d44e31 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{4-0} = Rt32{4-0};
}
-class Enc_12618352 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_7303598 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
-}
-class Enc_13823098 : OpcodeHexagon {
- bits <5> Gss32;
- let Inst{20-16} = Gss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_16388420 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
+class Enc_f77fbc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_8328140 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
}
-class Enc_1793896 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_d2216a : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_4944558 : OpcodeHexagon {
- bits <2> Qu4;
- let Inst{9-8} = Qu4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
+class Enc_85bf58 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13211717 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{20-16} = Vvv32{4-0};
+class Enc_71bb9b : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_8170340 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <3> Qdd8;
- let Inst{2-0} = Qdd8{2-0};
+class Enc_52a5dd : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14071773 : OpcodeHexagon {
+class Enc_5e2823 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_8605375 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_28a2dc : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12711252 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{9-8} = Pv4{1-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_8202458 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_5138b3 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_8577055 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
+class Enc_84d359 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{3-0} = Ii{3-0};
bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-23} = n1{3-1};
- let Inst{8-8} = n1{0-0};
+ let Inst{7-4} = Rs16{3-0};
}
-class Enc_1409050 : OpcodeHexagon {
+class Enc_e07374 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
-}
-class Enc_7466005 : OpcodeHexagon {
- bits <5> Gs32;
- let Inst{20-16} = Gs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_2380082 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
+class Enc_323f2d : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_10067774 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_11000933 : OpcodeHexagon {
+class Enc_1a9974 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -3393,55 +2708,66 @@ class Enc_11000933 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
let Inst{12-8} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{2-0} = Nt8{2-0};
+ bits <5> Rtt32;
+ let Inst{4-0} = Rtt32{4-0};
}
-class Enc_13201267 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_1de724 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-22} = n1{2-0};
}
-class Enc_1989309 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{4-0} = Vvv32{4-0};
+class Enc_dd766a : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
+}
+class Enc_0b51ce : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_9082775 : OpcodeHexagon {
+class Enc_b4e6cf : OpcodeHexagon {
bits <10> Ii;
let Inst{21-21} = Ii{9-9};
let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_8065534 : OpcodeHexagon {
- bits <4> Ii;
+class Enc_44215c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
let Inst{6-3} = Ii{3-0};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_4631106 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_a21d47 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{10-5} = Ii{5-0};
bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Pu4;
- let Inst{7-6} = Pu4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ let Inst{12-11} = Pt4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_11065510 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
+class Enc_cc449f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
bits <5> Rt32;
@@ -3449,70 +2775,7 @@ class Enc_11065510 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6673186 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_8498433 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_4395009 : OpcodeHexagon {
- bits <7> Ii;
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10926598 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
-}
-class Enc_7606379 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_8131399 : OpcodeHexagon {
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_11522288 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_114098 : OpcodeHexagon {
+class Enc_645d54 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{5-5} = Ii{0-0};
@@ -3523,47 +2786,29 @@ class Enc_114098 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5654851 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_667b39 : OpcodeHexagon {
+ bits <5> Css32;
+ let Inst{20-16} = Css32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_12023037 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{6-5} = Ps4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_176263 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{9-4} = Ii{7-2};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_6130414 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{23-22} = Ii{15-14};
- let Inst{13-0} = Ii{13-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_631197 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <6> II;
- let Inst{23-21} = II{5-3};
- let Inst{7-5} = II{2-0};
+class Enc_927852 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_163a3c : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_16214129 : OpcodeHexagon {
+class Enc_b087ac : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
@@ -3571,507 +2816,412 @@ class Enc_16214129 : OpcodeHexagon {
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_8333157 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_4834775 : OpcodeHexagon {
- bits <6> II;
- let Inst{13-8} = II{5-0};
+class Enc_b1e1fb : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rd16;
- let Inst{19-16} = Rd16{3-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{25-23} = n1{3-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_16601956 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_1f19b5 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{9-5} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_b8c967 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_15946706 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{6-5} = Ii{1-0};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
-}
-class Enc_6923828 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
-}
-class Enc_1332717 : OpcodeHexagon {
+class Enc_fb6577 : OpcodeHexagon {
bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ let Inst{9-8} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_1786883 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <6> Sdd64;
- let Inst{5-0} = Sdd64{5-0};
-}
-class Enc_14303394 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9282127 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
-}
-class Enc_2813446 : OpcodeHexagon {
+class Enc_2bae10 : OpcodeHexagon {
bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_364753 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{23-23} = n1{0-0};
+ let Inst{10-8} = Ii{3-1};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_12477789 : OpcodeHexagon {
- bits <15> Ii;
- let Inst{21-21} = Ii{14-14};
- let Inst{13-13} = Ii{13-13};
- let Inst{11-1} = Ii{12-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_c4dc92 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_44555 : OpcodeHexagon {
+class Enc_03833b : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8497723 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_dbd70c : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_4359901 : OpcodeHexagon {
+class Enc_f6fe0b : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{22-22} = n1{0-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{24-22} = n1{4-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_11271630 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_9e2e1c : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_10501894 : OpcodeHexagon {
+class Enc_8df4be : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{26-25} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-5} = Ii{9-1};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_66bce1 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{11-8} = Rd16{3-0};
+}
+class Enc_b8309d : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{8-3} = Ii{8-3};
+ bits <3> Rtt8;
+ let Inst{2-0} = Rtt8{2-0};
}
-class Enc_9768377 : OpcodeHexagon {
+class Enc_5e8512 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_16268019 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_8814718 : OpcodeHexagon {
- bits <18> Ii;
- let Inst{26-25} = Ii{17-16};
- let Inst{20-16} = Ii{15-11};
- let Inst{13-5} = Ii{10-2};
+class Enc_4f677b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_6212930 : OpcodeHexagon {
+class Enc_3d920a : OpcodeHexagon {
bits <6> Ii;
let Inst{8-5} = Ii{5-2};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5462762 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+class Enc_e83554 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_6154421 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{13-13} = Ii{6-6};
- let Inst{7-3} = Ii{5-1};
+class Enc_ed48be : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{6-5} = Ii{1-0};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
+}
+class Enc_f8c1c4 : OpcodeHexagon {
bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ let Inst{12-11} = Pv4{1-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_8940892 : OpcodeHexagon {
+class Enc_1aa186 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_3531000 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{11-5} = Ii{6-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+class Enc_134437 : OpcodeHexagon {
+ bits <2> Qs4;
+ let Inst{9-8} = Qs4{1-0};
+ bits <2> Qt4;
+ let Inst{23-22} = Qt4{1-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_14311138 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_97d666 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2216485 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
+class Enc_f82eaf : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{10-5} = Ii{7-2};
+ bits <2> Pt4;
+ let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_12395768 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_11047413 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_1256611 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_7884306 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{8-4} = Ii{7-3};
-}
-class Enc_11244923 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8612939 : OpcodeHexagon {
+class Enc_69d63b : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{22-22} = n1{1-1};
- let Inst{13-13} = n1{0-0};
}
-class Enc_16355964 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
+class Enc_f79415 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+}
+class Enc_ce6828 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{26-25} = Ii{13-12};
+ let Inst{13-13} = Ii{11-11};
+ let Inst{7-0} = Ii{10-3};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12616482 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_5915771 : OpcodeHexagon {
+class Enc_800e04 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{24-22} = n1{3-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_14459927 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-22} = n1{4-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_7504828 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_ad1831 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_14209223 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_0fa531 : OpcodeHexagon {
+ bits <15> Ii;
+ let Inst{21-21} = Ii{14-14};
+ let Inst{13-13} = Ii{13-13};
+ let Inst{11-1} = Ii{12-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_3931661 : OpcodeHexagon {
+class Enc_7eaeb6 : OpcodeHexagon {
bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{6-3} = Ii{5-2};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13606251 : OpcodeHexagon {
+class Enc_f55a0c : OpcodeHexagon {
bits <6> Ii;
let Inst{11-8} = Ii{5-2};
bits <4> Rs16;
let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_11475992 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_13133231 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <4> Rt16;
+ let Inst{3-0} = Rt16{3-0};
}
-class Enc_9959498 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{22-21} = Ii{7-6};
- let Inst{13-13} = Ii{5-5};
- let Inst{7-5} = Ii{4-2};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
+class Enc_f20719 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
}
-class Enc_8919369 : OpcodeHexagon {
+class Enc_eafd18 : OpcodeHexagon {
+ bits <5> II;
+ let Inst{12-8} = II{4-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{24-23} = n1{3-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_2968094 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{11-5} = Ii{6-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
}
-class Enc_4813442 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_7b523d : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_4684887 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{25-23} = n1{2-0};
+class Enc_47ef61 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15606259 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_cc857d : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_2268028 : OpcodeHexagon {
- bits <3> Qtt8;
- let Inst{10-8} = Qtt8{2-0};
- bits <3> Qdd8;
- let Inst{5-3} = Qdd8{2-0};
+class Enc_7fa7f6 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_13430430 : OpcodeHexagon {
+class Enc_0f8bab : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <3> Qxx8;
- let Inst{2-0} = Qxx8{2-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_13336212 : OpcodeHexagon {
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
- bits <1> n1;
- let Inst{9-9} = n1{0-0};
+class Enc_7eb485 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_15008287 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_864a5a : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{12-8} = Ii{8-4};
+ let Inst{4-3} = Ii{3-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_4897205 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{9-8} = Qs4{1-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+class Enc_c2b48e : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8038806 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
+class Enc_8c6530 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12669374 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_971347 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_448f7f : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_1997594 : OpcodeHexagon {
+class Enc_da8d43 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-3} = Ii{4-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11940513 : OpcodeHexagon {
+class Enc_a6ce9c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{3-0} = Ii{5-2};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+}
+class Enc_eca7c8 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
@@ -4079,104 +3229,13 @@ class Enc_11940513 : OpcodeHexagon {
bits <5> Rt32;
let Inst{4-0} = Rt32{4-0};
}
-class Enc_2735552 : OpcodeHexagon {
+class Enc_4b39e4 : OpcodeHexagon {
bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_16410950 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6226085 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14193700 : OpcodeHexagon {
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_15763937 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <6> n1;
- let Inst{29-29} = n1{5-5};
- let Inst{26-25} = n1{4-3};
- let Inst{23-22} = n1{2-1};
- let Inst{13-13} = n1{0-0};
-}
-class Enc_2492727 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_13425035 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_4135257 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{10-8} = Ii{3-1};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_14631806 : OpcodeHexagon {
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_12397062 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_11959851 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
diff --git a/lib/Target/Hexagon/HexagonDepInstrInfo.td b/lib/Target/Hexagon/HexagonDepInstrInfo.td
index d910d4af2191..2dc74632e9be 100644
--- a/lib/Target/Hexagon/HexagonDepInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonDepInstrInfo.td
@@ -11,36 +11,39 @@ def A2_abs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = abs($Rs32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_absp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = abs($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000100;
+let prefersSlot3 = 1;
}
def A2_abssat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = abs($Rs32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_add : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011000;
@@ -56,145 +59,157 @@ def A2_addh_h16_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_sat_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_l16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_l16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_l16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_l16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = add($Rs32,#$Ii)",
-ALU32_ADDI_tc_1_SLOT0123, TypeALU32_ADDI>, Enc_11542684, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_ADDI>, Enc_cb9321, PredNewRel, ImmRegRel {
let Inst{31-28} = 0b1011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -213,7 +228,7 @@ def A2_addp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -224,10 +239,11 @@ def A2_addpsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let isCommutable = 1;
}
@@ -235,12 +251,13 @@ def A2_addsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -249,32 +266,34 @@ def A2_addsp : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rs32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64> {
+tc_bd16579e, TypeALU64> {
let isPseudo = 1;
}
def A2_addsph : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):raw:hi",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_bd16579e, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_addspl : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):raw:lo",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_bd16579e, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_and : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001000;
@@ -290,7 +309,7 @@ def A2_andir : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = and($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, ImmRegRel {
let Inst{31-22} = 0b0111011000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -306,7 +325,7 @@ def A2_andp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = and($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -316,7 +335,7 @@ def A2_aslh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000000;
let hasNewValue = 1;
@@ -328,7 +347,7 @@ def A2_asrh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000001;
let hasNewValue = 1;
@@ -340,7 +359,7 @@ def A2_combine_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.h,$Rs32.h)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011100;
@@ -352,7 +371,7 @@ def A2_combine_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.h,$Rs32.l)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011101;
@@ -364,7 +383,7 @@ def A2_combine_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.l,$Rs32.h)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011110;
@@ -376,7 +395,7 @@ def A2_combine_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.l,$Rs32.l)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011111;
@@ -388,7 +407,7 @@ def A2_combineii : HInst<
(outs DoubleRegs:$Rdd32),
(ins s32_0Imm:$Ii, s8_0Imm:$II),
"$Rdd32 = combine(#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_14007201 {
+tc_548f402d, TypeALU32_2op>, Enc_18c338 {
let Inst{31-23} = 0b011111000;
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
@@ -403,7 +422,7 @@ def A2_combinew : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1997594, PredNewRel {
+tc_548f402d, TypeALU32_3op>, Enc_be32a5, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110101000;
@@ -415,87 +434,95 @@ def A2_max : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = max($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_maxp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = max($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_maxu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = maxu($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_maxup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = maxu($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_min : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = min($Rt32,$Rs32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_minp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = min($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_minu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = minu($Rt32,$Rs32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_minup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = minu($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_neg : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = neg($Rs32)",
-PSEUDO, TypeALU32_2op> {
+tc_f16d5b17, TypeALU32_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -505,7 +532,7 @@ def A2_negp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = neg($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000100;
}
@@ -513,18 +540,19 @@ def A2_negsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = neg($Rs32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_nop : HInst<
(outs),
(ins),
"nop",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_0 {
+tc_e2c31426, TypeALU32_2op>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b0111111100000000;
}
@@ -532,7 +560,7 @@ def A2_not : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = not($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_f16d5b17, TypeALU32_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -542,7 +570,7 @@ def A2_notp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = not($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000100;
}
@@ -550,7 +578,7 @@ def A2_or : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001001;
@@ -566,7 +594,7 @@ def A2_orir : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = or($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, ImmRegRel {
let Inst{31-22} = 0b0111011010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -582,7 +610,7 @@ def A2_orp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = or($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -592,7 +620,7 @@ def A2_paddf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011000;
@@ -608,7 +636,7 @@ def A2_paddfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011000;
@@ -625,7 +653,7 @@ def A2_paddif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if (!$Pu4) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011101001;
let isPredicated = 1;
@@ -645,7 +673,7 @@ def A2_paddifnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if (!$Pu4.new) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-23} = 0b011101001;
let isPredicated = 1;
@@ -666,7 +694,7 @@ def A2_paddit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if ($Pu4) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011101000;
let isPredicated = 1;
@@ -685,7 +713,7 @@ def A2_padditnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if ($Pu4.new) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-23} = 0b011101000;
let isPredicated = 1;
@@ -705,7 +733,7 @@ def A2_paddt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011000;
@@ -720,7 +748,7 @@ def A2_paddtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011000;
@@ -736,7 +764,7 @@ def A2_pandf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001000;
@@ -750,7 +778,7 @@ def A2_pandfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001000;
@@ -765,7 +793,7 @@ def A2_pandt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001000;
@@ -778,7 +806,7 @@ def A2_pandtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001000;
@@ -792,7 +820,7 @@ def A2_porf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001001;
@@ -806,7 +834,7 @@ def A2_porfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001001;
@@ -821,7 +849,7 @@ def A2_port : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001001;
@@ -834,7 +862,7 @@ def A2_portnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001001;
@@ -848,7 +876,7 @@ def A2_psubf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011001;
@@ -862,7 +890,7 @@ def A2_psubfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011001;
@@ -877,7 +905,7 @@ def A2_psubt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011001;
@@ -890,7 +918,7 @@ def A2_psubtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011001;
@@ -904,7 +932,7 @@ def A2_pxorf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001011;
@@ -918,7 +946,7 @@ def A2_pxorfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001011;
@@ -933,7 +961,7 @@ def A2_pxort : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001011;
@@ -946,7 +974,7 @@ def A2_pxortnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001011;
@@ -960,18 +988,19 @@ def A2_roundsat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = round($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_94e6ffd9, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = sat($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
@@ -982,7 +1011,7 @@ def A2_satb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -993,7 +1022,7 @@ def A2_sath : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sath($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1004,7 +1033,7 @@ def A2_satub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satub($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1015,7 +1044,7 @@ def A2_satuh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satuh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1026,7 +1055,7 @@ def A2_sub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011001;
@@ -1041,145 +1070,157 @@ def A2_subh_h16_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_sat_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_l16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_l16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_l16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_l16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = sub($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -1188,7 +1229,7 @@ def A2_subri : HInst<
(outs IntRegs:$Rd32),
(ins s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = sub(#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, PredNewRel, ImmRegRel {
let Inst{31-22} = 0b0111011001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1204,12 +1245,13 @@ def A2_subsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1217,7 +1259,7 @@ def A2_svaddh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vaddh($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110000;
@@ -1230,12 +1272,13 @@ def A2_svaddhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vaddh($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -1244,12 +1287,13 @@ def A2_svadduhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vadduh($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -1258,12 +1302,13 @@ def A2_svavgh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vavgh($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_511f28f6, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
let isCommutable = 1;
}
@@ -1271,12 +1316,13 @@ def A2_svavghs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vavgh($Rs32,$Rt32):rnd",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_76c4c5ef, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
let isCommutable = 1;
}
@@ -1284,19 +1330,20 @@ def A2_svnavgh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vnavgh($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_511f28f6, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
}
def A2_svsubh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubh($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110100;
@@ -1308,12 +1355,13 @@ def A2_svsubhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubh($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1321,12 +1369,13 @@ def A2_svsubuhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubuh($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1334,7 +1383,7 @@ def A2_swiz : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = swiz($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -1344,7 +1393,7 @@ def A2_sxtb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000101;
let hasNewValue = 1;
@@ -1356,7 +1405,7 @@ def A2_sxth : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000111;
let hasNewValue = 1;
@@ -1368,7 +1417,7 @@ def A2_sxtw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = sxtw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100010;
}
@@ -1376,7 +1425,7 @@ def A2_tfr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000011;
let hasNewValue = 1;
@@ -1389,7 +1438,7 @@ def A2_tfrcrr : HInst<
(outs IntRegs:$Rd32),
(ins CtrRegs:$Cs32),
"$Rd32 = $Cs32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_1539665 {
+tc_3b4892c6, TypeCR>, Enc_0cb018 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01101010000;
let hasNewValue = 1;
@@ -1399,7 +1448,7 @@ def A2_tfrf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let hasNewValue = 1;
@@ -1414,7 +1463,7 @@ def A2_tfrfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let hasNewValue = 1;
@@ -1430,7 +1479,7 @@ def A2_tfrih : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, u16_0Imm:$Ii),
"$Rx32.h = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_6130414 {
+tc_548f402d, TypeALU32_2op>, Enc_51436c {
let Inst{21-21} = 0b1;
let Inst{31-24} = 0b01110010;
let hasNewValue = 1;
@@ -1441,7 +1490,7 @@ def A2_tfril : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, u16_0Imm:$Ii),
"$Rx32.l = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_6130414 {
+tc_548f402d, TypeALU32_2op>, Enc_51436c {
let Inst{21-21} = 0b1;
let Inst{31-24} = 0b01110001;
let hasNewValue = 1;
@@ -1452,7 +1501,7 @@ def A2_tfrp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let BaseOpcode = "A2_tfrp";
let isPredicable = 1;
let isPseudo = 1;
@@ -1461,7 +1510,7 @@ def A2_tfrpf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if (!$Pu4) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let BaseOpcode = "A2_tfrp";
@@ -1471,7 +1520,7 @@ def A2_tfrpfnew : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if (!$Pu4.new) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let isPredicatedNew = 1;
@@ -1482,7 +1531,7 @@ def A2_tfrpi : HInst<
(outs DoubleRegs:$Rdd32),
(ins s8_0Imm:$Ii),
"$Rdd32 = #$Ii",
-ALU64_tc_1_SLOT23, TypeALU64> {
+tc_548f402d, TypeALU64> {
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
let isMoveImm = 1;
@@ -1492,7 +1541,7 @@ def A2_tfrpt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if ($Pu4) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let BaseOpcode = "A2_tfrp";
let isPseudo = 1;
@@ -1501,7 +1550,7 @@ def A2_tfrptnew : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if ($Pu4.new) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedNew = 1;
let BaseOpcode = "A2_tfrp";
@@ -1511,7 +1560,7 @@ def A2_tfrrcr : HInst<
(outs CtrRegs:$Cd32),
(ins IntRegs:$Rs32),
"$Cd32 = $Rs32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9018141 {
+tc_82f0f122, TypeCR>, Enc_bd811a {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01100010001;
let hasNewValue = 1;
@@ -1521,7 +1570,7 @@ def A2_tfrsi : HInst<
(outs IntRegs:$Rd32),
(ins s32_0Imm:$Ii),
"$Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_7971062, PredNewRel, ImmRegRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e87ce, PredNewRel, ImmRegRel {
let Inst{21-21} = 0b0;
let Inst{31-24} = 0b01111000;
let hasNewValue = 1;
@@ -1543,7 +1592,7 @@ def A2_tfrt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1557,7 +1606,7 @@ def A2_tfrtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1572,41 +1621,45 @@ def A2_vabsh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
}
def A2_vabshsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsh($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vabsw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsw($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
}
def A2_vabswsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsw($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddb_map : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddb($Rss32,$Rtt32)",
-PSEUDO, TypeMAPPING> {
+tc_9c18c9a5, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -1614,7 +1667,7 @@ def A2_vaddh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1623,17 +1676,18 @@ def A2_vaddhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddh($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddub($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1642,27 +1696,29 @@ def A2_vaddubs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddub($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vadduhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vadduh($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1671,26 +1727,28 @@ def A2_vaddws : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddw($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vavgh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavghcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32):crnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_63cd9d2d, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
@@ -1700,79 +1758,87 @@ def A2_vavghr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavgub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgub($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavgubr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgub($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguh($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavguwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguw($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavgw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavgwcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32):crnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_63cd9d2d, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
@@ -1782,16 +1848,17 @@ def A2_vavgwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vcmpbeq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b110000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1800,7 +1867,7 @@ def A2_vcmpbgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b111000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1809,7 +1876,7 @@ def A2_vcmpheq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1818,7 +1885,7 @@ def A2_vcmphgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1827,7 +1894,7 @@ def A2_vcmphgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1836,7 +1903,7 @@ def A2_vcmpweq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1845,7 +1912,7 @@ def A2_vcmpwgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1854,7 +1921,7 @@ def A2_vcmpwgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1863,133 +1930,147 @@ def A2_vconj : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vconj($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000100;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vmaxb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxb($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxub($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxuh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxuh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxuw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxuw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vmaxw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vminb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminb($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vminh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminub($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminuh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminuh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminuw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminuw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vnavgh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_cd321066, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
+let prefersSlot3 = 1;
}
def A2_vnavghcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32):crnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2000,7 +2081,7 @@ def A2_vnavghr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32):rnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2011,16 +2092,17 @@ def A2_vnavgw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_cd321066, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
+let prefersSlot3 = 1;
}
def A2_vnavgwcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32):crnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2031,7 +2113,7 @@ def A2_vnavgwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32):rnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2042,7 +2124,7 @@ def A2_vraddub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vraddub($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -2052,7 +2134,7 @@ def A2_vraddub_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vraddub($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -2063,7 +2145,7 @@ def A2_vrsadub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrsadub($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -2073,7 +2155,7 @@ def A2_vrsadub_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrsadub($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -2084,7 +2166,7 @@ def A2_vsubb_map : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vsubb($Rss32,$Rtt32)",
-PSEUDO, TypeMAPPING> {
+tc_9c18c9a5, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -2092,7 +2174,7 @@ def A2_vsubh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubh($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2101,17 +2183,18 @@ def A2_vsubhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubh($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubub($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2120,27 +2203,29 @@ def A2_vsububs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubub($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubuhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubuh($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubw($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2149,17 +2234,18 @@ def A2_vsubws : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubw($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_xor : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001011;
@@ -2174,7 +2260,7 @@ def A2_xorp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = xor($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2184,7 +2270,7 @@ def A2_zxtb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let hasNewValue = 1;
let opNewValue = 0;
let BaseOpcode = "A2_zxtb";
@@ -2196,7 +2282,7 @@ def A2_zxth : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000110;
let hasNewValue = 1;
@@ -2208,7 +2294,7 @@ def A4_addp_c : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Px4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Px4in),
"$Rdd32 = add($Rss32,$Rtt32,$Px4):carry",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_151014 {
+tc_a87879e8, TypeS_3op>, Enc_2b3f60 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010110;
@@ -2219,7 +2305,7 @@ def A4_andn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = and($Rt32,~$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001100;
@@ -2231,7 +2317,7 @@ def A4_andnp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = and($Rtt32,~$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2240,32 +2326,34 @@ def A4_bitsplit : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = bitsplit($Rs32,$Rt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_1997594 {
+tc_7ca2ea10, TypeALU64>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010100001;
+let prefersSlot3 = 1;
}
def A4_bitspliti : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rdd32 = bitsplit($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_5654851 {
+tc_7ca2ea10, TypeS_2op>, Enc_311abd {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001000110;
+let prefersSlot3 = 1;
}
def A4_boundscheck : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rs32,$Rtt32)",
-M_tc_3x_SLOT23, TypeALU64> {
+tc_c58f771a, TypeALU64> {
let isPseudo = 1;
}
def A4_boundscheck_hi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rss32,$Rtt32):raw:hi",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -2274,7 +2362,7 @@ def A4_boundscheck_lo : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rss32,$Rtt32):raw:lo",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -2283,7 +2371,7 @@ def A4_cmpbeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.eq($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b110000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2296,7 +2384,7 @@ def A4_cmpbeqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Pd4 = cmpb.eq($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101000;
@@ -2309,7 +2397,7 @@ def A4_cmpbgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.gt($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2321,7 +2409,7 @@ def A4_cmpbgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s8_0Imm:$Ii),
"$Pd4 = cmpb.gt($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101001;
@@ -2333,7 +2421,7 @@ def A4_cmpbgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.gtu($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b111000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2345,7 +2433,7 @@ def A4_cmpbgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmpb.gtu($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3531000, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_02553a, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011101010;
@@ -2362,7 +2450,7 @@ def A4_cmpheq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.eq($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2375,7 +2463,7 @@ def A4_cmpheqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmph.eq($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101000;
@@ -2393,7 +2481,7 @@ def A4_cmphgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.gt($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2405,7 +2493,7 @@ def A4_cmphgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmph.gt($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101001;
@@ -2422,7 +2510,7 @@ def A4_cmphgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.gtu($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2434,7 +2522,7 @@ def A4_cmphgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmph.gtu($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3531000, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_02553a, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011101010;
@@ -2451,7 +2539,7 @@ def A4_combineii : HInst<
(outs DoubleRegs:$Rdd32),
(ins s8_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = combine(#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9864697 {
+tc_548f402d, TypeALU32_2op>, Enc_f0cca7 {
let Inst{31-21} = 0b01111100100;
let isExtendable = 1;
let opExtendable = 2;
@@ -2463,7 +2551,7 @@ def A4_combineir : HInst<
(outs DoubleRegs:$Rdd32),
(ins s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rdd32 = combine(#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_2462143 {
+tc_548f402d, TypeALU32_2op>, Enc_9cdba7 {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011001;
let isExtendable = 1;
@@ -2476,7 +2564,7 @@ def A4_combineri : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rdd32 = combine($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_2462143 {
+tc_548f402d, TypeALU32_2op>, Enc_9cdba7 {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011000;
let isExtendable = 1;
@@ -2489,7 +2577,7 @@ def A4_cround_ri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = cround($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2501,7 +2589,7 @@ def A4_cround_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cround($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -2513,14 +2601,14 @@ def A4_ext : HInst<
(outs),
(ins u26_6Imm:$Ii),
"immext(#$Ii)",
-EXTENDER_tc_1_SLOT0123, TypeEXTENDER>, Enc_2082956 {
+tc_9a13af9d, TypeEXTENDER>, Enc_2b518f {
let Inst{31-28} = 0b0000;
}
def A4_modwrapu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = modwrap($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2532,7 +2620,7 @@ def A4_orn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = or($Rt32,~$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001101;
@@ -2544,7 +2632,7 @@ def A4_ornp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = or($Rtt32,~$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2553,7 +2641,7 @@ def A4_paslhf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000000;
@@ -2567,7 +2655,7 @@ def A4_paslhfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000000;
@@ -2582,7 +2670,7 @@ def A4_paslht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000000;
@@ -2595,7 +2683,7 @@ def A4_paslhtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000000;
@@ -2609,7 +2697,7 @@ def A4_pasrhf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000001;
@@ -2623,7 +2711,7 @@ def A4_pasrhfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000001;
@@ -2638,7 +2726,7 @@ def A4_pasrht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000001;
@@ -2651,7 +2739,7 @@ def A4_pasrhtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000001;
@@ -2665,7 +2753,7 @@ def A4_psxtbf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000101;
@@ -2679,7 +2767,7 @@ def A4_psxtbfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000101;
@@ -2694,7 +2782,7 @@ def A4_psxtbt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000101;
@@ -2707,7 +2795,7 @@ def A4_psxtbtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000101;
@@ -2721,7 +2809,7 @@ def A4_psxthf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000111;
@@ -2735,7 +2823,7 @@ def A4_psxthfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000111;
@@ -2750,7 +2838,7 @@ def A4_psxtht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000111;
@@ -2763,7 +2851,7 @@ def A4_psxthtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000111;
@@ -2777,7 +2865,7 @@ def A4_pzxtbf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000100;
@@ -2791,7 +2879,7 @@ def A4_pzxtbfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000100;
@@ -2806,7 +2894,7 @@ def A4_pzxtbt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000100;
@@ -2819,7 +2907,7 @@ def A4_pzxtbtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000100;
@@ -2833,7 +2921,7 @@ def A4_pzxthf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000110;
@@ -2847,7 +2935,7 @@ def A4_pzxthfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000110;
@@ -2862,7 +2950,7 @@ def A4_pzxtht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000110;
@@ -2875,7 +2963,7 @@ def A4_pzxthtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000110;
@@ -2889,7 +2977,7 @@ def A4_rcmpeq : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011010;
@@ -2903,7 +2991,7 @@ def A4_rcmpeqi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_16355964, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_b8c967, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011010;
let hasNewValue = 1;
@@ -2920,7 +3008,7 @@ def A4_rcmpneq : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = !cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011011;
@@ -2934,7 +3022,7 @@ def A4_rcmpneqi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = !cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_16355964, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_b8c967, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011011;
let hasNewValue = 1;
@@ -2951,7 +3039,7 @@ def A4_round_ri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = round($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2963,7 +3051,7 @@ def A4_round_ri_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = round($Rs32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2976,7 +3064,7 @@ def A4_round_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = round($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -2988,7 +3076,7 @@ def A4_round_rr_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = round($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -3001,7 +3089,7 @@ def A4_subp_c : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Px4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Px4in),
"$Rdd32 = sub($Rss32,$Rtt32,$Px4):carry",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_151014 {
+tc_a87879e8, TypeS_3op>, Enc_2b3f60 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010111;
@@ -3012,7 +3100,7 @@ def A4_tfrcpp : HInst<
(outs DoubleRegs:$Rdd32),
(ins CtrRegs64:$Css32),
"$Rdd32 = $Css32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_13094118 {
+tc_3b4892c6, TypeCR>, Enc_667b39 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01101000000;
}
@@ -3020,7 +3108,7 @@ def A4_tfrpcp : HInst<
(outs CtrRegs64:$Cdd32),
(ins DoubleRegs:$Rss32),
"$Cdd32 = $Rss32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_1329520 {
+tc_82f0f122, TypeCR>, Enc_0ed752 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01100011001;
}
@@ -3028,7 +3116,7 @@ def A4_tlbmatch : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Pd4 = tlbmatch($Rss32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2492727 {
+tc_e2c08bb4, TypeALU64>, Enc_03833b {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3038,7 +3126,7 @@ def A4_vcmpbeq_any : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = any8(vcmpb.eq($Rss32,$Rtt32))",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3047,7 +3135,7 @@ def A4_vcmpbeqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u8_0Imm:$Ii),
"$Pd4 = vcmpb.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3056,7 +3144,7 @@ def A4_vcmpbgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3065,7 +3153,7 @@ def A4_vcmpbgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpb.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3074,7 +3162,7 @@ def A4_vcmpbgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmpb.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3083,7 +3171,7 @@ def A4_vcmpheqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmph.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3092,7 +3180,7 @@ def A4_vcmphgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmph.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3101,7 +3189,7 @@ def A4_vcmphgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmph.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3110,7 +3198,7 @@ def A4_vcmpweqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpw.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3119,7 +3207,7 @@ def A4_vcmpwgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpw.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3128,7 +3216,7 @@ def A4_vcmpwgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmpw.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b100;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3137,7 +3225,7 @@ def A4_vrmaxh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3148,7 +3236,7 @@ def A4_vrmaxuh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxuh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3159,7 +3247,7 @@ def A4_vrmaxuw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxuw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3170,7 +3258,7 @@ def A4_vrmaxw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3181,7 +3269,7 @@ def A4_vrminh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3192,7 +3280,7 @@ def A4_vrminuh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminuh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3203,7 +3291,7 @@ def A4_vrminuw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminuw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3214,7 +3302,7 @@ def A4_vrminw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3225,7 +3313,7 @@ def A5_ACS : HInst<
(outs DoubleRegs:$Rxx32, PredRegs:$Pe4),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32,$Pe4 = vacsh($Rss32,$Rtt32)",
-M_tc_3stall_SLOT23, TypeM>, Enc_12822813, Requires<[HasV55T]> {
+tc_ae0722f7, TypeM>, Enc_831a7d, Requires<[HasV55T]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -3238,7 +3326,7 @@ def A5_vaddhubs : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vaddhub($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9277990, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_3op>, Enc_d2216a, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
@@ -3251,7 +3339,7 @@ def A6_vminub_RdP : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Pe4),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32,$Pe4 = vminub($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_766909, Requires<[HasV62T]> {
+tc_583510c7, TypeM>, Enc_d2c7f1, Requires<[HasV62T]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -3262,7 +3350,7 @@ def C2_all8 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = all8($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011101000;
}
@@ -3270,7 +3358,7 @@ def C2_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = and($Pt4,$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011000000;
@@ -3279,7 +3367,7 @@ def C2_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = and($Pt4,!$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011011000;
@@ -3288,7 +3376,7 @@ def C2_any8 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = any8($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011100000;
}
@@ -3296,7 +3384,7 @@ def C2_bitsclr : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = bitsclr($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111100;
@@ -3305,7 +3393,7 @@ def C2_bitsclri : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u6_0Imm:$Ii),
"$Pd4 = bitsclr($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_14574598 {
+tc_5fa2857c, TypeS_2op>, Enc_5d6c34 {
let Inst{7-2} = 0b000000;
let Inst{31-21} = 0b10000101100;
}
@@ -3313,7 +3401,7 @@ def C2_bitsset : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = bitsset($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111010;
@@ -3322,7 +3410,7 @@ def C2_ccombinewf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111101000;
@@ -3334,7 +3422,7 @@ def C2_ccombinewnewf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111101000;
@@ -3347,7 +3435,7 @@ def C2_ccombinewnewt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111101000;
@@ -3359,7 +3447,7 @@ def C2_ccombinewt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111101000;
@@ -3370,7 +3458,7 @@ def C2_cmoveif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if (!$Pu4) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111101;
@@ -3392,7 +3480,7 @@ def C2_cmoveit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if ($Pu4) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111100;
@@ -3413,7 +3501,7 @@ def C2_cmovenewif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if (!$Pu4.new) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_b08be45e, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111101;
@@ -3436,7 +3524,7 @@ def C2_cmovenewit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if ($Pu4.new) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_b08be45e, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111100;
@@ -3458,7 +3546,7 @@ def C2_cmpeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010000;
@@ -3471,7 +3559,7 @@ def C2_cmpeqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-22} = 0b0111010100;
let CextOpcode = "C2_cmpeq";
@@ -3487,7 +3575,7 @@ def C2_cmpeqp : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3498,7 +3586,7 @@ def C2_cmpgei : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s8_0Imm:$Ii),
"$Pd4 = cmp.ge($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_9df8b0dc, TypeALU32_2op> {
let isCompare = 1;
let isPseudo = 1;
}
@@ -3506,7 +3594,7 @@ def C2_cmpgeui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Pd4 = cmp.geu($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_9df8b0dc, TypeALU32_2op> {
let isCompare = 1;
let isPseudo = 1;
}
@@ -3514,7 +3602,7 @@ def C2_cmpgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.gt($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010010;
@@ -3526,7 +3614,7 @@ def C2_cmpgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmp.gt($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-22} = 0b0111010101;
let CextOpcode = "C2_cmpgt";
@@ -3542,7 +3630,7 @@ def C2_cmpgtp : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3552,7 +3640,7 @@ def C2_cmpgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.gtu($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010011;
@@ -3564,7 +3652,7 @@ def C2_cmpgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmp.gtu($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_13249928, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_c0cdde, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-21} = 0b01110101100;
let CextOpcode = "C2_cmpgtu";
@@ -3580,7 +3668,7 @@ def C2_cmpgtup : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3590,7 +3678,7 @@ def C2_cmplt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.lt($Rs32,$Rt32)",
-PSEUDO, TypeALU32_3op> {
+tc_9df8b0dc, TypeALU32_3op> {
let isCompare = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -3599,7 +3687,7 @@ def C2_cmpltu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.ltu($Rs32,$Rt32)",
-PSEUDO, TypeALU32_3op> {
+tc_9df8b0dc, TypeALU32_3op> {
let isCompare = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -3608,7 +3696,7 @@ def C2_mask : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4),
"$Rdd32 = mask($Pt4)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_10328975 {
+tc_b86c7e8b, TypeS_2op>, Enc_78e566 {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b1000011000000000;
@@ -3617,7 +3705,7 @@ def C2_mux : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mux($Pu4,$Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139 {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110100000;
@@ -3629,7 +3717,7 @@ def C2_muxii : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii, s8_0Imm:$II),
"$Rd32 = mux($Pu4,#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9093094 {
+tc_1b6011fb, TypeALU32_2op>, Enc_830e5d {
let Inst{31-25} = 0b0111101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -3643,7 +3731,7 @@ def C2_muxir : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = mux($Pu4,$Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534 {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011100110;
let hasNewValue = 1;
@@ -3659,7 +3747,7 @@ def C2_muxri : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = mux($Pu4,#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534 {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011100111;
let hasNewValue = 1;
@@ -3675,7 +3763,7 @@ def C2_not : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = not($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011110000;
}
@@ -3683,7 +3771,7 @@ def C2_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = or($Pt4,$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011001000;
@@ -3692,7 +3780,7 @@ def C2_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = or($Pt4,!$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011111000;
@@ -3701,7 +3789,7 @@ def C2_pxfer_map : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = $Ps4",
-S_2op_tc_1_SLOT23, TypeMAPPING> {
+tc_d63b71d1, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -3709,7 +3797,7 @@ def C2_tfrpr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Ps4),
"$Rd32 = $Ps4",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_11139981 {
+tc_b86c7e8b, TypeS_2op>, Enc_f5e933 {
let Inst{13-5} = 0b000000000;
let Inst{31-18} = 0b10001001010000;
let hasNewValue = 1;
@@ -3719,7 +3807,7 @@ def C2_tfrrp : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32),
"$Pd4 = $Rs32",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_4527648 {
+tc_47f0b7ad, TypeS_2op>, Enc_48b75f {
let Inst{13-2} = 0b000000000000;
let Inst{31-21} = 0b10000101010;
}
@@ -3727,18 +3815,19 @@ def C2_vitpack : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Rd32 = vitpack($Ps4,$Pt4)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_6735062 {
+tc_7ca2ea10, TypeS_2op>, Enc_527412 {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b10001001000000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def C2_vmux : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmux($Pu4,$Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_7606379 {
+tc_d1b5a4b6, TypeALU64>, Enc_329361 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010001000;
@@ -3747,7 +3836,7 @@ def C2_xor : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = xor($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011010000;
@@ -3756,7 +3845,7 @@ def C4_addipc : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = add(pc,#$Ii)",
-CR_tc_2_SLOT3, TypeCR>, Enc_9554661 {
+tc_1fe8323c, TypeCR>, Enc_607661 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0110101001001001;
@@ -3772,7 +3861,7 @@ def C4_and_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,and($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011000100;
@@ -3781,7 +3870,7 @@ def C4_and_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,and($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011100100;
@@ -3790,7 +3879,7 @@ def C4_and_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,or($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011001100;
@@ -3799,7 +3888,7 @@ def C4_and_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,or($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011101100;
@@ -3808,7 +3897,7 @@ def C4_cmplte : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.gt($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010010;
@@ -3820,7 +3909,7 @@ def C4_cmpltei : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = !cmp.gt($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-22} = 0b0111010101;
let CextOpcode = "C4_cmplte";
@@ -3836,7 +3925,7 @@ def C4_cmplteu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.gtu($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010011;
@@ -3848,7 +3937,7 @@ def C4_cmplteui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = !cmp.gtu($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_13249928, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_c0cdde, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-21} = 0b01110101100;
let CextOpcode = "C4_cmplteu";
@@ -3864,7 +3953,7 @@ def C4_cmpneq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010000;
@@ -3877,7 +3966,7 @@ def C4_cmpneqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = !cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-22} = 0b0111010100;
let CextOpcode = "C4_cmpneq";
@@ -3893,7 +3982,7 @@ def C4_fastcorner9 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = fastcorner9($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b100100;
let Inst{13-10} = 0b1000;
let Inst{31-18} = 0b01101011000000;
@@ -3902,7 +3991,7 @@ def C4_fastcorner9_not : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = !fastcorner9($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b100100;
let Inst{13-10} = 0b1000;
let Inst{31-18} = 0b01101011000100;
@@ -3911,7 +4000,7 @@ def C4_nbitsclr : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !bitsclr($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111101;
@@ -3920,7 +4009,7 @@ def C4_nbitsclri : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u6_0Imm:$Ii),
"$Pd4 = !bitsclr($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_14574598 {
+tc_5fa2857c, TypeS_2op>, Enc_5d6c34 {
let Inst{7-2} = 0b000000;
let Inst{31-21} = 0b10000101101;
}
@@ -3928,7 +4017,7 @@ def C4_nbitsset : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !bitsset($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111011;
@@ -3937,7 +4026,7 @@ def C4_or_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,and($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011010100;
@@ -3946,7 +4035,7 @@ def C4_or_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,and($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011110100;
@@ -3955,7 +4044,7 @@ def C4_or_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,or($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011011100;
@@ -3964,7 +4053,7 @@ def C4_or_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,or($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011111100;
@@ -3973,319 +4062,293 @@ def F2_conv_d2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_d2df($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_d2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_d2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2d : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2d($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2d_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2d($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2ud : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2ud($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2ud_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2ud($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2uw : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2uw($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2uw_chop : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2uw($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000101;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2w : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2w($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2w_chop : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2w($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2d : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2d($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2d_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2d($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2ud : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2ud($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2ud_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2ud($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2uw : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2uw($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2uw_chop : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2uw($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001011011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2w : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2w($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2w_chop : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2w($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001011100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_ud2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_ud2df($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_ud2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_ud2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000001;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_uw2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_uw2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_uw2sf : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_uw2sf($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011001;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_w2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_w2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_w2sf : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_w2sf($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_dfclass : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Pd4 = dfclass($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_14400220, Requires<[HasV5T]> {
+tc_5fa2857c, TypeALU64>, Enc_1f19b5, Requires<[HasV5T]> {
let Inst{4-2} = 0b100;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b11011100100;
@@ -4296,7 +4359,7 @@ def F2_dfcmpeq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4308,7 +4371,7 @@ def F2_dfcmpge : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.ge($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4320,7 +4383,7 @@ def F2_dfcmpgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4332,7 +4395,7 @@ def F2_dfcmpuo : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.uo($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4344,7 +4407,7 @@ def F2_dfimm_n : HInst<
(outs DoubleRegs:$Rdd32),
(ins u10_0Imm:$Ii),
"$Rdd32 = dfmake(#$Ii):neg",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_2702036, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_e6c957, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101100101;
let prefersSlot3 = 1;
@@ -4353,7 +4416,7 @@ def F2_dfimm_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins u10_0Imm:$Ii),
"$Rdd32 = dfmake(#$Ii):pos",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_2702036, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_e6c957, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101100100;
let prefersSlot3 = 1;
@@ -4362,14 +4425,13 @@ def F2_sfadd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfadd($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let isCommutable = 1;
}
@@ -4377,7 +4439,7 @@ def F2_sfclass : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = sfclass($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742, Requires<[HasV5T]> {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101111;
@@ -4388,7 +4450,7 @@ def F2_sfcmpeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.eq($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4400,7 +4462,7 @@ def F2_sfcmpge : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.ge($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4412,7 +4474,7 @@ def F2_sfcmpgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.gt($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4424,7 +4486,7 @@ def F2_sfcmpuo : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.uo($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4436,52 +4498,48 @@ def F2_sffixupd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sffixupd($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011110;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffixupn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sffixupn($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011110;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffixupr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sffixupr($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011101;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffma : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4489,14 +4547,13 @@ def F2_sffma_lib : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += sfmpy($Rs32,$Rt32):lib",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4504,14 +4561,13 @@ def F2_sffma_sc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32, PredRegs:$Pu4),
"$Rx32 += sfmpy($Rs32,$Rt32,$Pu4):scale",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_15194851, Requires<[HasV5T]> {
+tc_2e55aa16, TypeM>, Enc_437f33, Requires<[HasV5T]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4519,14 +4575,13 @@ def F2_sffms : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4534,14 +4589,13 @@ def F2_sffms_lib : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= sfmpy($Rs32,$Rt32):lib",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4549,7 +4603,7 @@ def F2_sfimm_n : HInst<
(outs IntRegs:$Rd32),
(ins u10_0Imm:$Ii),
"$Rd32 = sfmake(#$Ii):neg",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9082775, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_6c9440, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101011001;
let hasNewValue = 1;
@@ -4560,7 +4614,7 @@ def F2_sfimm_p : HInst<
(outs IntRegs:$Rd32),
(ins u10_0Imm:$Ii),
"$Rd32 = sfmake(#$Ii):pos",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9082775, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_6c9440, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101011000;
let hasNewValue = 1;
@@ -4571,20 +4625,19 @@ def F2_sfinvsqrta : HInst<
(outs IntRegs:$Rd32, PredRegs:$Pe4),
(ins IntRegs:$Rs32),
"$Rd32,$Pe4 = sfinvsqrta($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_5718302, Requires<[HasV5T]> {
+tc_f1aa2cdb, TypeS_2op>, Enc_890909, Requires<[HasV5T]> {
let Inst{13-7} = 0b0000000;
let Inst{31-21} = 0b10001011111;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
let isPredicateLate = 1;
-let prefersSlot3 = 1;
}
def F2_sfmax : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmax($Rs32,$Rt32)",
-M_tc_2_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_f1240c08, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011100;
@@ -4598,7 +4651,7 @@ def F2_sfmin : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmin($Rs32,$Rt32)",
-M_tc_2_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_f1240c08, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011100;
@@ -4612,14 +4665,13 @@ def F2_sfmpy : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let isCommutable = 1;
}
@@ -4627,7 +4679,7 @@ def F2_sfrecipa : HInst<
(outs IntRegs:$Rd32, PredRegs:$Pe4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32,$Pe4 = sfrecipa($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_5853469, Requires<[HasV5T]> {
+tc_09c86199, TypeM>, Enc_a94f3b, Requires<[HasV5T]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011111;
@@ -4635,27 +4687,25 @@ let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
let isPredicateLate = 1;
-let prefersSlot3 = 1;
}
def F2_sfsub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfsub($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def J2_call : HInst<
(outs),
(ins a30_2Imm:$Ii),
"call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_13453446, PredRel {
+tc_639d93ee, TypeJ>, Enc_81ac1d, PredRel {
let Inst{0-0} = 0b0;
let Inst{31-25} = 0b0101101;
let isCall = 1;
@@ -4675,7 +4725,7 @@ def J2_callf : HInst<
(outs),
(ins PredRegs:$Pu4, a30_2Imm:$Ii),
"if (!$Pu4) call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_14868535, PredRel {
+tc_0767081f, TypeJ>, Enc_daea09, PredRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b1;
@@ -4699,7 +4749,7 @@ def J2_callr : HInst<
(outs),
(ins IntRegs:$Rs32),
"callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_ecfaae86, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010000101;
let cofMax1 = 1;
@@ -4713,7 +4763,7 @@ def J2_callrf : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953 {
+tc_84630363, TypeJ>, Enc_88d4d9 {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010001001;
@@ -4731,7 +4781,7 @@ def J2_callrt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953 {
+tc_84630363, TypeJ>, Enc_88d4d9 {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010001000;
@@ -4748,7 +4798,7 @@ def J2_callt : HInst<
(outs),
(ins PredRegs:$Pu4, a30_2Imm:$Ii),
"if ($Pu4) call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_14868535, PredRel {
+tc_0767081f, TypeJ>, Enc_daea09, PredRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b0;
@@ -4771,16 +4821,18 @@ def J2_endloop0 : HInst<
(outs),
(ins),
"endloop0",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC0, SA0];
let Defs = [LC0, P3, PC, USR];
+let isBranch = 1;
+let isTerminator = 1;
let isPseudo = 1;
}
def J2_endloop01 : HInst<
(outs),
(ins),
"endloop01",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC0, LC1, SA0, SA1];
let Defs = [LC0, LC1, P3, PC, USR];
let isPseudo = 1;
@@ -4789,16 +4841,18 @@ def J2_endloop1 : HInst<
(outs),
(ins),
"endloop1",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC1, SA1];
let Defs = [LC1, PC];
+let isBranch = 1;
+let isTerminator = 1;
let isPseudo = 1;
}
def J2_jump : HInst<
(outs),
(ins b30_2Imm:$Ii),
"jump $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_13453446, PredNewRel {
+tc_a333d2a9, TypeJ>, Enc_81ac1d, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{31-25} = 0b0101100;
let isTerminator = 1;
@@ -4818,7 +4872,7 @@ def J2_jumpf : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_1b834fe7, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b1;
@@ -4841,7 +4895,7 @@ def J2_jumpf_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, b15_2Imm:$Ii),
"if (!$Pu4) jump $Ii",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_1b834fe7, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -4849,7 +4903,7 @@ def J2_jumpfnew : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4.new) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b010;
let Inst{21-21} = 0b1;
@@ -4873,7 +4927,7 @@ def J2_jumpfnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4.new) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b110;
let Inst{21-21} = 0b1;
@@ -4897,7 +4951,7 @@ def J2_jumpfpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, Requires<[HasV60T]>, PredNewRel {
+tc_b5bfaa60, TypeJ>, Enc_daea09, Requires<[HasV60T]>, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b100;
let Inst{21-21} = 0b1;
@@ -4920,7 +4974,7 @@ def J2_jumpr : HInst<
(outs),
(ins IntRegs:$Rs32),
"jumpr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059, PredNewRel {
+tc_b08b653e, TypeJ>, Enc_ecbcc8, PredNewRel {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010010100;
let isTerminator = 1;
@@ -4937,7 +4991,7 @@ def J2_jumprf : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_07ac815d, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010011011;
@@ -4956,7 +5010,7 @@ def J2_jumprf_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr $Rs32",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_07ac815d, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -4964,7 +5018,7 @@ def J2_jumprfnew : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0010;
let Inst{31-21} = 0b01010011011;
@@ -4984,7 +5038,7 @@ def J2_jumprfnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0110;
let Inst{31-21} = 0b01010011011;
@@ -5004,7 +5058,7 @@ def J2_jumprfpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, Requires<[HasV60T]>, PredNewRel {
+tc_a1fb80e1, TypeJ>, Enc_88d4d9, Requires<[HasV60T]>, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0100;
let Inst{31-21} = 0b01010011011;
@@ -5023,7 +5077,7 @@ def J2_jumprgtez : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32>=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000101;
@@ -5038,7 +5092,7 @@ def J2_jumprgtezpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32>=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000101;
@@ -5053,7 +5107,7 @@ def J2_jumprltez : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32<=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000111;
@@ -5068,7 +5122,7 @@ def J2_jumprltezpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32<=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000111;
@@ -5083,7 +5137,7 @@ def J2_jumprnz : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32==#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000110;
@@ -5098,7 +5152,7 @@ def J2_jumprnzpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32==#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000110;
@@ -5113,7 +5167,7 @@ def J2_jumprt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_07ac815d, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010011010;
@@ -5131,7 +5185,7 @@ def J2_jumprt_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr $Rs32",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_07ac815d, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -5139,7 +5193,7 @@ def J2_jumprtnew : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0010;
let Inst{31-21} = 0b01010011010;
@@ -5158,7 +5212,7 @@ def J2_jumprtnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0110;
let Inst{31-21} = 0b01010011010;
@@ -5177,7 +5231,7 @@ def J2_jumprtpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, Requires<[HasV60T]>, PredNewRel {
+tc_a1fb80e1, TypeJ>, Enc_88d4d9, Requires<[HasV60T]>, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0100;
let Inst{31-21} = 0b01010011010;
@@ -5195,7 +5249,7 @@ def J2_jumprz : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32!=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000100;
@@ -5210,7 +5264,7 @@ def J2_jumprzpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32!=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000100;
@@ -5225,7 +5279,7 @@ def J2_jumpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_1b834fe7, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b0;
@@ -5247,7 +5301,7 @@ def J2_jumpt_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, b15_2Imm:$Ii),
"if ($Pu4) jump $Ii",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_1b834fe7, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -5255,7 +5309,7 @@ def J2_jumptnew : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4.new) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b010;
let Inst{21-21} = 0b0;
@@ -5278,7 +5332,7 @@ def J2_jumptnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4.new) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b110;
let Inst{21-21} = 0b0;
@@ -5301,7 +5355,7 @@ def J2_jumptpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, Requires<[HasV60T]>, PredNewRel {
+tc_b5bfaa60, TypeJ>, Enc_daea09, Requires<[HasV60T]>, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b100;
let Inst{21-21} = 0b0;
@@ -5323,7 +5377,7 @@ def J2_loop0i : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"loop0($Ii,#$II)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9939385 {
+tc_1000eb10, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001000;
@@ -5338,7 +5392,7 @@ def J2_loop0r : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"loop0($Ii,$Rs32)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_5790679 {
+tc_f055fbb6, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5354,7 +5408,7 @@ def J2_loop1i : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"loop1($Ii,#$II)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9939385 {
+tc_1000eb10, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001001;
@@ -5369,7 +5423,7 @@ def J2_loop1r : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"loop1($Ii,$Rs32)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_5790679 {
+tc_f055fbb6, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5385,7 +5439,7 @@ def J2_pause : HInst<
(outs),
(ins u8_0Imm:$Ii),
"pause(#$Ii)",
-J_tc_2early_SLOT2, TypeJ>, Enc_8732960 {
+tc_b189ad4c, TypeJ>, Enc_a51a9a {
let Inst{1-0} = 0b00;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5396,7 +5450,7 @@ def J2_ploop1si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp1loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001101;
@@ -5412,7 +5466,7 @@ def J2_ploop1sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp1loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5429,7 +5483,7 @@ def J2_ploop2si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp2loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001110;
@@ -5445,7 +5499,7 @@ def J2_ploop2sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp2loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5462,7 +5516,7 @@ def J2_ploop3si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp3loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001111;
@@ -5478,7 +5532,7 @@ def J2_ploop3sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp3loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5495,7 +5549,7 @@ def J2_trap0 : HInst<
(outs),
(ins u8_0Imm:$Ii),
"trap0(#$Ii)",
-J_tc_2early_SLOT2, TypeJ>, Enc_8732960 {
+tc_cbe45117, TypeJ>, Enc_a51a9a {
let Inst{1-0} = 0b00;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5506,7 +5560,7 @@ def J4_cmpeq_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5531,7 +5585,7 @@ def J4_cmpeq_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5556,7 +5610,7 @@ def J4_cmpeq_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010001;
@@ -5579,7 +5633,7 @@ def J4_cmpeq_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010001;
@@ -5602,7 +5656,7 @@ def J4_cmpeq_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010001;
@@ -5625,7 +5679,7 @@ def J4_cmpeq_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010001;
@@ -5648,7 +5702,7 @@ def J4_cmpeq_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5672,7 +5726,7 @@ def J4_cmpeq_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5696,7 +5750,7 @@ def J4_cmpeq_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010000;
@@ -5718,7 +5772,7 @@ def J4_cmpeq_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010000;
@@ -5740,7 +5794,7 @@ def J4_cmpeq_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010000;
@@ -5762,7 +5816,7 @@ def J4_cmpeq_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010000;
@@ -5784,7 +5838,7 @@ def J4_cmpeqi_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5809,7 +5863,7 @@ def J4_cmpeqi_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5834,7 +5888,7 @@ def J4_cmpeqi_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000001;
@@ -5857,7 +5911,7 @@ def J4_cmpeqi_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000001;
@@ -5880,7 +5934,7 @@ def J4_cmpeqi_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001001;
@@ -5903,7 +5957,7 @@ def J4_cmpeqi_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001001;
@@ -5926,7 +5980,7 @@ def J4_cmpeqi_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5950,7 +6004,7 @@ def J4_cmpeqi_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5974,7 +6028,7 @@ def J4_cmpeqi_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000000;
@@ -5996,7 +6050,7 @@ def J4_cmpeqi_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000000;
@@ -6018,7 +6072,7 @@ def J4_cmpeqi_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001000;
@@ -6040,7 +6094,7 @@ def J4_cmpeqi_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001000;
@@ -6062,7 +6116,7 @@ def J4_cmpeqn1_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4359901, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_e90a15, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6087,7 +6141,7 @@ def J4_cmpeqn1_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8612939, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_5a18b3, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6112,7 +6166,7 @@ def J4_cmpeqn1_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_844699, PredRel {
+tc_d108a090, TypeCJ>, Enc_1de724, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001000111;
@@ -6135,7 +6189,7 @@ def J4_cmpeqn1_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5338033, PredRel {
+tc_d108a090, TypeCJ>, Enc_14640c, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001000111;
@@ -6158,7 +6212,7 @@ def J4_cmpeqn1_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14150875, PredRel {
+tc_d108a090, TypeCJ>, Enc_668704, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001001111;
@@ -6181,7 +6235,7 @@ def J4_cmpeqn1_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_15450971, PredRel {
+tc_d108a090, TypeCJ>, Enc_800e04, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001001111;
@@ -6204,7 +6258,7 @@ def J4_cmpeqn1_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_14998517, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_4aca3a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6228,7 +6282,7 @@ def J4_cmpeqn1_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_11544269, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_f7ea77, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6252,7 +6306,7 @@ def J4_cmpeqn1_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5401217, PredRel {
+tc_d108a090, TypeCJ>, Enc_405228, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001000110;
@@ -6274,7 +6328,7 @@ def J4_cmpeqn1_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12419313, PredRel {
+tc_d108a090, TypeCJ>, Enc_3a2484, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001000110;
@@ -6296,7 +6350,7 @@ def J4_cmpeqn1_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_4684887, PredRel {
+tc_d108a090, TypeCJ>, Enc_736575, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001001110;
@@ -6318,7 +6372,7 @@ def J4_cmpeqn1_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_220949, PredRel {
+tc_d108a090, TypeCJ>, Enc_8e583a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001001110;
@@ -6340,7 +6394,7 @@ def J4_cmpgt_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6365,7 +6419,7 @@ def J4_cmpgt_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6390,7 +6444,7 @@ def J4_cmpgt_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010011;
@@ -6413,7 +6467,7 @@ def J4_cmpgt_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010011;
@@ -6436,7 +6490,7 @@ def J4_cmpgt_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010011;
@@ -6459,7 +6513,7 @@ def J4_cmpgt_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010011;
@@ -6482,7 +6536,7 @@ def J4_cmpgt_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6506,7 +6560,7 @@ def J4_cmpgt_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6530,7 +6584,7 @@ def J4_cmpgt_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010010;
@@ -6552,7 +6606,7 @@ def J4_cmpgt_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010010;
@@ -6574,7 +6628,7 @@ def J4_cmpgt_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010010;
@@ -6596,7 +6650,7 @@ def J4_cmpgt_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010010;
@@ -6618,7 +6672,7 @@ def J4_cmpgti_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6643,7 +6697,7 @@ def J4_cmpgti_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6668,7 +6722,7 @@ def J4_cmpgti_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000011;
@@ -6691,7 +6745,7 @@ def J4_cmpgti_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000011;
@@ -6714,7 +6768,7 @@ def J4_cmpgti_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001011;
@@ -6737,7 +6791,7 @@ def J4_cmpgti_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001011;
@@ -6760,7 +6814,7 @@ def J4_cmpgti_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6784,7 +6838,7 @@ def J4_cmpgti_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6808,7 +6862,7 @@ def J4_cmpgti_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000010;
@@ -6830,7 +6884,7 @@ def J4_cmpgti_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000010;
@@ -6852,7 +6906,7 @@ def J4_cmpgti_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001010;
@@ -6874,7 +6928,7 @@ def J4_cmpgti_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001010;
@@ -6896,7 +6950,7 @@ def J4_cmpgtn1_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8674673, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_3694bd, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6921,7 +6975,7 @@ def J4_cmpgtn1_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15763937, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_a6853f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6946,7 +7000,7 @@ def J4_cmpgtn1_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5915771, PredRel {
+tc_d108a090, TypeCJ>, Enc_a42857, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001000111;
@@ -6969,7 +7023,7 @@ def J4_cmpgtn1_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7315939, PredRel {
+tc_d108a090, TypeCJ>, Enc_f6fe0b, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001000111;
@@ -6992,7 +7046,7 @@ def J4_cmpgtn1_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7785569, PredRel {
+tc_d108a090, TypeCJ>, Enc_3e3989, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001001111;
@@ -7015,7 +7069,7 @@ def J4_cmpgtn1_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_10968391, PredRel {
+tc_d108a090, TypeCJ>, Enc_b909d2, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001001111;
@@ -7038,7 +7092,7 @@ def J4_cmpgtn1_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_364753, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_f82302, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -7062,7 +7116,7 @@ def J4_cmpgtn1_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8479583, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_6413b6, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -7086,7 +7140,7 @@ def J4_cmpgtn1_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_2428539, PredRel {
+tc_d108a090, TypeCJ>, Enc_b78edd, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001000110;
@@ -7108,7 +7162,7 @@ def J4_cmpgtn1_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_8919369, PredRel {
+tc_d108a090, TypeCJ>, Enc_041d7b, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001000110;
@@ -7130,7 +7184,7 @@ def J4_cmpgtn1_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_8577055, PredRel {
+tc_d108a090, TypeCJ>, Enc_b1e1fb, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001001110;
@@ -7152,7 +7206,7 @@ def J4_cmpgtn1_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14530015, PredRel {
+tc_d108a090, TypeCJ>, Enc_178717, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001001110;
@@ -7174,7 +7228,7 @@ def J4_cmpgtu_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7199,7 +7253,7 @@ def J4_cmpgtu_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7224,7 +7278,7 @@ def J4_cmpgtu_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010101;
@@ -7247,7 +7301,7 @@ def J4_cmpgtu_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010101;
@@ -7270,7 +7324,7 @@ def J4_cmpgtu_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010101;
@@ -7293,7 +7347,7 @@ def J4_cmpgtu_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010101;
@@ -7316,7 +7370,7 @@ def J4_cmpgtu_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7340,7 +7394,7 @@ def J4_cmpgtu_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7364,7 +7418,7 @@ def J4_cmpgtu_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010100;
@@ -7386,7 +7440,7 @@ def J4_cmpgtu_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010100;
@@ -7408,7 +7462,7 @@ def J4_cmpgtu_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010100;
@@ -7430,7 +7484,7 @@ def J4_cmpgtu_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010100;
@@ -7452,7 +7506,7 @@ def J4_cmpgtui_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7477,7 +7531,7 @@ def J4_cmpgtui_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7502,7 +7556,7 @@ def J4_cmpgtui_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000101;
@@ -7525,7 +7579,7 @@ def J4_cmpgtui_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000101;
@@ -7548,7 +7602,7 @@ def J4_cmpgtui_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001101;
@@ -7571,7 +7625,7 @@ def J4_cmpgtui_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001101;
@@ -7594,7 +7648,7 @@ def J4_cmpgtui_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7618,7 +7672,7 @@ def J4_cmpgtui_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7642,7 +7696,7 @@ def J4_cmpgtui_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000100;
@@ -7664,7 +7718,7 @@ def J4_cmpgtui_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000100;
@@ -7686,7 +7740,7 @@ def J4_cmpgtui_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001100;
@@ -7708,7 +7762,7 @@ def J4_cmpgtui_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001100;
@@ -7730,7 +7784,7 @@ def J4_cmplt_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gt($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7755,7 +7809,7 @@ def J4_cmplt_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gt($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7780,7 +7834,7 @@ def J4_cmplt_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gt($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7804,7 +7858,7 @@ def J4_cmplt_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gt($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7828,7 +7882,7 @@ def J4_cmpltu_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gtu($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7853,7 +7907,7 @@ def J4_cmpltu_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gtu($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7878,7 +7932,7 @@ def J4_cmpltu_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gtu($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7902,7 +7956,7 @@ def J4_cmpltu_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gtu($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7926,7 +7980,7 @@ def J4_hintjumpr : HInst<
(outs),
(ins IntRegs:$Rs32),
"hintjr($Rs32)",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_b08b653e, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010010101;
let isTerminator = 1;
@@ -7938,7 +7992,7 @@ def J4_jumpseti : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u6_0Imm:$II, b30_2Imm:$Ii),
"$Rd16 = #$II ; jump $Ii",
-COMPOUND, TypeCJ>, Enc_4834775 {
+tc_1e062b18, TypeCJ>, Enc_9e4c3f {
let Inst{0-0} = 0b0;
let Inst{31-22} = 0b0001011000;
let hasNewValue = 1;
@@ -7956,7 +8010,7 @@ def J4_jumpsetr : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"$Rd16 = $Rs16 ; jump $Ii",
-COMPOUND, TypeCJ>, Enc_2639299 {
+tc_1e062b18, TypeCJ>, Enc_66bce1 {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001011100;
@@ -7975,7 +8029,7 @@ def J4_tstbit0_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!tstbit($Ns8.new,#0)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -7999,7 +8053,7 @@ def J4_tstbit0_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!tstbit($Ns8.new,#0)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -8023,7 +8077,7 @@ def J4_tstbit0_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001000111;
@@ -8045,7 +8099,7 @@ def J4_tstbit0_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001000111;
@@ -8067,7 +8121,7 @@ def J4_tstbit0_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001001111;
@@ -8089,7 +8143,7 @@ def J4_tstbit0_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001001111;
@@ -8111,7 +8165,7 @@ def J4_tstbit0_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (tstbit($Ns8.new,#0)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -8134,7 +8188,7 @@ def J4_tstbit0_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (tstbit($Ns8.new,#0)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -8157,7 +8211,7 @@ def J4_tstbit0_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001000110;
@@ -8178,7 +8232,7 @@ def J4_tstbit0_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001000110;
@@ -8199,7 +8253,7 @@ def J4_tstbit0_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001001110;
@@ -8220,7 +8274,7 @@ def J4_tstbit0_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001001110;
@@ -8241,7 +8295,7 @@ def L2_deallocframe : HInst<
(outs),
(ins),
"deallocframe",
-LD_tc_ld_SLOT01, TypeLD>, Enc_0 {
+tc_c1dbc916, TypeLD>, Enc_3a3d62 {
let Inst{4-0} = 0b11110;
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010000000;
@@ -8255,7 +8309,7 @@ def L2_loadalignb_io : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Ryy32 = memb_fifo($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_449439 {
+tc_14da557c, TypeLD>, Enc_a27588 {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8272,7 +8326,7 @@ def L2_loadalignb_pbr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110100;
let accessSize = ByteAccess;
@@ -8283,7 +8337,7 @@ def L2_loadalignb_pci : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_971347 {
+tc_d2a33af5, TypeLD>, Enc_74aef2 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000100;
let addrMode = PostInc;
@@ -8296,7 +8350,7 @@ def L2_loadalignb_pcr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000100;
let addrMode = PostInc;
@@ -8309,7 +8363,7 @@ def L2_loadalignb_pi : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Ryy32 = memb_fifo($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6372758 {
+tc_ae762521, TypeLD>, Enc_6b197f {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010100;
let addrMode = PostInc;
@@ -8321,7 +8375,7 @@ def L2_loadalignb_pr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100100;
let addrMode = PostInc;
@@ -8333,7 +8387,7 @@ def L2_loadalignb_zomap : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32),
"$Ryy32 = memb_fifo($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Constraints = "$Ryy32 = $Ryy32in";
@@ -8342,7 +8396,7 @@ def L2_loadalignh_io : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32, s31_1Imm:$Ii),
"$Ryy32 = memh_fifo($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11930027 {
+tc_14da557c, TypeLD>, Enc_5cd7e9 {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8359,7 +8413,7 @@ def L2_loadalignh_pbr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110010;
let accessSize = HalfWordAccess;
@@ -8370,7 +8424,7 @@ def L2_loadalignh_pci : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_1971351 {
+tc_d2a33af5, TypeLD>, Enc_9e2e1c {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000010;
let addrMode = PostInc;
@@ -8383,7 +8437,7 @@ def L2_loadalignh_pcr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000010;
let addrMode = PostInc;
@@ -8396,7 +8450,7 @@ def L2_loadalignh_pi : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Ryy32 = memh_fifo($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_3372766 {
+tc_ae762521, TypeLD>, Enc_bd1cbc {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010010;
let addrMode = PostInc;
@@ -8408,7 +8462,7 @@ def L2_loadalignh_pr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100010;
let addrMode = PostInc;
@@ -8420,7 +8474,7 @@ def L2_loadalignh_zomap : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32),
"$Ryy32 = memh_fifo($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Constraints = "$Ryy32 = $Ryy32in";
@@ -8429,7 +8483,7 @@ def L2_loadbsw2_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = membh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738 {
+tc_bf6fa601, TypeLD>, Enc_de0214 {
let Inst{24-21} = 0b0001;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8447,7 +8501,7 @@ def L2_loadbsw2_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110001;
let hasNewValue = 1;
@@ -8460,7 +8514,7 @@ def L2_loadbsw2_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000001;
let hasNewValue = 1;
@@ -8475,7 +8529,7 @@ def L2_loadbsw2_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000001;
let hasNewValue = 1;
@@ -8490,7 +8544,7 @@ def L2_loadbsw2_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = membh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009 {
+tc_65dc7cc4, TypeLD>, Enc_152467 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010001;
let hasNewValue = 1;
@@ -8504,7 +8558,7 @@ def L2_loadbsw2_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100001;
let hasNewValue = 1;
@@ -8518,7 +8572,7 @@ def L2_loadbsw2_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = membh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8528,7 +8582,7 @@ def L2_loadbsw4_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rdd32 = membh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_9852473 {
+tc_bf6fa601, TypeLD>, Enc_2d7491 {
let Inst{24-21} = 0b0111;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8544,7 +8598,7 @@ def L2_loadbsw4_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110111;
let accessSize = WordAccess;
@@ -8555,7 +8609,7 @@ def L2_loadbsw4_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_3931661 {
+tc_3eab77bd, TypeLD>, Enc_70b24b {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000111;
let addrMode = PostInc;
@@ -8568,7 +8622,7 @@ def L2_loadbsw4_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000111;
let addrMode = PostInc;
@@ -8581,7 +8635,7 @@ def L2_loadbsw4_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rdd32 = membh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_8752140 {
+tc_65dc7cc4, TypeLD>, Enc_71f1b4 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010111;
let addrMode = PostInc;
@@ -8593,7 +8647,7 @@ def L2_loadbsw4_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100111;
let addrMode = PostInc;
@@ -8605,7 +8659,7 @@ def L2_loadbsw4_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = membh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -8613,7 +8667,7 @@ def L2_loadbzw2_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memubh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738 {
+tc_bf6fa601, TypeLD>, Enc_de0214 {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8631,7 +8685,7 @@ def L2_loadbzw2_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110011;
let hasNewValue = 1;
@@ -8644,7 +8698,7 @@ def L2_loadbzw2_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000011;
let hasNewValue = 1;
@@ -8659,7 +8713,7 @@ def L2_loadbzw2_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000011;
let hasNewValue = 1;
@@ -8674,7 +8728,7 @@ def L2_loadbzw2_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memubh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009 {
+tc_65dc7cc4, TypeLD>, Enc_152467 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010011;
let hasNewValue = 1;
@@ -8688,7 +8742,7 @@ def L2_loadbzw2_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100011;
let hasNewValue = 1;
@@ -8702,7 +8756,7 @@ def L2_loadbzw2_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memubh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8712,7 +8766,7 @@ def L2_loadbzw4_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rdd32 = memubh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_9852473 {
+tc_bf6fa601, TypeLD>, Enc_2d7491 {
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8728,7 +8782,7 @@ def L2_loadbzw4_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110101;
let accessSize = WordAccess;
@@ -8739,7 +8793,7 @@ def L2_loadbzw4_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_3931661 {
+tc_3eab77bd, TypeLD>, Enc_70b24b {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000101;
let addrMode = PostInc;
@@ -8752,7 +8806,7 @@ def L2_loadbzw4_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000101;
let addrMode = PostInc;
@@ -8765,7 +8819,7 @@ def L2_loadbzw4_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rdd32 = memubh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_8752140 {
+tc_65dc7cc4, TypeLD>, Enc_71f1b4 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010101;
let addrMode = PostInc;
@@ -8777,7 +8831,7 @@ def L2_loadbzw4_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100101;
let addrMode = PostInc;
@@ -8789,7 +8843,7 @@ def L2_loadbzw4_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memubh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -8797,7 +8851,7 @@ def L2_loadrb_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = memb($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14461004, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_211aaa, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8818,7 +8872,7 @@ def L2_loadrb_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111000;
let hasNewValue = 1;
@@ -8831,7 +8885,7 @@ def L2_loadrb_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_16303398 {
+tc_3eab77bd, TypeLD>, Enc_e0a47a {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001000;
let hasNewValue = 1;
@@ -8846,7 +8900,7 @@ def L2_loadrb_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001000;
let hasNewValue = 1;
@@ -8861,7 +8915,7 @@ def L2_loadrb_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5598813, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_222336, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011000;
let hasNewValue = 1;
@@ -8877,7 +8931,7 @@ def L2_loadrb_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101000;
let hasNewValue = 1;
@@ -8891,7 +8945,7 @@ def L2_loadrb_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8901,7 +8955,7 @@ def L2_loadrbgp : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memb(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -8920,7 +8974,7 @@ def L2_loadrd_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s29_3Imm:$Ii),
"$Rdd32 = memd($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_163381, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_fa3ba4, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8939,7 +8993,7 @@ def L2_loadrd_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111110;
let accessSize = DoubleWordAccess;
@@ -8950,7 +9004,7 @@ def L2_loadrd_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_931653 {
+tc_3eab77bd, TypeLD>, Enc_b05839 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001110;
let addrMode = PostInc;
@@ -8963,7 +9017,7 @@ def L2_loadrd_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001110;
let addrMode = PostInc;
@@ -8976,7 +9030,7 @@ def L2_loadrd_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii),
"$Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_9752128, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_5bdd42, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011110;
let addrMode = PostInc;
@@ -8990,7 +9044,7 @@ def L2_loadrd_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101110;
let addrMode = PostInc;
@@ -9002,7 +9056,7 @@ def L2_loadrd_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9010,7 +9064,7 @@ def L2_loadrdgp : HInst<
(outs DoubleRegs:$Rdd32),
(ins u29_3Imm:$Ii),
"$Rdd32 = memd(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4975051, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_509701, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b01001;
let accessSize = DoubleWordAccess;
@@ -9027,7 +9081,7 @@ def L2_loadrh_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_de0214, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9048,7 +9102,7 @@ def L2_loadrh_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111010;
let hasNewValue = 1;
@@ -9061,7 +9115,7 @@ def L2_loadrh_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001010;
let hasNewValue = 1;
@@ -9076,7 +9130,7 @@ def L2_loadrh_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001010;
let hasNewValue = 1;
@@ -9091,7 +9145,7 @@ def L2_loadrh_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_152467, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011010;
let hasNewValue = 1;
@@ -9107,7 +9161,7 @@ def L2_loadrh_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101010;
let hasNewValue = 1;
@@ -9121,7 +9175,7 @@ def L2_loadrh_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9131,7 +9185,7 @@ def L2_loadrhgp : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memh(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9150,7 +9204,7 @@ def L2_loadri_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rd32 = memw($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_8990840, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_2a3787, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9171,7 +9225,7 @@ def L2_loadri_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111100;
let hasNewValue = 1;
@@ -9184,7 +9238,7 @@ def L2_loadri_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14303394 {
+tc_3eab77bd, TypeLD>, Enc_27fd0e {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001100;
let hasNewValue = 1;
@@ -9199,7 +9253,7 @@ def L2_loadri_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001100;
let hasNewValue = 1;
@@ -9214,7 +9268,7 @@ def L2_loadri_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_16376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_3d920a, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011100;
let hasNewValue = 1;
@@ -9230,7 +9284,7 @@ def L2_loadri_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101100;
let hasNewValue = 1;
@@ -9244,7 +9298,7 @@ def L2_loadri_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9254,7 +9308,7 @@ def L2_loadrigp : HInst<
(outs IntRegs:$Rd32),
(ins u30_2Imm:$Ii),
"$Rd32 = memw(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_8814718, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_4f4ed7, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9273,7 +9327,7 @@ def L2_loadrub_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = memub($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14461004, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_211aaa, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9294,7 +9348,7 @@ def L2_loadrub_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111001;
let hasNewValue = 1;
@@ -9307,7 +9361,7 @@ def L2_loadrub_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_16303398 {
+tc_3eab77bd, TypeLD>, Enc_e0a47a {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001001;
let hasNewValue = 1;
@@ -9322,7 +9376,7 @@ def L2_loadrub_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001001;
let hasNewValue = 1;
@@ -9337,7 +9391,7 @@ def L2_loadrub_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5598813, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_222336, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011001;
let hasNewValue = 1;
@@ -9353,7 +9407,7 @@ def L2_loadrub_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101001;
let hasNewValue = 1;
@@ -9367,7 +9421,7 @@ def L2_loadrub_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9377,7 +9431,7 @@ def L2_loadrubgp : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memub(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9396,7 +9450,7 @@ def L2_loadruh_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memuh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_de0214, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9417,7 +9471,7 @@ def L2_loadruh_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111011;
let hasNewValue = 1;
@@ -9430,7 +9484,7 @@ def L2_loadruh_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001011;
let hasNewValue = 1;
@@ -9445,7 +9499,7 @@ def L2_loadruh_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001011;
let hasNewValue = 1;
@@ -9460,7 +9514,7 @@ def L2_loadruh_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_152467, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011011;
let hasNewValue = 1;
@@ -9476,7 +9530,7 @@ def L2_loadruh_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101011;
let hasNewValue = 1;
@@ -9490,7 +9544,7 @@ def L2_loadruh_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9500,7 +9554,7 @@ def L2_loadruhgp : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memuh(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9519,20 +9573,20 @@ def L2_loadw_locked : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memw_locked($Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4075554 {
+tc_29c14515, TypeLD>, Enc_5e2823 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010010000;
let hasNewValue = 1;
let opNewValue = 0;
let accessSize = WordAccess;
-let isSoloAX = 1;
let mayLoad = 1;
+let isSoloAX = 1;
}
def L2_ploadrbf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101000;
let isPredicated = 1;
@@ -9554,7 +9608,7 @@ def L2_ploadrbf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9571,7 +9625,7 @@ def L2_ploadrbf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9581,7 +9635,7 @@ def L2_ploadrbfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111000;
let isPredicated = 1;
@@ -9604,7 +9658,7 @@ def L2_ploadrbfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9622,7 +9676,7 @@ def L2_ploadrbfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9632,7 +9686,7 @@ def L2_ploadrbt_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001000;
let isPredicated = 1;
@@ -9653,7 +9707,7 @@ def L2_ploadrbt_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9669,7 +9723,7 @@ def L2_ploadrbt_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9679,7 +9733,7 @@ def L2_ploadrbtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011000;
let isPredicated = 1;
@@ -9701,7 +9755,7 @@ def L2_ploadrbtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9718,7 +9772,7 @@ def L2_ploadrbtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9728,7 +9782,7 @@ def L2_ploadrdf_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101110;
let isPredicated = 1;
@@ -9748,7 +9802,7 @@ def L2_ploadrdf_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_ae762521, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9763,7 +9817,7 @@ def L2_ploadrdf_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9771,7 +9825,7 @@ def L2_ploadrdfnew_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111110;
let isPredicated = 1;
@@ -9792,7 +9846,7 @@ def L2_ploadrdfnew_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_e578178f, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9808,7 +9862,7 @@ def L2_ploadrdfnew_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9816,7 +9870,7 @@ def L2_ploadrdt_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if ($Pt4) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001110;
let isPredicated = 1;
@@ -9835,7 +9889,7 @@ def L2_ploadrdt_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if ($Pt4) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_ae762521, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9849,7 +9903,7 @@ def L2_ploadrdt_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9857,7 +9911,7 @@ def L2_ploadrdtnew_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011110;
let isPredicated = 1;
@@ -9877,7 +9931,7 @@ def L2_ploadrdtnew_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_e578178f, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9892,7 +9946,7 @@ def L2_ploadrdtnew_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9900,7 +9954,7 @@ def L2_ploadrhf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101010;
let isPredicated = 1;
@@ -9922,7 +9976,7 @@ def L2_ploadrhf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -9939,7 +9993,7 @@ def L2_ploadrhf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9949,7 +10003,7 @@ def L2_ploadrhfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111010;
let isPredicated = 1;
@@ -9972,7 +10026,7 @@ def L2_ploadrhfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -9990,7 +10044,7 @@ def L2_ploadrhfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10000,7 +10054,7 @@ def L2_ploadrht_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001010;
let isPredicated = 1;
@@ -10021,7 +10075,7 @@ def L2_ploadrht_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -10037,7 +10091,7 @@ def L2_ploadrht_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10047,7 +10101,7 @@ def L2_ploadrhtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011010;
let isPredicated = 1;
@@ -10069,7 +10123,7 @@ def L2_ploadrhtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -10086,7 +10140,7 @@ def L2_ploadrhtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10096,7 +10150,7 @@ def L2_ploadrif_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if (!$Pt4) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101100;
let isPredicated = 1;
@@ -10118,7 +10172,7 @@ def L2_ploadrif_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if (!$Pt4) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10135,7 +10189,7 @@ def L2_ploadrif_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10145,7 +10199,7 @@ def L2_ploadrifnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111100;
let isPredicated = 1;
@@ -10168,7 +10222,7 @@ def L2_ploadrifnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10186,7 +10240,7 @@ def L2_ploadrifnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10196,7 +10250,7 @@ def L2_ploadrit_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if ($Pt4) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001100;
let isPredicated = 1;
@@ -10217,7 +10271,7 @@ def L2_ploadrit_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if ($Pt4) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10233,7 +10287,7 @@ def L2_ploadrit_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10243,7 +10297,7 @@ def L2_ploadritnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011100;
let isPredicated = 1;
@@ -10265,7 +10319,7 @@ def L2_ploadritnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10282,7 +10336,7 @@ def L2_ploadritnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10292,7 +10346,7 @@ def L2_ploadrubf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101001;
let isPredicated = 1;
@@ -10314,7 +10368,7 @@ def L2_ploadrubf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10331,7 +10385,7 @@ def L2_ploadrubf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10341,7 +10395,7 @@ def L2_ploadrubfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111001;
let isPredicated = 1;
@@ -10364,7 +10418,7 @@ def L2_ploadrubfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10382,7 +10436,7 @@ def L2_ploadrubfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10392,7 +10446,7 @@ def L2_ploadrubt_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001001;
let isPredicated = 1;
@@ -10413,7 +10467,7 @@ def L2_ploadrubt_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10429,7 +10483,7 @@ def L2_ploadrubt_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10439,7 +10493,7 @@ def L2_ploadrubtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011001;
let isPredicated = 1;
@@ -10461,7 +10515,7 @@ def L2_ploadrubtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10478,7 +10532,7 @@ def L2_ploadrubtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10488,7 +10542,7 @@ def L2_ploadruhf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101011;
let isPredicated = 1;
@@ -10510,7 +10564,7 @@ def L2_ploadruhf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10527,7 +10581,7 @@ def L2_ploadruhf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10537,7 +10591,7 @@ def L2_ploadruhfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111011;
let isPredicated = 1;
@@ -10560,7 +10614,7 @@ def L2_ploadruhfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10578,7 +10632,7 @@ def L2_ploadruhfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10588,7 +10642,7 @@ def L2_ploadruht_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001011;
let isPredicated = 1;
@@ -10609,7 +10663,7 @@ def L2_ploadruht_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10625,7 +10679,7 @@ def L2_ploadruht_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10635,7 +10689,7 @@ def L2_ploadruhtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011011;
let isPredicated = 1;
@@ -10657,7 +10711,7 @@ def L2_ploadruhtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10674,7 +10728,7 @@ def L2_ploadruhtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10684,14 +10738,14 @@ def L4_add_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10702,7 +10756,7 @@ def L4_add_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10710,14 +10764,14 @@ def L4_add_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10728,7 +10782,7 @@ def L4_add_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10736,14 +10790,14 @@ def L4_add_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10754,7 +10808,7 @@ def L4_add_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10762,14 +10816,14 @@ def L4_and_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10780,7 +10834,7 @@ def L4_and_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10788,14 +10842,14 @@ def L4_and_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10806,7 +10860,7 @@ def L4_and_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10814,14 +10868,14 @@ def L4_and_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10832,7 +10886,7 @@ def L4_and_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10840,14 +10894,14 @@ def L4_iadd_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10858,7 +10912,7 @@ def L4_iadd_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10866,14 +10920,14 @@ def L4_iadd_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10884,7 +10938,7 @@ def L4_iadd_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10892,14 +10946,14 @@ def L4_iadd_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10910,7 +10964,7 @@ def L4_iadd_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10918,14 +10972,14 @@ def L4_iand_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10936,7 +10990,7 @@ def L4_iand_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10944,14 +10998,14 @@ def L4_iand_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10962,7 +11016,7 @@ def L4_iand_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10970,14 +11024,14 @@ def L4_iand_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10988,7 +11042,7 @@ def L4_iand_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10996,14 +11050,14 @@ def L4_ior_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11014,7 +11068,7 @@ def L4_ior_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11022,14 +11076,14 @@ def L4_ior_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11040,7 +11094,7 @@ def L4_ior_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11048,14 +11102,14 @@ def L4_ior_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11066,7 +11120,7 @@ def L4_ior_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11074,14 +11128,14 @@ def L4_isub_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11092,7 +11146,7 @@ def L4_isub_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11100,14 +11154,14 @@ def L4_isub_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11118,7 +11172,7 @@ def L4_isub_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11126,14 +11180,14 @@ def L4_isub_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11144,7 +11198,7 @@ def L4_isub_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11152,7 +11206,7 @@ def L4_loadalignb_ap : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Re32),
(ins DoubleRegs:$Ryy32in, u32_0Imm:$II),
"$Ryy32 = memb_fifo($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11047413 {
+tc_261d9b78, TypeLD>, Enc_f394d3 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010100;
@@ -11160,8 +11214,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 3;
@@ -11174,13 +11228,13 @@ def L4_loadalignb_ur : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Ryy32 = memb_fifo($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_7303598 {
+tc_baccf077, TypeLD>, Enc_04c959 {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100100;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11194,7 +11248,7 @@ def L4_loadalignh_ap : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Re32),
(ins DoubleRegs:$Ryy32in, u32_0Imm:$II),
"$Ryy32 = memh_fifo($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11047413 {
+tc_261d9b78, TypeLD>, Enc_f394d3 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010010;
@@ -11202,8 +11256,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 3;
@@ -11216,13 +11270,13 @@ def L4_loadalignh_ur : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Ryy32 = memh_fifo($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_7303598 {
+tc_baccf077, TypeLD>, Enc_04c959 {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100010;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11236,7 +11290,7 @@ def L4_loadbsw2_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = membh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010001;
@@ -11246,8 +11300,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11259,15 +11313,15 @@ def L4_loadbsw2_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = membh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163 {
+tc_7d9a56cd, TypeLD>, Enc_4f677b {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11280,7 +11334,7 @@ def L4_loadbsw4_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = membh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010111;
@@ -11288,8 +11342,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11301,13 +11355,13 @@ def L4_loadbsw4_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = membh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416 {
+tc_7d9a56cd, TypeLD>, Enc_6185fe {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100111;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11320,7 +11374,7 @@ def L4_loadbzw2_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memubh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010011;
@@ -11330,8 +11384,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11343,15 +11397,15 @@ def L4_loadbzw2_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memubh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163 {
+tc_7d9a56cd, TypeLD>, Enc_4f677b {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100011;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11364,7 +11418,7 @@ def L4_loadbzw4_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = memubh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010101;
@@ -11372,8 +11426,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11385,13 +11439,13 @@ def L4_loadbzw4_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = memubh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416 {
+tc_7d9a56cd, TypeLD>, Enc_6185fe {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100101;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11404,18 +11458,18 @@ def L4_loadd_locked : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memd_locked($Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4030179 {
+tc_29c14515, TypeLD>, Enc_3a3d62 {
let Inst{13-5} = 0b010000000;
let Inst{31-21} = 0b10010010000;
let accessSize = DoubleWordAccess;
-let isSoloAX = 1;
let mayLoad = 1;
+let isSoloAX = 1;
}
def L4_loadrb_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memb($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011000;
@@ -11425,8 +11479,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11438,7 +11492,7 @@ def L4_loadrb_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010000;
let hasNewValue = 1;
@@ -11455,15 +11509,15 @@ def L4_loadrb_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memb($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101000;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11477,7 +11531,7 @@ def L4_loadrd_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = memd($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011110;
@@ -11485,8 +11539,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11498,7 +11552,7 @@ def L4_loadrd_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7581852, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_84bff1, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010110;
let addrMode = BaseRegOffset;
@@ -11513,13 +11567,13 @@ def L4_loadrd_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = memd($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_6185fe, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101110;
let addrMode = BaseLongOffset;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11533,7 +11587,7 @@ def L4_loadrh_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011010;
@@ -11543,8 +11597,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11556,7 +11610,7 @@ def L4_loadrh_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010010;
let hasNewValue = 1;
@@ -11573,15 +11627,15 @@ def L4_loadrh_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101010;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11595,7 +11649,7 @@ def L4_loadri_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memw($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011100;
@@ -11605,8 +11659,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11618,7 +11672,7 @@ def L4_loadri_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010100;
let hasNewValue = 1;
@@ -11635,15 +11689,15 @@ def L4_loadri_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memw($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101100;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11657,7 +11711,7 @@ def L4_loadrub_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memub($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011001;
@@ -11667,8 +11721,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11680,7 +11734,7 @@ def L4_loadrub_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010001;
let hasNewValue = 1;
@@ -11697,15 +11751,15 @@ def L4_loadrub_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memub($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11719,7 +11773,7 @@ def L4_loadruh_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memuh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011011;
@@ -11729,8 +11783,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11742,7 +11796,7 @@ def L4_loadruh_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010011;
let hasNewValue = 1;
@@ -11759,15 +11813,15 @@ def L4_loadruh_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memuh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101011;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11781,14 +11835,14 @@ def L4_or_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11799,7 +11853,7 @@ def L4_or_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11807,14 +11861,14 @@ def L4_or_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11825,7 +11879,7 @@ def L4_or_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11833,14 +11887,14 @@ def L4_or_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11851,7 +11905,7 @@ def L4_or_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11859,7 +11913,7 @@ def L4_ploadrbf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111000;
@@ -11869,8 +11923,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11884,7 +11938,7 @@ def L4_ploadrbf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -11901,7 +11955,7 @@ def L4_ploadrbfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111000;
@@ -11911,9 +11965,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11927,7 +11981,7 @@ def L4_ploadrbfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -11945,7 +11999,7 @@ def L4_ploadrbt_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111000;
@@ -11954,8 +12008,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11969,7 +12023,7 @@ def L4_ploadrbt_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000000;
let isPredicated = 1;
let hasNewValue = 1;
@@ -11985,7 +12039,7 @@ def L4_ploadrbtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111000;
@@ -11994,9 +12048,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -12010,7 +12064,7 @@ def L4_ploadrbtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010000;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12027,7 +12081,7 @@ def L4_ploadrdf_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111110;
@@ -12035,8 +12089,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12050,7 +12104,7 @@ def L4_ploadrdf_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110001110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12065,7 +12119,7 @@ def L4_ploadrdfnew_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111110;
@@ -12073,9 +12127,9 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12089,7 +12143,7 @@ def L4_ploadrdfnew_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110011110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12105,15 +12159,15 @@ def L4_ploadrdt_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111110;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12127,7 +12181,7 @@ def L4_ploadrdt_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110000110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -12141,16 +12195,16 @@ def L4_ploadrdtnew_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111110;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12164,7 +12218,7 @@ def L4_ploadrdtnew_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110010110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -12179,7 +12233,7 @@ def L4_ploadrhf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111010;
@@ -12189,8 +12243,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12204,7 +12258,7 @@ def L4_ploadrhf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12221,7 +12275,7 @@ def L4_ploadrhfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111010;
@@ -12231,9 +12285,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12247,7 +12301,7 @@ def L4_ploadrhfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12265,7 +12319,7 @@ def L4_ploadrht_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111010;
@@ -12274,8 +12328,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12289,7 +12343,7 @@ def L4_ploadrht_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000010;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12305,7 +12359,7 @@ def L4_ploadrhtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111010;
@@ -12314,9 +12368,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12330,7 +12384,7 @@ def L4_ploadrhtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010010;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12347,7 +12401,7 @@ def L4_ploadrif_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111100;
@@ -12357,8 +12411,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12372,7 +12426,7 @@ def L4_ploadrif_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12389,7 +12443,7 @@ def L4_ploadrifnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111100;
@@ -12399,9 +12453,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12415,7 +12469,7 @@ def L4_ploadrifnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12433,7 +12487,7 @@ def L4_ploadrit_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111100;
@@ -12442,8 +12496,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12457,7 +12511,7 @@ def L4_ploadrit_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12473,7 +12527,7 @@ def L4_ploadritnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111100;
@@ -12482,9 +12536,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12498,7 +12552,7 @@ def L4_ploadritnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12515,7 +12569,7 @@ def L4_ploadrubf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111001;
@@ -12525,8 +12579,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12540,7 +12594,7 @@ def L4_ploadrubf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001001;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12557,7 +12611,7 @@ def L4_ploadrubfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111001;
@@ -12567,9 +12621,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12583,7 +12637,7 @@ def L4_ploadrubfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011001;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12601,7 +12655,7 @@ def L4_ploadrubt_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111001;
@@ -12610,8 +12664,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12625,7 +12679,7 @@ def L4_ploadrubt_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000001;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12641,7 +12695,7 @@ def L4_ploadrubtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111001;
@@ -12650,9 +12704,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12666,7 +12720,7 @@ def L4_ploadrubtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010001;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12683,7 +12737,7 @@ def L4_ploadruhf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111011;
@@ -12693,8 +12747,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12708,7 +12762,7 @@ def L4_ploadruhf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12725,7 +12779,7 @@ def L4_ploadruhfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111011;
@@ -12735,9 +12789,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12751,7 +12805,7 @@ def L4_ploadruhfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12769,7 +12823,7 @@ def L4_ploadruht_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111011;
@@ -12778,8 +12832,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12793,7 +12847,7 @@ def L4_ploadruht_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000011;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12809,7 +12863,7 @@ def L4_ploadruhtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111011;
@@ -12818,9 +12872,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12834,7 +12888,7 @@ def L4_ploadruhtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010011;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12851,7 +12905,7 @@ def L4_return : HInst<
(outs),
(ins),
"dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_0, PredNewRel {
+tc_dcfee7ae, TypeLD>, Enc_3a3d62, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010110000;
@@ -12873,7 +12927,7 @@ def L4_return_f : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4) dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_9ce7a5ab, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1100;
@@ -12885,8 +12939,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12896,7 +12950,7 @@ def L4_return_fnew_pnt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4.new) dealloc_return:nt",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
@@ -12908,9 +12962,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12920,7 +12974,7 @@ def L4_return_fnew_pt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4.new) dealloc_return:t",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1110;
@@ -12932,9 +12986,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12944,7 +12998,7 @@ def L4_return_t : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4) dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_9ce7a5ab, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0100;
@@ -12955,8 +13009,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12966,7 +13020,7 @@ def L4_return_tnew_pnt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4.new) dealloc_return:nt",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0010;
@@ -12977,9 +13031,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12989,7 +13043,7 @@ def L4_return_tnew_pt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4.new) dealloc_return:t",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0110;
@@ -13000,9 +13054,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -13012,14 +13066,14 @@ def L4_sub_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13030,7 +13084,7 @@ def L4_sub_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13038,14 +13092,14 @@ def L4_sub_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13056,7 +13110,7 @@ def L4_sub_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13064,14 +13118,14 @@ def L4_sub_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13082,7 +13136,7 @@ def L4_sub_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13090,7 +13144,7 @@ def M2_acci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += add($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889, ImmRegRel {
+tc_c0cd91a8, TypeM>, Enc_2ae154, ImmRegRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -13105,7 +13159,7 @@ def M2_accii : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 += add($Rs32,#$Ii)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_11522288, ImmRegRel {
+tc_c0cd91a8, TypeM>, Enc_c90aca, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100010000;
let hasNewValue = 1;
@@ -13124,7 +13178,7 @@ def M2_cmaci_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpyi($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13135,7 +13189,7 @@ def M2_cmacr_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpyr($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13146,7 +13200,7 @@ def M2_cmacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13158,7 +13212,7 @@ def M2_cmacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -13170,7 +13224,7 @@ def M2_cmacsc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13182,7 +13236,7 @@ def M2_cmacsc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -13194,7 +13248,7 @@ def M2_cmpyi_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpyi($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13204,7 +13258,7 @@ def M2_cmpyr_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpyr($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13214,7 +13268,7 @@ def M2_cmpyrs_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -13227,7 +13281,7 @@ def M2_cmpyrs_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13240,7 +13294,7 @@ def M2_cmpyrsc_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32*):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101011;
@@ -13253,7 +13307,7 @@ def M2_cmpyrsc_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32*):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -13266,7 +13320,7 @@ def M2_cmpys_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13277,7 +13331,7 @@ def M2_cmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -13288,7 +13342,7 @@ def M2_cmpysc_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -13299,7 +13353,7 @@ def M2_cmpysc_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101110;
@@ -13310,7 +13364,7 @@ def M2_cnacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13322,7 +13376,7 @@ def M2_cnacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -13334,7 +13388,7 @@ def M2_cnacsc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13346,7 +13400,7 @@ def M2_cnacsc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -13358,7 +13412,7 @@ def M2_dpmpyss_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13369,7 +13423,7 @@ def M2_dpmpyss_nac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -13380,7 +13434,7 @@ def M2_dpmpyss_rnd_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -13392,7 +13446,7 @@ def M2_dpmpyss_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13402,7 +13456,7 @@ def M2_dpmpyuu_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13413,7 +13467,7 @@ def M2_dpmpyuu_nac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111011;
@@ -13424,7 +13478,7 @@ def M2_dpmpyuu_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -13434,7 +13488,7 @@ def M2_hmmpyh_rs1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13447,7 +13501,7 @@ def M2_hmmpyh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13460,7 +13514,7 @@ def M2_hmmpyl_rs1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -13473,7 +13527,7 @@ def M2_hmmpyl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13486,7 +13540,7 @@ def M2_maci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyi($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889, ImmRegRel {
+tc_8cb685d9, TypeM>, Enc_2ae154, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -13501,7 +13555,7 @@ def M2_macsin : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rx32 -= mpyi($Rs32,#$Ii)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_11522288 {
+tc_a12a5971, TypeM>, Enc_c90aca {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100001100;
let hasNewValue = 1;
@@ -13519,7 +13573,7 @@ def M2_macsip : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rx32 += mpyi($Rs32,#$Ii)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_11522288, ImmRegRel {
+tc_a12a5971, TypeM>, Enc_c90aca, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100001000;
let hasNewValue = 1;
@@ -13538,7 +13592,7 @@ def M2_mmachs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -13550,7 +13604,7 @@ def M2_mmachs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -13562,7 +13616,7 @@ def M2_mmachs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -13574,7 +13628,7 @@ def M2_mmachs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -13586,7 +13640,7 @@ def M2_mmacls_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -13598,7 +13652,7 @@ def M2_mmacls_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -13610,7 +13664,7 @@ def M2_mmacls_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -13622,7 +13676,7 @@ def M2_mmacls_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -13634,7 +13688,7 @@ def M2_mmacuhs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -13646,7 +13700,7 @@ def M2_mmacuhs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -13658,7 +13712,7 @@ def M2_mmacuhs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -13670,7 +13724,7 @@ def M2_mmacuhs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -13682,7 +13736,7 @@ def M2_mmaculs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -13694,7 +13748,7 @@ def M2_mmaculs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -13706,7 +13760,7 @@ def M2_mmaculs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -13718,7 +13772,7 @@ def M2_mmaculs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -13730,7 +13784,7 @@ def M2_mmpyh_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -13741,7 +13795,7 @@ def M2_mmpyh_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -13752,7 +13806,7 @@ def M2_mmpyh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -13763,7 +13817,7 @@ def M2_mmpyh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -13774,7 +13828,7 @@ def M2_mmpyl_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -13785,7 +13839,7 @@ def M2_mmpyl_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -13796,7 +13850,7 @@ def M2_mmpyl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -13807,7 +13861,7 @@ def M2_mmpyl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -13818,7 +13872,7 @@ def M2_mmpyuh_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -13829,7 +13883,7 @@ def M2_mmpyuh_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -13840,7 +13894,7 @@ def M2_mmpyuh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -13851,7 +13905,7 @@ def M2_mmpyuh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -13862,7 +13916,7 @@ def M2_mmpyul_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -13873,7 +13927,7 @@ def M2_mmpyul_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -13884,7 +13938,7 @@ def M2_mmpyul_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -13895,7 +13949,7 @@ def M2_mmpyul_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -13906,7 +13960,7 @@ def M2_mpy_acc_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13919,7 +13973,7 @@ def M2_mpy_acc_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13932,7 +13986,7 @@ def M2_mpy_acc_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13945,7 +13999,7 @@ def M2_mpy_acc_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13958,7 +14012,7 @@ def M2_mpy_acc_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13971,7 +14025,7 @@ def M2_mpy_acc_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13984,7 +14038,7 @@ def M2_mpy_acc_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13997,7 +14051,7 @@ def M2_mpy_acc_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14010,7 +14064,7 @@ def M2_mpy_acc_sat_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14024,7 +14078,7 @@ def M2_mpy_acc_sat_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14038,7 +14092,7 @@ def M2_mpy_acc_sat_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14052,7 +14106,7 @@ def M2_mpy_acc_sat_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14066,7 +14120,7 @@ def M2_mpy_acc_sat_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14080,7 +14134,7 @@ def M2_mpy_acc_sat_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14094,7 +14148,7 @@ def M2_mpy_acc_sat_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14108,7 +14162,7 @@ def M2_mpy_acc_sat_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14122,7 +14176,7 @@ def M2_mpy_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14134,7 +14188,7 @@ def M2_mpy_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14146,7 +14200,7 @@ def M2_mpy_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14158,7 +14212,7 @@ def M2_mpy_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14170,7 +14224,7 @@ def M2_mpy_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14182,7 +14236,7 @@ def M2_mpy_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14194,7 +14248,7 @@ def M2_mpy_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14206,7 +14260,7 @@ def M2_mpy_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14218,7 +14272,7 @@ def M2_mpy_nac_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14231,7 +14285,7 @@ def M2_mpy_nac_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14244,7 +14298,7 @@ def M2_mpy_nac_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14257,7 +14311,7 @@ def M2_mpy_nac_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14270,7 +14324,7 @@ def M2_mpy_nac_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14283,7 +14337,7 @@ def M2_mpy_nac_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14296,7 +14350,7 @@ def M2_mpy_nac_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14309,7 +14363,7 @@ def M2_mpy_nac_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14322,7 +14376,7 @@ def M2_mpy_nac_sat_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14336,7 +14390,7 @@ def M2_mpy_nac_sat_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14350,7 +14404,7 @@ def M2_mpy_nac_sat_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14364,7 +14418,7 @@ def M2_mpy_nac_sat_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14378,7 +14432,7 @@ def M2_mpy_nac_sat_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14392,7 +14446,7 @@ def M2_mpy_nac_sat_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14406,7 +14460,7 @@ def M2_mpy_nac_sat_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14420,7 +14474,7 @@ def M2_mpy_nac_sat_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14434,7 +14488,7 @@ def M2_mpy_rnd_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14446,7 +14500,7 @@ def M2_mpy_rnd_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14458,7 +14512,7 @@ def M2_mpy_rnd_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14470,7 +14524,7 @@ def M2_mpy_rnd_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14482,7 +14536,7 @@ def M2_mpy_rnd_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14494,7 +14548,7 @@ def M2_mpy_rnd_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14506,7 +14560,7 @@ def M2_mpy_rnd_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14518,7 +14572,7 @@ def M2_mpy_rnd_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14530,7 +14584,7 @@ def M2_mpy_sat_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14543,7 +14597,7 @@ def M2_mpy_sat_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14556,7 +14610,7 @@ def M2_mpy_sat_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14569,7 +14623,7 @@ def M2_mpy_sat_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14582,7 +14636,7 @@ def M2_mpy_sat_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14595,7 +14649,7 @@ def M2_mpy_sat_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14608,7 +14662,7 @@ def M2_mpy_sat_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14621,7 +14675,7 @@ def M2_mpy_sat_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14634,7 +14688,7 @@ def M2_mpy_sat_rnd_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14647,7 +14701,7 @@ def M2_mpy_sat_rnd_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14660,7 +14714,7 @@ def M2_mpy_sat_rnd_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14673,7 +14727,7 @@ def M2_mpy_sat_rnd_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14686,7 +14740,7 @@ def M2_mpy_sat_rnd_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14699,7 +14753,7 @@ def M2_mpy_sat_rnd_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14712,7 +14766,7 @@ def M2_mpy_sat_rnd_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14725,7 +14779,7 @@ def M2_mpy_sat_rnd_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14738,7 +14792,7 @@ def M2_mpy_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101000;
@@ -14750,7 +14804,7 @@ def M2_mpy_up_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -14762,7 +14816,7 @@ def M2_mpy_up_s1_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -14775,7 +14829,7 @@ def M2_mpyd_acc_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14786,7 +14840,7 @@ def M2_mpyd_acc_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14797,7 +14851,7 @@ def M2_mpyd_acc_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14808,7 +14862,7 @@ def M2_mpyd_acc_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14819,7 +14873,7 @@ def M2_mpyd_acc_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14830,7 +14884,7 @@ def M2_mpyd_acc_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14841,7 +14895,7 @@ def M2_mpyd_acc_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14852,7 +14906,7 @@ def M2_mpyd_acc_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14863,7 +14917,7 @@ def M2_mpyd_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14873,7 +14927,7 @@ def M2_mpyd_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14883,7 +14937,7 @@ def M2_mpyd_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14893,7 +14947,7 @@ def M2_mpyd_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14903,7 +14957,7 @@ def M2_mpyd_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14913,7 +14967,7 @@ def M2_mpyd_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14923,7 +14977,7 @@ def M2_mpyd_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14933,7 +14987,7 @@ def M2_mpyd_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14943,7 +14997,7 @@ def M2_mpyd_nac_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14954,7 +15008,7 @@ def M2_mpyd_nac_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -14965,7 +15019,7 @@ def M2_mpyd_nac_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14976,7 +15030,7 @@ def M2_mpyd_nac_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -14987,7 +15041,7 @@ def M2_mpyd_nac_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14998,7 +15052,7 @@ def M2_mpyd_nac_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -15009,7 +15063,7 @@ def M2_mpyd_nac_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -15020,7 +15074,7 @@ def M2_mpyd_nac_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -15031,7 +15085,7 @@ def M2_mpyd_rnd_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15041,7 +15095,7 @@ def M2_mpyd_rnd_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15051,7 +15105,7 @@ def M2_mpyd_rnd_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15061,7 +15115,7 @@ def M2_mpyd_rnd_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15071,7 +15125,7 @@ def M2_mpyd_rnd_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15081,7 +15135,7 @@ def M2_mpyd_rnd_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15091,7 +15145,7 @@ def M2_mpyd_rnd_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15101,7 +15155,7 @@ def M2_mpyd_rnd_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15111,7 +15165,7 @@ def M2_mpyi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyi($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773, ImmRegRel {
+tc_8c8041e6, TypeM>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101000;
@@ -15125,7 +15179,7 @@ def M2_mpysin : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Rd32 = -mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, Enc_16355964 {
+tc_ae2c2dc2, TypeM>, Enc_b8c967 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100000100;
let hasNewValue = 1;
@@ -15136,7 +15190,7 @@ def M2_mpysip : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rd32 = +mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, Enc_16355964 {
+tc_ae2c2dc2, TypeM>, Enc_b8c967 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100000000;
let hasNewValue = 1;
@@ -15152,7 +15206,7 @@ def M2_mpysmi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, m32_0Imm:$Ii),
"$Rd32 = mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, ImmRegRel {
+tc_ae2c2dc2, TypeM>, ImmRegRel {
let hasNewValue = 1;
let opNewValue = 0;
let CextOpcode = "M2_mpyi";
@@ -15168,7 +15222,7 @@ def M2_mpysu_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpysu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101011;
@@ -15180,7 +15234,7 @@ def M2_mpyu_acc_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15193,7 +15247,7 @@ def M2_mpyu_acc_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15206,7 +15260,7 @@ def M2_mpyu_acc_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15219,7 +15273,7 @@ def M2_mpyu_acc_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15232,7 +15286,7 @@ def M2_mpyu_acc_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15245,7 +15299,7 @@ def M2_mpyu_acc_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15258,7 +15312,7 @@ def M2_mpyu_acc_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15271,7 +15325,7 @@ def M2_mpyu_acc_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15284,7 +15338,7 @@ def M2_mpyu_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15296,7 +15350,7 @@ def M2_mpyu_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15308,7 +15362,7 @@ def M2_mpyu_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15320,7 +15374,7 @@ def M2_mpyu_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15332,7 +15386,7 @@ def M2_mpyu_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15344,7 +15398,7 @@ def M2_mpyu_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15356,7 +15410,7 @@ def M2_mpyu_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15368,7 +15422,7 @@ def M2_mpyu_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15380,7 +15434,7 @@ def M2_mpyu_nac_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15393,7 +15447,7 @@ def M2_mpyu_nac_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15406,7 +15460,7 @@ def M2_mpyu_nac_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15419,7 +15473,7 @@ def M2_mpyu_nac_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15432,7 +15486,7 @@ def M2_mpyu_nac_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15445,7 +15499,7 @@ def M2_mpyu_nac_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15458,7 +15512,7 @@ def M2_mpyu_nac_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15471,7 +15525,7 @@ def M2_mpyu_nac_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15484,7 +15538,7 @@ def M2_mpyu_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101010;
@@ -15496,7 +15550,7 @@ def M2_mpyud_acc_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15507,7 +15561,7 @@ def M2_mpyud_acc_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15518,7 +15572,7 @@ def M2_mpyud_acc_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15529,7 +15583,7 @@ def M2_mpyud_acc_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15540,7 +15594,7 @@ def M2_mpyud_acc_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15551,7 +15605,7 @@ def M2_mpyud_acc_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15562,7 +15616,7 @@ def M2_mpyud_acc_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15573,7 +15627,7 @@ def M2_mpyud_acc_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15584,7 +15638,7 @@ def M2_mpyud_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15594,7 +15648,7 @@ def M2_mpyud_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15604,7 +15658,7 @@ def M2_mpyud_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15614,7 +15668,7 @@ def M2_mpyud_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15624,7 +15678,7 @@ def M2_mpyud_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15634,7 +15688,7 @@ def M2_mpyud_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15644,7 +15698,7 @@ def M2_mpyud_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15654,7 +15708,7 @@ def M2_mpyud_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15664,7 +15718,7 @@ def M2_mpyud_nac_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15675,7 +15729,7 @@ def M2_mpyud_nac_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15686,7 +15740,7 @@ def M2_mpyud_nac_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15697,7 +15751,7 @@ def M2_mpyud_nac_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15708,7 +15762,7 @@ def M2_mpyud_nac_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15719,7 +15773,7 @@ def M2_mpyud_nac_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15730,7 +15784,7 @@ def M2_mpyud_nac_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15741,7 +15795,7 @@ def M2_mpyud_nac_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15752,7 +15806,7 @@ def M2_mpyui : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyui($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -15762,7 +15816,7 @@ def M2_nacci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= add($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_c0cd91a8, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111100;
@@ -15776,7 +15830,7 @@ def M2_naccii : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 -= add($Rs32,#$Ii)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_11522288 {
+tc_c0cd91a8, TypeM>, Enc_c90aca {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100010100;
let hasNewValue = 1;
@@ -15794,7 +15848,7 @@ def M2_subacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rt32, IntRegs:$Rs32),
"$Rx32 += sub($Rt32,$Rs32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_7692963 {
+tc_c0cd91a8, TypeM>, Enc_a568d4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -15808,7 +15862,7 @@ def M2_vabsdiffh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffh($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333 {
+tc_63cd9d2d, TypeM>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -15818,7 +15872,7 @@ def M2_vabsdiffw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffw($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333 {
+tc_63cd9d2d, TypeM>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -15828,7 +15882,7 @@ def M2_vcmac_s0_sat_i : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vcmpyi($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -15840,7 +15894,7 @@ def M2_vcmac_s0_sat_r : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vcmpyr($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -15852,7 +15906,7 @@ def M2_vcmpy_s0_sat_i : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyi($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -15863,7 +15917,7 @@ def M2_vcmpy_s0_sat_r : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyr($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -15874,7 +15928,7 @@ def M2_vcmpy_s1_sat_i : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyi($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -15885,7 +15939,7 @@ def M2_vcmpy_s1_sat_r : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyr($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -15896,7 +15950,7 @@ def M2_vdmacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpy($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -15908,7 +15962,7 @@ def M2_vdmacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpy($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -15920,7 +15974,7 @@ def M2_vdmpyrs_s0 : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vdmpy($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001000;
@@ -15933,7 +15987,7 @@ def M2_vdmpyrs_s1 : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vdmpy($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001100;
@@ -15946,7 +16000,7 @@ def M2_vdmpys_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpy($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -15957,7 +16011,7 @@ def M2_vdmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpy($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -15968,7 +16022,7 @@ def M2_vmac2 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -15979,7 +16033,7 @@ def M2_vmac2es : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -15990,7 +16044,7 @@ def M2_vmac2es_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16002,7 +16056,7 @@ def M2_vmac2es_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -16014,7 +16068,7 @@ def M2_vmac2s_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -16026,7 +16080,7 @@ def M2_vmac2s_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -16038,7 +16092,7 @@ def M2_vmac2su_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyhsu($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111011;
@@ -16050,7 +16104,7 @@ def M2_vmac2su_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyhsu($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111111;
@@ -16062,7 +16116,7 @@ def M2_vmpy2es_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyeh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16073,7 +16127,7 @@ def M2_vmpy2es_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyeh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -16084,7 +16138,7 @@ def M2_vmpy2s_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyh($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -16095,7 +16149,7 @@ def M2_vmpy2s_s0pack : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vmpyh($Rs32,$Rt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -16108,7 +16162,7 @@ def M2_vmpy2s_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyh($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16119,7 +16173,7 @@ def M2_vmpy2s_s1pack : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vmpyh($Rs32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -16132,7 +16186,7 @@ def M2_vmpy2su_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyhsu($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -16143,7 +16197,7 @@ def M2_vmpy2su_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyhsu($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16154,7 +16208,7 @@ def M2_vraddh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vraddh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001001;
@@ -16166,7 +16220,7 @@ def M2_vradduh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vradduh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001000;
@@ -16178,7 +16232,7 @@ def M2_vrcmaci_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyi($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16189,7 +16243,7 @@ def M2_vrcmaci_s0c : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyi($Rss32,$Rtt32*)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -16200,7 +16254,7 @@ def M2_vrcmacr_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyr($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16211,7 +16265,7 @@ def M2_vrcmacr_s0c : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyr($Rss32,$Rtt32*)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -16222,7 +16276,7 @@ def M2_vrcmpyi_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyi($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16232,7 +16286,7 @@ def M2_vrcmpyi_s0c : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyi($Rss32,$Rtt32*)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -16242,7 +16296,7 @@ def M2_vrcmpyr_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyr($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16252,7 +16306,7 @@ def M2_vrcmpyr_s0c : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyr($Rss32,$Rtt32*)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -16262,7 +16316,7 @@ def M2_vrcmpys_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += vrcmpys($Rss32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8cb685d9, TypeM> {
let isPseudo = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
@@ -16270,7 +16324,7 @@ def M2_vrcmpys_acc_s1_h : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpys($Rss32,$Rtt32):<<1:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -16282,7 +16336,7 @@ def M2_vrcmpys_acc_s1_l : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpys($Rss32,$Rtt32):<<1:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -16294,14 +16348,14 @@ def M2_vrcmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vrcmpys($Rss32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let isPseudo = 1;
}
def M2_vrcmpys_s1_h : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpys($Rss32,$Rtt32):<<1:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16312,7 +16366,7 @@ def M2_vrcmpys_s1_l : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpys($Rss32,$Rtt32):<<1:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -16323,7 +16377,7 @@ def M2_vrcmpys_s1rp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = vrcmpys($Rss32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -16332,7 +16386,7 @@ def M2_vrcmpys_s1rp_h : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vrcmpys($Rss32,$Rtt32):<<1:rnd:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001101;
@@ -16345,7 +16399,7 @@ def M2_vrcmpys_s1rp_l : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vrcmpys($Rss32,$Rtt32):<<1:rnd:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001101;
@@ -16358,7 +16412,7 @@ def M2_vrmac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16369,7 +16423,7 @@ def M2_vrmpy_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16379,7 +16433,7 @@ def M2_xor_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111100;
@@ -16393,7 +16447,7 @@ def M4_and_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16407,7 +16461,7 @@ def M4_and_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16421,7 +16475,7 @@ def M4_and_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16435,7 +16489,7 @@ def M4_and_xor : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16449,7 +16503,7 @@ def M4_cmpyi_wh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyiwh($Rss32,$Rt32):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16462,7 +16516,7 @@ def M4_cmpyi_whc : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyiwh($Rss32,$Rt32*):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645, Requires<[HasV5T]> {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16475,7 +16529,7 @@ def M4_cmpyr_wh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyrwh($Rss32,$Rt32):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16488,7 +16542,7 @@ def M4_cmpyr_whc : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyrwh($Rss32,$Rt32*):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645, Requires<[HasV5T]> {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16501,7 +16555,7 @@ def M4_mac_up_s1_sat : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
@@ -16516,7 +16570,7 @@ def M4_mpyri_addi : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii, IntRegs:$Rs32, u6_0Imm:$II),
"$Rd32 = add(#$Ii,mpyi($Rs32,#$II))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_971574, ImmRegRel {
+tc_a12a5971, TypeALU64>, Enc_322e1b, ImmRegRel {
let Inst{31-24} = 0b11011000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16532,7 +16586,7 @@ def M4_mpyri_addr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Ru32, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rd32 = add($Ru32,mpyi($Rs32,#$Ii))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_236434, ImmRegRel {
+tc_a12a5971, TypeALU64>, Enc_420cf3, ImmRegRel {
let Inst{31-23} = 0b110111111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16549,7 +16603,7 @@ def M4_mpyri_addr_u2 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Ru32, u6_2Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = add($Ru32,mpyi(#$Ii,$Rs32))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_9959498 {
+tc_69bb508b, TypeALU64>, Enc_277737 {
let Inst{31-23} = 0b110111110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16559,7 +16613,7 @@ def M4_mpyrr_addi : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add(#$Ii,mpyi($Rs32,$Rt32))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_2216485, ImmRegRel {
+tc_8cb685d9, TypeALU64>, Enc_a7b8e8, ImmRegRel {
let Inst{31-23} = 0b110101110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16576,7 +16630,7 @@ def M4_mpyrr_addr : HInst<
(outs IntRegs:$Ry32),
(ins IntRegs:$Ru32, IntRegs:$Ry32in, IntRegs:$Rs32),
"$Ry32 = add($Ru32,mpyi($Ry32in,$Rs32))",
-M_tc_3x_SLOT23, TypeM>, Enc_13770697, ImmRegRel {
+tc_8cb685d9, TypeM>, Enc_7f1a05, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100011000;
@@ -16591,7 +16645,7 @@ def M4_nac_up_s1_sat : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
@@ -16606,7 +16660,7 @@ def M4_or_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16620,7 +16674,7 @@ def M4_or_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16634,7 +16688,7 @@ def M4_or_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16648,7 +16702,7 @@ def M4_or_xor : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16662,7 +16716,7 @@ def M4_pmpyw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = pmpyw($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -16672,7 +16726,7 @@ def M4_pmpyw_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 ^= pmpyw($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -16683,7 +16737,7 @@ def M4_vpmpyh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vpmpyh($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101110;
@@ -16693,7 +16747,7 @@ def M4_vpmpyh_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 ^= vpmpyh($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111101;
@@ -16704,7 +16758,7 @@ def M4_vrmpyeh_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyweh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -16715,7 +16769,7 @@ def M4_vrmpyeh_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyweh($Rss32,$Rtt32):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -16726,7 +16780,7 @@ def M4_vrmpyeh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyweh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -16736,7 +16790,7 @@ def M4_vrmpyeh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyweh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -16746,7 +16800,7 @@ def M4_vrmpyoh_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpywoh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -16757,7 +16811,7 @@ def M4_vrmpyoh_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpywoh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -16768,7 +16822,7 @@ def M4_vrmpyoh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpywoh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -16778,7 +16832,7 @@ def M4_vrmpyoh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpywoh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16788,7 +16842,7 @@ def M4_xor_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16802,7 +16856,7 @@ def M4_xor_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16816,7 +16870,7 @@ def M4_xor_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16830,7 +16884,7 @@ def M4_xor_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 ^= xor($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_12702821 {
+tc_3c10f809, TypeS_3op>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001010100;
@@ -16841,7 +16895,7 @@ def M5_vdmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpybsu($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821, Requires<[HasV5T]> {
+tc_8cb685d9, TypeM>, Enc_88c16c, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -16853,7 +16907,7 @@ def M5_vdmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpybsu($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157, Requires<[HasV5T]> {
+tc_8c8041e6, TypeM>, Enc_a56825, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16864,7 +16918,7 @@ def M5_vmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpybsu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -16875,7 +16929,7 @@ def M5_vmacbuu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpybu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -16886,7 +16940,7 @@ def M5_vmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpybsu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -16896,7 +16950,7 @@ def M5_vmpybuu : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpybu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16906,7 +16960,7 @@ def M5_vrmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpybsu($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -16917,7 +16971,7 @@ def M5_vrmacbuu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpybu($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -16928,7 +16982,7 @@ def M5_vrmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpybsu($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -16938,7 +16992,7 @@ def M5_vrmpybuu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpybu($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -16948,7 +17002,7 @@ def M6_vabsdiffb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffb($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> {
+tc_faab1248, TypeM>, Enc_ea23e4, Requires<[HasV62T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -16958,7 +17012,7 @@ def M6_vabsdiffub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffub($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> {
+tc_faab1248, TypeM>, Enc_ea23e4, Requires<[HasV62T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16968,15 +17022,15 @@ def PS_loadrbabs : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memb(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let isPredicable = 1;
@@ -16991,13 +17045,13 @@ def PS_loadrdabs : HInst<
(outs DoubleRegs:$Rdd32),
(ins u29_3Imm:$Ii),
"$Rdd32 = memd(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4975051, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_509701, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let isPredicable = 1;
@@ -17012,15 +17066,15 @@ def PS_loadrhabs : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memh(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let isPredicable = 1;
@@ -17035,15 +17089,15 @@ def PS_loadriabs : HInst<
(outs IntRegs:$Rd32),
(ins u30_2Imm:$Ii),
"$Rd32 = memw(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_8814718, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_4f4ed7, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let isPredicable = 1;
@@ -17058,15 +17112,15 @@ def PS_loadrubabs : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memub(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let isPredicable = 1;
@@ -17081,15 +17135,15 @@ def PS_loadruhabs : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memuh(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let isPredicable = 1;
@@ -17104,7 +17158,7 @@ def PS_storerbabs : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Rt32),
"memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_12395768, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_1b64fb, AddrModeRel {
let Inst{24-21} = 0b0000;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17126,16 +17180,16 @@ def PS_storerbnewabs : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Nt8),
"memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_4050532, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_ad1831, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let isPredicable = 1;
@@ -17151,7 +17205,7 @@ def PS_storerdabs : HInst<
(outs),
(ins u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_11682941, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_5c124a, AddrModeRel {
let Inst{24-21} = 0b0110;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17172,7 +17226,7 @@ def PS_storerfabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17193,7 +17247,7 @@ def PS_storerhabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17215,16 +17269,16 @@ def PS_storerhnewabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Nt8),
"memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_13618890, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_bc03e5, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let isPredicable = 1;
@@ -17240,7 +17294,7 @@ def PS_storeriabs : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Rt32),
"memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_15999208, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_541f26, AddrModeRel {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17262,16 +17316,16 @@ def PS_storerinewabs : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Nt8),
"memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_12297800, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_78cbf0, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let isPredicable = 1;
@@ -17287,7 +17341,7 @@ def S2_addasl_rrri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32, u3_0Imm:$Ii),
"$Rd32 = addasl($Rt32,$Rs32,#$Ii)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_3494181 {
+tc_090485bb, TypeS_3op>, Enc_47ef61 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000100000;
let hasNewValue = 1;
@@ -17298,7 +17352,7 @@ def S2_allocframe : HInst<
(outs),
(ins u11_3Imm:$Ii),
"allocframe(#$Ii)",
-ST_tc_ld_SLOT0, TypeST>, Enc_15830826 {
+tc_0cb867f2, TypeST>, Enc_22c845 {
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10100000100;
let Inst{20-16} = 0b11101;
@@ -17312,7 +17366,7 @@ def S2_asl_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asl($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000000000;
}
@@ -17320,7 +17374,7 @@ def S2_asl_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17330,7 +17384,7 @@ def S2_asl_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17340,7 +17394,7 @@ def S2_asl_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17350,7 +17404,7 @@ def S2_asl_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17360,7 +17414,7 @@ def S2_asl_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -17370,7 +17424,7 @@ def S2_asl_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asl($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -17381,7 +17435,7 @@ def S2_asl_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17394,7 +17448,7 @@ def S2_asl_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17407,7 +17461,7 @@ def S2_asl_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17420,7 +17474,7 @@ def S2_asl_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17433,19 +17487,20 @@ def S2_asl_i_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asl($Rs32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_47ab9233, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asl_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -17458,7 +17513,7 @@ def S2_asl_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vaslh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -17467,7 +17522,7 @@ def S2_asl_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vaslw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -17476,7 +17531,7 @@ def S2_asl_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = asl($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -17485,7 +17540,7 @@ def S2_asl_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -17496,7 +17551,7 @@ def S2_asl_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -17507,7 +17562,7 @@ def S2_asl_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -17518,7 +17573,7 @@ def S2_asl_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -17529,7 +17584,7 @@ def S2_asl_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -17540,7 +17595,7 @@ def S2_asl_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asl($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -17551,7 +17606,7 @@ def S2_asl_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -17564,7 +17619,7 @@ def S2_asl_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -17577,7 +17632,7 @@ def S2_asl_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -17590,7 +17645,7 @@ def S2_asl_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -17603,19 +17658,20 @@ def S2_asl_r_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asl($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_47ab9233, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asl_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vaslh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -17624,7 +17680,7 @@ def S2_asl_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vaslw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -17633,7 +17689,7 @@ def S2_asr_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asr($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000000000;
}
@@ -17641,7 +17697,7 @@ def S2_asr_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17651,7 +17707,7 @@ def S2_asr_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17661,7 +17717,7 @@ def S2_asr_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17671,7 +17727,7 @@ def S2_asr_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17681,7 +17737,7 @@ def S2_asr_i_p_rnd : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asr($Rss32,#$Ii):rnd",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_5eac98, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000000110;
let prefersSlot3 = 1;
@@ -17690,14 +17746,14 @@ def S2_asr_i_p_rnd_goodsyntax : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asrrnd($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let isPseudo = 1;
}
def S2_asr_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asr($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -17708,7 +17764,7 @@ def S2_asr_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17721,7 +17777,7 @@ def S2_asr_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17734,7 +17790,7 @@ def S2_asr_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17747,7 +17803,7 @@ def S2_asr_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17760,7 +17816,7 @@ def S2_asr_i_r_rnd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asr($Rs32,#$Ii):rnd",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100010;
@@ -17772,7 +17828,7 @@ def S2_asr_i_r_rnd_goodsyntax : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asrrnd($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op> {
+tc_63cd9d2d, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -17781,18 +17837,19 @@ def S2_asr_i_svw_trun : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rd32 = vasrw($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2380082 {
+tc_7ca2ea10, TypeS_2op>, Enc_8dec2e {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_asr_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -17801,7 +17858,7 @@ def S2_asr_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vasrw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -17810,7 +17867,7 @@ def S2_asr_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = asr($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -17819,7 +17876,7 @@ def S2_asr_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -17830,7 +17887,7 @@ def S2_asr_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -17841,7 +17898,7 @@ def S2_asr_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -17852,7 +17909,7 @@ def S2_asr_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -17863,7 +17920,7 @@ def S2_asr_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -17874,7 +17931,7 @@ def S2_asr_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asr($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -17885,7 +17942,7 @@ def S2_asr_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -17898,7 +17955,7 @@ def S2_asr_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -17911,7 +17968,7 @@ def S2_asr_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -17924,7 +17981,7 @@ def S2_asr_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -17937,30 +17994,32 @@ def S2_asr_r_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asr($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_47ab9233, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asr_r_svw_trun : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = vasrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_7ca2ea10, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_asr_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vasrh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -17969,7 +18028,7 @@ def S2_asr_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vasrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -17978,25 +18037,27 @@ def S2_brev : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = brev($Rs32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_brevp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = brev($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_cabacdecbin : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = decbin($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_5d806107, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -18008,77 +18069,84 @@ def S2_cl0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = cl0($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl0p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = cl0($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = cl1($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl1p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = cl1($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = clb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clbnorm : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = normamt($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clbp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = clb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clrbit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = clrbit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -18089,7 +18157,7 @@ def S2_clrbit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = clrbit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -18100,55 +18168,60 @@ def S2_ct0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = ct0($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct0p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = ct0($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = ct1($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct1p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = ct1($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_deinterleave : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = deinterleave($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_extractu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rd32 = extractu($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_11930928 {
+tc_c0cd91a8, TypeS_2op>, Enc_b388cf {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011010;
let hasNewValue = 1;
@@ -18159,7 +18232,7 @@ def S2_extractu_rp : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rd32 = extractu($Rs32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_15472748 {
+tc_87601822, TypeS_3op>, Enc_e07374 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001001000;
@@ -18171,7 +18244,7 @@ def S2_extractup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rdd32 = extractu($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_9894557 {
+tc_c0cd91a8, TypeS_2op>, Enc_b84c4c {
let Inst{31-24} = 0b10000001;
let prefersSlot3 = 1;
}
@@ -18179,7 +18252,7 @@ def S2_extractup_rp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = extractu($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -18189,56 +18262,61 @@ def S2_insert : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = insert($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2880796 {
+tc_d95f4e98, TypeS_2op>, Enc_a1e29d {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Constraints = "$Rx32 = $Rx32in";
}
def S2_insert_rp : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rx32 = insert($Rs32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_16311032 {
+tc_3c10f809, TypeS_3op>, Enc_179b35 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001000000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Constraints = "$Rx32 = $Rx32in";
}
def S2_insertp : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rxx32 = insert($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_631197 {
+tc_d95f4e98, TypeS_2op>, Enc_143a3c {
let Inst{31-24} = 0b10000011;
+let prefersSlot3 = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
def S2_insertp_rp : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 = insert($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_12702821 {
+tc_3c10f809, TypeS_3op>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001010000;
+let prefersSlot3 = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
def S2_interleave : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = interleave($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_lfsp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = lfs($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -18248,7 +18326,7 @@ def S2_lsl_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = lsl($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -18257,7 +18335,7 @@ def S2_lsl_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -18268,7 +18346,7 @@ def S2_lsl_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -18279,7 +18357,7 @@ def S2_lsl_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -18290,7 +18368,7 @@ def S2_lsl_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -18301,7 +18379,7 @@ def S2_lsl_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -18312,7 +18390,7 @@ def S2_lsl_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = lsl($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -18323,7 +18401,7 @@ def S2_lsl_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -18336,7 +18414,7 @@ def S2_lsl_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -18349,7 +18427,7 @@ def S2_lsl_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -18362,7 +18440,7 @@ def S2_lsl_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -18375,7 +18453,7 @@ def S2_lsl_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlslh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -18384,7 +18462,7 @@ def S2_lsl_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlslw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -18393,7 +18471,7 @@ def S2_lsr_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = lsr($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000000000;
}
@@ -18401,7 +18479,7 @@ def S2_lsr_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -18411,7 +18489,7 @@ def S2_lsr_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -18421,7 +18499,7 @@ def S2_lsr_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -18431,7 +18509,7 @@ def S2_lsr_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -18441,7 +18519,7 @@ def S2_lsr_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -18451,7 +18529,7 @@ def S2_lsr_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = lsr($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -18462,7 +18540,7 @@ def S2_lsr_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -18475,7 +18553,7 @@ def S2_lsr_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -18488,7 +18566,7 @@ def S2_lsr_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -18501,7 +18579,7 @@ def S2_lsr_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -18514,7 +18592,7 @@ def S2_lsr_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -18527,7 +18605,7 @@ def S2_lsr_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vlsrh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b001;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -18536,7 +18614,7 @@ def S2_lsr_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vlsrw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -18545,7 +18623,7 @@ def S2_lsr_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = lsr($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -18554,7 +18632,7 @@ def S2_lsr_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -18565,7 +18643,7 @@ def S2_lsr_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -18576,7 +18654,7 @@ def S2_lsr_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -18587,7 +18665,7 @@ def S2_lsr_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -18598,7 +18676,7 @@ def S2_lsr_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -18609,7 +18687,7 @@ def S2_lsr_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = lsr($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -18620,7 +18698,7 @@ def S2_lsr_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -18633,7 +18711,7 @@ def S2_lsr_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -18646,7 +18724,7 @@ def S2_lsr_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -18659,7 +18737,7 @@ def S2_lsr_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -18672,7 +18750,7 @@ def S2_lsr_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlsrh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -18681,7 +18759,7 @@ def S2_lsr_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlsrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -18690,7 +18768,7 @@ def S2_packhl : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = packhl($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1997594 {
+tc_548f402d, TypeALU32_3op>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110101100;
@@ -18700,7 +18778,7 @@ def S2_parityp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = parity($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9277990 {
+tc_87601822, TypeALU64>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010000000;
@@ -18712,7 +18790,7 @@ def S2_pstorerbf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100000;
let isPredicated = 1;
@@ -18734,7 +18812,7 @@ def S2_pstorerbf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -18752,7 +18830,7 @@ def S2_pstorerbf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -18760,7 +18838,7 @@ def S2_pstorerbfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -18779,7 +18857,7 @@ def S2_pstorerbnewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000100101;
@@ -18788,8 +18866,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -18804,7 +18882,7 @@ def S2_pstorerbnewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b100;
@@ -18814,8 +18892,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18825,7 +18903,7 @@ def S2_pstorerbnewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -18834,7 +18912,7 @@ def S2_pstorerbnewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -18845,8 +18923,8 @@ let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18856,7 +18934,7 @@ def S2_pstorerbnewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000000101;
@@ -18864,8 +18942,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -18880,7 +18958,7 @@ def S2_pstorerbnewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b100;
@@ -18889,8 +18967,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18900,7 +18978,7 @@ def S2_pstorerbnewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -18909,7 +18987,7 @@ def S2_pstorerbnewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -18919,8 +18997,8 @@ let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18930,7 +19008,7 @@ def S2_pstorerbt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000000;
let isPredicated = 1;
@@ -18951,7 +19029,7 @@ def S2_pstorerbt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -18968,7 +19046,7 @@ def S2_pstorerbt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -18976,7 +19054,7 @@ def S2_pstorerbtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -18994,7 +19072,7 @@ def S2_pstorerdf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100110;
let isPredicated = 1;
@@ -19015,7 +19093,7 @@ def S2_pstorerdf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19033,7 +19111,7 @@ def S2_pstorerdf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19041,7 +19119,7 @@ def S2_pstorerdfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19060,7 +19138,7 @@ def S2_pstorerdt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000110;
let isPredicated = 1;
@@ -19080,7 +19158,7 @@ def S2_pstorerdt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19097,7 +19175,7 @@ def S2_pstorerdt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19105,7 +19183,7 @@ def S2_pstorerdtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19123,7 +19201,7 @@ def S2_pstorerff_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100011;
let isPredicated = 1;
@@ -19144,7 +19222,7 @@ def S2_pstorerff_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19162,7 +19240,7 @@ def S2_pstorerff_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19170,7 +19248,7 @@ def S2_pstorerffnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19189,7 +19267,7 @@ def S2_pstorerft_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000011;
let isPredicated = 1;
@@ -19209,7 +19287,7 @@ def S2_pstorerft_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19226,7 +19304,7 @@ def S2_pstorerft_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19234,7 +19312,7 @@ def S2_pstorerftnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19252,7 +19330,7 @@ def S2_pstorerhf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100010;
let isPredicated = 1;
@@ -19274,7 +19352,7 @@ def S2_pstorerhf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19292,7 +19370,7 @@ def S2_pstorerhf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19300,7 +19378,7 @@ def S2_pstorerhfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19319,7 +19397,7 @@ def S2_pstorerhnewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000100101;
@@ -19328,8 +19406,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -19344,7 +19422,7 @@ def S2_pstorerhnewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b101;
@@ -19354,8 +19432,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19365,7 +19443,7 @@ def S2_pstorerhnewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19374,7 +19452,7 @@ def S2_pstorerhnewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -19385,8 +19463,8 @@ let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19396,7 +19474,7 @@ def S2_pstorerhnewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000000101;
@@ -19404,8 +19482,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -19420,7 +19498,7 @@ def S2_pstorerhnewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b101;
@@ -19429,8 +19507,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19440,7 +19518,7 @@ def S2_pstorerhnewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19449,7 +19527,7 @@ def S2_pstorerhnewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -19459,8 +19537,8 @@ let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19470,7 +19548,7 @@ def S2_pstorerht_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000010;
let isPredicated = 1;
@@ -19491,7 +19569,7 @@ def S2_pstorerht_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19508,7 +19586,7 @@ def S2_pstorerht_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19516,7 +19594,7 @@ def S2_pstorerhtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19534,7 +19612,7 @@ def S2_pstorerif_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100100;
let isPredicated = 1;
@@ -19556,7 +19634,7 @@ def S2_pstorerif_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19574,7 +19652,7 @@ def S2_pstorerif_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19582,7 +19660,7 @@ def S2_pstorerifnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19602,7 +19680,7 @@ def S2_pstorerinewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000100101;
@@ -19611,8 +19689,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -19627,7 +19705,7 @@ def S2_pstorerinewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b110;
@@ -19637,8 +19715,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19648,7 +19726,7 @@ def S2_pstorerinewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19657,7 +19735,7 @@ def S2_pstorerinewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -19668,8 +19746,8 @@ let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19679,7 +19757,7 @@ def S2_pstorerinewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000000101;
@@ -19687,8 +19765,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -19703,7 +19781,7 @@ def S2_pstorerinewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b110;
@@ -19712,8 +19790,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19723,7 +19801,7 @@ def S2_pstorerinewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19732,7 +19810,7 @@ def S2_pstorerinewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -19742,8 +19820,8 @@ let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19753,7 +19831,7 @@ def S2_pstorerit_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000100;
let isPredicated = 1;
@@ -19774,7 +19852,7 @@ def S2_pstorerit_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19791,7 +19869,7 @@ def S2_pstorerit_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19799,7 +19877,7 @@ def S2_pstoreritnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19817,7 +19895,7 @@ def S2_setbit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = setbit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -19828,7 +19906,7 @@ def S2_setbit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = setbit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -19839,7 +19917,7 @@ def S2_shuffeb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = shuffeb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19848,7 +19926,7 @@ def S2_shuffeh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = shuffeh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19857,7 +19935,7 @@ def S2_shuffob : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = shuffob($Rtt32,$Rss32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11687333 {
+tc_9c18c9a5, TypeS_3op>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19866,7 +19944,7 @@ def S2_shuffoh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = shuffoh($Rtt32,$Rss32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11687333 {
+tc_9c18c9a5, TypeS_3op>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -19875,7 +19953,7 @@ def S2_storerb_io : HInst<
(outs),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13150110, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_448f7f, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -19896,7 +19974,7 @@ def S2_storerb_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111000;
let accessSize = ByteAccess;
@@ -19909,7 +19987,7 @@ def S2_storerb_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_3915770 {
+tc_251c87b2, TypeST>, Enc_b15941 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001000;
@@ -19924,7 +20002,7 @@ def S2_storerb_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001000;
let addrMode = PostInc;
@@ -19938,7 +20016,7 @@ def S2_storerb_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_12492533, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_10bc21, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -19955,7 +20033,7 @@ def S2_storerb_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101000;
let addrMode = PostInc;
@@ -19968,7 +20046,7 @@ def S2_storerb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19976,7 +20054,7 @@ def S2_storerbgp : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Rt32),
"memb(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_12395768, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_1b64fb, AddrModeRel {
let Inst{24-21} = 0b0000;
let Inst{31-27} = 0b01001;
let accessSize = ByteAccess;
@@ -19994,15 +20072,15 @@ def S2_storerbnew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10002182, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_4df4e9, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -20018,14 +20096,14 @@ def S2_storerbnew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101111101;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerb_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20034,7 +20112,7 @@ def S2_storerbnew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_5326450 {
+tc_9c68db63, TypeST>, Enc_96ce4f {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b00;
@@ -20042,8 +20120,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20052,15 +20130,15 @@ def S2_storerbnew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20069,7 +20147,7 @@ def S2_storerbnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_5900401, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_c7cd90, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b000;
@@ -20077,8 +20155,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerb_pi";
let isPredicable = 1;
let isNVStorable = 1;
@@ -20089,15 +20167,15 @@ def S2_storerbnew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20105,7 +20183,7 @@ def S2_storerbnew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20114,14 +20192,14 @@ def S2_storerbnewgp : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Nt8),
"memb(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_4050532, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_ad1831, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storerbabs";
let isPredicable = 1;
@@ -20135,7 +20213,7 @@ def S2_storerd_io : HInst<
(outs),
(ins IntRegs:$Rs32, s29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rs32+#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16319737, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_ce6828, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20155,7 +20233,7 @@ def S2_storerd_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++$Mu2:brev) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111110;
let accessSize = DoubleWordAccess;
@@ -20166,7 +20244,7 @@ def S2_storerd_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++#$Ii:circ($Mu2)) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_4501395 {
+tc_251c87b2, TypeST>, Enc_395cc4 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001110;
@@ -20180,7 +20258,7 @@ def S2_storerd_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++I:circ($Mu2)) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001110;
let addrMode = PostInc;
@@ -20193,7 +20271,7 @@ def S2_storerd_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11271630, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_85bf58, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20210,7 +20288,7 @@ def S2_storerd_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++$Mu2) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101110;
let addrMode = PostInc;
@@ -20222,7 +20300,7 @@ def S2_storerd_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20230,7 +20308,7 @@ def S2_storerdgp : HInst<
(outs),
(ins u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd(gp+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11682941, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_5c124a, AddrModeRel {
let Inst{24-21} = 0b0110;
let Inst{31-27} = 0b01001;
let accessSize = DoubleWordAccess;
@@ -20247,7 +20325,7 @@ def S2_storerf_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7736768, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_e957fb, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20267,7 +20345,7 @@ def S2_storerf_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2:brev) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111011;
let accessSize = HalfWordAccess;
@@ -20278,7 +20356,7 @@ def S2_storerf_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++#$Ii:circ($Mu2)) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_10915758 {
+tc_251c87b2, TypeST>, Enc_935d9b {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001011;
@@ -20292,7 +20370,7 @@ def S2_storerf_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++I:circ($Mu2)) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001011;
let addrMode = PostInc;
@@ -20305,7 +20383,7 @@ def S2_storerf_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11492529, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_052c7d, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20322,7 +20400,7 @@ def S2_storerf_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101011;
let addrMode = PostInc;
@@ -20334,7 +20412,7 @@ def S2_storerf_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20342,7 +20420,7 @@ def S2_storerfgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(gp+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
@@ -20359,7 +20437,7 @@ def S2_storerh_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7736768, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_e957fb, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20380,7 +20458,7 @@ def S2_storerh_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111010;
let accessSize = HalfWordAccess;
@@ -20393,7 +20471,7 @@ def S2_storerh_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_10915758 {
+tc_251c87b2, TypeST>, Enc_935d9b {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001010;
@@ -20408,7 +20486,7 @@ def S2_storerh_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001010;
let addrMode = PostInc;
@@ -20422,7 +20500,7 @@ def S2_storerh_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11492529, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_052c7d, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20439,7 +20517,7 @@ def S2_storerh_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101010;
let addrMode = PostInc;
@@ -20452,7 +20530,7 @@ def S2_storerh_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20460,7 +20538,7 @@ def S2_storerhgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
@@ -20478,15 +20556,15 @@ def S2_storerhnew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Nt8),
"memh($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_748676, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_0d8870, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -20502,14 +20580,14 @@ def S2_storerhnew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101111101;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerh_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20518,7 +20596,7 @@ def S2_storerhnew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10326434 {
+tc_9c68db63, TypeST>, Enc_91b9fe {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b01;
@@ -20526,8 +20604,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20536,15 +20614,15 @@ def S2_storerhnew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20553,7 +20631,7 @@ def S2_storerhnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_6900405, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_e26546, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b001;
@@ -20561,8 +20639,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerh_pi";
let isNVStorable = 1;
let isPredicable = 1;
@@ -20573,15 +20651,15 @@ def S2_storerhnew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20589,7 +20667,7 @@ def S2_storerhnew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20598,14 +20676,14 @@ def S2_storerhnewgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Nt8),
"memh(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_13618890, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_bc03e5, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storerhabs";
let isPredicable = 1;
@@ -20619,7 +20697,7 @@ def S2_storeri_io : HInst<
(outs),
(ins IntRegs:$Rs32, s30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_6673186, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_143445, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20640,7 +20718,7 @@ def S2_storeri_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111100;
let accessSize = WordAccess;
@@ -20653,7 +20731,7 @@ def S2_storeri_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_9915754 {
+tc_251c87b2, TypeST>, Enc_79b8c8 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001100;
@@ -20668,7 +20746,7 @@ def S2_storeri_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001100;
let addrMode = PostInc;
@@ -20682,7 +20760,7 @@ def S2_storeri_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10492541, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_db40cd, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20699,7 +20777,7 @@ def S2_storeri_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101100;
let addrMode = PostInc;
@@ -20712,7 +20790,7 @@ def S2_storeri_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20720,7 +20798,7 @@ def S2_storerigp : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Rt32),
"memw(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_15999208, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_541f26, AddrModeRel {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b01001;
let accessSize = WordAccess;
@@ -20738,15 +20816,15 @@ def S2_storerinew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s30_2Imm:$Ii, IntRegs:$Nt8),
"memw($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_8409782, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_690862, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -20762,14 +20840,14 @@ def S2_storerinew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101111101;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storeri_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20778,7 +20856,7 @@ def S2_storerinew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_11326438 {
+tc_9c68db63, TypeST>, Enc_3f97c8 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b10;
@@ -20786,8 +20864,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20796,15 +20874,15 @@ def S2_storerinew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20813,7 +20891,7 @@ def S2_storerinew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_7900405, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_223005, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b010;
@@ -20821,8 +20899,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storeri_pi";
let isPredicable = 1;
let opNewValue = 3;
@@ -20832,15 +20910,15 @@ def S2_storerinew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20848,7 +20926,7 @@ def S2_storerinew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20857,14 +20935,14 @@ def S2_storerinewgp : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Nt8),
"memw(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_12297800, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_78cbf0, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storeriabs";
let isPredicable = 1;
@@ -20878,20 +20956,20 @@ def S2_storew_locked : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw_locked($Rs32,$Pd4) = $Rt32",
-ST_tc_ld_SLOT0, TypeST>, Enc_10157519 {
+tc_7d01cbdc, TypeST>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100000101;
let accessSize = WordAccess;
+let isPredicateLate = 1;
let isSoloAX = 1;
let mayStore = 1;
-let isPredicateLate = 1;
}
def S2_svsathb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsathb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -20902,7 +20980,7 @@ def S2_svsathub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsathub($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -20913,7 +20991,7 @@ def S2_tableidxb : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxb($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011100;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20924,7 +21002,7 @@ def S2_tableidxb_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxb($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20935,7 +21013,7 @@ def S2_tableidxd : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxd($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20946,7 +21024,7 @@ def S2_tableidxd_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxd($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20956,7 +21034,7 @@ def S2_tableidxh : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxh($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20967,7 +21045,7 @@ def S2_tableidxh_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxh($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20977,7 +21055,7 @@ def S2_tableidxw : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxw($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20988,7 +21066,7 @@ def S2_tableidxw_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxw($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20998,7 +21076,7 @@ def S2_togglebit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = togglebit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -21009,7 +21087,7 @@ def S2_togglebit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = togglebit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -21020,7 +21098,7 @@ def S2_tstbit_i : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = tstbit($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742 {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101000;
@@ -21029,7 +21107,7 @@ def S2_tstbit_r : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = tstbit($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111000;
@@ -21038,7 +21116,7 @@ def S2_valignib : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32, u3_0Imm:$Ii),
"$Rdd32 = valignb($Rtt32,$Rss32,#$Ii)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11971407 {
+tc_d1b5a4b6, TypeS_3op>, Enc_729ff7 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000000000;
}
@@ -21046,7 +21124,7 @@ def S2_valignrb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32, PredRegs:$Pu4),
"$Rdd32 = valignb($Rtt32,$Rss32,$Pu4)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11552785 {
+tc_d1b5a4b6, TypeS_3op>, Enc_8c6530 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010000;
@@ -21055,7 +21133,7 @@ def S2_vcnegh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vcnegh($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_47ab9233, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011110;
@@ -21066,7 +21144,7 @@ def S2_vcrotate : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vcrotate($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_63cd9d2d, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011110;
@@ -21077,7 +21155,7 @@ def S2_vrcnegh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += vrcnegh($Rss32,$Rt32)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_8cb685d9, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -21088,28 +21166,30 @@ def S2_vrndpackwh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vrndwh($Rss32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_88fa2da6, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_vrndpackwhs : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vrndwh($Rss32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_94e6ffd9, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_vsathb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsathb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21120,7 +21200,7 @@ def S2_vsathb_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsathb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21129,7 +21209,7 @@ def S2_vsathub : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsathub($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21140,7 +21220,7 @@ def S2_vsathub_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsathub($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21149,7 +21229,7 @@ def S2_vsatwh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsatwh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21160,7 +21240,7 @@ def S2_vsatwh_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsatwh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21169,7 +21249,7 @@ def S2_vsatwuh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsatwuh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21180,7 +21260,7 @@ def S2_vsatwuh_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsatwuh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21189,7 +21269,7 @@ def S2_vsplatrb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsplatb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
@@ -21201,7 +21281,7 @@ def S2_vsplatrh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsplath($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100010;
let isReMaterializable = 1;
@@ -21211,7 +21291,7 @@ def S2_vspliceib : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, u3_0Imm:$Ii),
"$Rdd32 = vspliceb($Rss32,$Rtt32,#$Ii)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_16730127 {
+tc_d1b5a4b6, TypeS_3op>, Enc_d50cd3 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000000100;
}
@@ -21219,7 +21299,7 @@ def S2_vsplicerb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Pu4),
"$Rdd32 = vspliceb($Rss32,$Rtt32,$Pu4)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_5178985 {
+tc_d1b5a4b6, TypeS_3op>, Enc_dbd70c {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010100;
@@ -21228,7 +21308,7 @@ def S2_vsxtbh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsxtbh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21238,7 +21318,7 @@ def S2_vsxthw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsxthw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21248,7 +21328,7 @@ def S2_vtrunehb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vtrunehb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
@@ -21258,7 +21338,7 @@ def S2_vtrunewh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunewh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -21267,7 +21347,7 @@ def S2_vtrunohb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vtrunohb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
@@ -21277,7 +21357,7 @@ def S2_vtrunowh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunowh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -21286,7 +21366,7 @@ def S2_vzxtbh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vzxtbh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21296,7 +21376,7 @@ def S2_vzxthw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vzxthw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21306,7 +21386,7 @@ def S4_addaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Ru32, s32_0Imm:$Ii),
"$Rd32 = add($Rs32,add($Ru32,#$Ii))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6495334 {
+tc_090485bb, TypeALU64>, Enc_8b8d61 {
let Inst{31-23} = 0b110110110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21321,7 +21401,7 @@ def S4_addi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = add(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b100;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21339,7 +21419,7 @@ def S4_addi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = add(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b100;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21357,7 +21437,7 @@ def S4_andi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = and(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b000;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21375,7 +21455,7 @@ def S4_andi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = and(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b000;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21393,7 +21473,7 @@ def S4_clbaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s6_0Imm:$Ii),
"$Rd32 = add(clb($Rs32),#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_5523416 {
+tc_87601822, TypeS_2op>, Enc_9fae8a {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10001100001;
let hasNewValue = 1;
@@ -21404,7 +21484,7 @@ def S4_clbpaddi : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, s6_0Imm:$Ii),
"$Rd32 = add(clb($Rss32),#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_10188026 {
+tc_87601822, TypeS_2op>, Enc_a1640c {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
@@ -21415,17 +21495,18 @@ def S4_clbpnorm : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = normamt($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S4_extract : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rd32 = extract($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_11930928 {
+tc_c0cd91a8, TypeS_2op>, Enc_b388cf {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011011;
let hasNewValue = 1;
@@ -21436,7 +21517,7 @@ def S4_extract_rp : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rd32 = extract($Rs32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_15472748 {
+tc_87601822, TypeS_3op>, Enc_e07374 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001001000;
@@ -21448,7 +21529,7 @@ def S4_extractp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rdd32 = extract($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_9894557 {
+tc_c0cd91a8, TypeS_2op>, Enc_b84c4c {
let Inst{31-24} = 0b10001010;
let prefersSlot3 = 1;
}
@@ -21456,7 +21537,7 @@ def S4_extractp_rp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = extract($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -21466,7 +21547,7 @@ def S4_lsli : HInst<
(outs IntRegs:$Rd32),
(ins s6_0Imm:$Ii, IntRegs:$Rt32),
"$Rd32 = lsl(#$Ii,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_518319 {
+tc_9c18c9a5, TypeS_3op>, Enc_fef969 {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -21477,7 +21558,7 @@ def S4_ntstbit_i : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = !tstbit($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742 {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101001;
@@ -21486,7 +21567,7 @@ def S4_ntstbit_r : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !tstbit($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111001;
@@ -21495,7 +21576,7 @@ def S4_or_andi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 |= and($Rs32,#$Ii)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6356866 {
+tc_3c10f809, TypeALU64>, Enc_b0e9d8 {
let Inst{31-22} = 0b1101101000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21512,7 +21593,7 @@ def S4_or_andix : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Ru32, IntRegs:$Rx32in, s32_0Imm:$Ii),
"$Rx32 = or($Ru32,and($Rx32in,#$Ii))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_7504828 {
+tc_3c10f809, TypeALU64>, Enc_b4e6cf {
let Inst{31-22} = 0b1101101001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21528,7 +21609,7 @@ def S4_or_ori : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 |= or($Rs32,#$Ii)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6356866 {
+tc_3c10f809, TypeALU64>, Enc_b0e9d8 {
let Inst{31-22} = 0b1101101010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21545,7 +21626,7 @@ def S4_ori_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = or(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b010;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21563,7 +21644,7 @@ def S4_ori_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = or(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b010;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21581,7 +21662,7 @@ def S4_parity : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = parity($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_87601822, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101111;
@@ -21593,7 +21674,7 @@ def S4_pstorerbf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -21618,7 +21699,7 @@ def S4_pstorerbf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -21634,7 +21715,7 @@ def S4_pstorerbfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -21643,8 +21724,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
@@ -21660,7 +21741,7 @@ def S4_pstorerbfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110000;
let isPredicated = 1;
@@ -21683,7 +21764,7 @@ def S4_pstorerbfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -21700,7 +21781,7 @@ def S4_pstorerbfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -21708,7 +21789,7 @@ def S4_pstorerbnewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b000;
@@ -21718,9 +21799,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21735,7 +21816,7 @@ def S4_pstorerbnewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -21743,8 +21824,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21754,7 +21835,7 @@ def S4_pstorerbnewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -21764,10 +21845,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21782,7 +21863,7 @@ def S4_pstorerbnewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000110101;
@@ -21792,8 +21873,8 @@ let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -21808,7 +21889,7 @@ def S4_pstorerbnewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -21817,8 +21898,8 @@ let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21828,7 +21909,7 @@ def S4_pstorerbnewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -21837,7 +21918,7 @@ def S4_pstorerbnewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b000;
@@ -21846,9 +21927,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21863,15 +21944,15 @@ def S4_pstorerbnewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21881,7 +21962,7 @@ def S4_pstorerbnewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -21890,10 +21971,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21908,7 +21989,7 @@ def S4_pstorerbnewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000010101;
@@ -21917,8 +21998,8 @@ let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -21933,7 +22014,7 @@ def S4_pstorerbnewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -21941,8 +22022,8 @@ let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21952,7 +22033,7 @@ def S4_pstorerbnewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -21961,7 +22042,7 @@ def S4_pstorerbt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -21985,7 +22066,7 @@ def S4_pstorerbt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100000;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22000,7 +22081,7 @@ def S4_pstorerbtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22008,8 +22089,8 @@ let Inst{31-18} = 0b10101111000000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
@@ -22025,7 +22106,7 @@ def S4_pstorerbtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010000;
let isPredicated = 1;
@@ -22047,7 +22128,7 @@ def S4_pstorerbtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110000;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22063,7 +22144,7 @@ def S4_pstorerbtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22071,7 +22152,7 @@ def S4_pstorerdf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22095,7 +22176,7 @@ def S4_pstorerdf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110101110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22110,7 +22191,7 @@ def S4_pstorerdfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_336e698c, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22119,8 +22200,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerd";
let BaseOpcode = "S2_storerdabs";
@@ -22135,7 +22216,7 @@ def S4_pstorerdfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110110;
let isPredicated = 1;
@@ -22157,7 +22238,7 @@ def S4_pstorerdfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110111110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22173,7 +22254,7 @@ def S4_pstorerdfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22181,7 +22262,7 @@ def S4_pstorerdt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22204,7 +22285,7 @@ def S4_pstorerdt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110100110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22218,7 +22299,7 @@ def S4_pstorerdtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_336e698c, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22226,8 +22307,8 @@ let Inst{31-18} = 0b10101111110000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerd";
let BaseOpcode = "S2_storerdabs";
@@ -22242,7 +22323,7 @@ def S4_pstorerdtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010110;
let isPredicated = 1;
@@ -22263,7 +22344,7 @@ def S4_pstorerdtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110110110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22278,7 +22359,7 @@ def S4_pstorerdtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22286,7 +22367,7 @@ def S4_pstorerff_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22310,7 +22391,7 @@ def S4_pstorerff_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22325,7 +22406,7 @@ def S4_pstorerffnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22334,8 +22415,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerf";
let BaseOpcode = "S2_storerfabs";
@@ -22350,7 +22431,7 @@ def S4_pstorerffnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110011;
let isPredicated = 1;
@@ -22372,7 +22453,7 @@ def S4_pstorerffnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22388,7 +22469,7 @@ def S4_pstorerffnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22396,7 +22477,7 @@ def S4_pstorerft_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22419,7 +22500,7 @@ def S4_pstorerft_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100011;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22433,7 +22514,7 @@ def S4_pstorerftnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22441,8 +22522,8 @@ let Inst{31-18} = 0b10101111011000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerf";
let BaseOpcode = "S2_storerfabs";
@@ -22457,7 +22538,7 @@ def S4_pstorerftnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010011;
let isPredicated = 1;
@@ -22478,7 +22559,7 @@ def S4_pstorerftnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110011;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22493,7 +22574,7 @@ def S4_pstorerftnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22501,7 +22582,7 @@ def S4_pstorerhf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22526,7 +22607,7 @@ def S4_pstorerhf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22542,7 +22623,7 @@ def S4_pstorerhfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22551,8 +22632,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
@@ -22568,7 +22649,7 @@ def S4_pstorerhfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110010;
let isPredicated = 1;
@@ -22591,7 +22672,7 @@ def S4_pstorerhfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22608,7 +22689,7 @@ def S4_pstorerhfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22616,7 +22697,7 @@ def S4_pstorerhnewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b001;
@@ -22626,9 +22707,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22643,7 +22724,7 @@ def S4_pstorerhnewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -22651,8 +22732,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22662,7 +22743,7 @@ def S4_pstorerhnewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -22672,10 +22753,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22690,7 +22771,7 @@ def S4_pstorerhnewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000110101;
@@ -22700,8 +22781,8 @@ let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -22716,7 +22797,7 @@ def S4_pstorerhnewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -22725,8 +22806,8 @@ let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22736,7 +22817,7 @@ def S4_pstorerhnewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -22745,7 +22826,7 @@ def S4_pstorerhnewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b001;
@@ -22754,9 +22835,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22771,15 +22852,15 @@ def S4_pstorerhnewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22789,7 +22870,7 @@ def S4_pstorerhnewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -22798,10 +22879,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22816,7 +22897,7 @@ def S4_pstorerhnewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000010101;
@@ -22825,8 +22906,8 @@ let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -22841,7 +22922,7 @@ def S4_pstorerhnewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -22849,8 +22930,8 @@ let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22860,7 +22941,7 @@ def S4_pstorerhnewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -22869,7 +22950,7 @@ def S4_pstorerht_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22893,7 +22974,7 @@ def S4_pstorerht_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100010;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22908,7 +22989,7 @@ def S4_pstorerhtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22916,8 +22997,8 @@ let Inst{31-18} = 0b10101111010000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
@@ -22933,7 +23014,7 @@ def S4_pstorerhtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010010;
let isPredicated = 1;
@@ -22955,7 +23036,7 @@ def S4_pstorerhtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110010;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22971,7 +23052,7 @@ def S4_pstorerhtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22979,7 +23060,7 @@ def S4_pstorerif_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -23004,7 +23085,7 @@ def S4_pstorerif_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23020,7 +23101,7 @@ def S4_pstorerifnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -23029,8 +23110,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
@@ -23046,7 +23127,7 @@ def S4_pstorerifnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110100;
let isPredicated = 1;
@@ -23069,7 +23150,7 @@ def S4_pstorerifnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23086,7 +23167,7 @@ def S4_pstorerifnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23094,7 +23175,7 @@ def S4_pstorerinewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b010;
@@ -23104,9 +23185,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23121,7 +23202,7 @@ def S4_pstorerinewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -23129,8 +23210,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23140,7 +23221,7 @@ def S4_pstorerinewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -23150,10 +23231,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23168,7 +23249,7 @@ def S4_pstorerinewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000110101;
@@ -23178,8 +23259,8 @@ let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -23194,7 +23275,7 @@ def S4_pstorerinewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -23203,8 +23284,8 @@ let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23214,7 +23295,7 @@ def S4_pstorerinewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -23223,7 +23304,7 @@ def S4_pstorerinewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b010;
@@ -23232,9 +23313,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23249,15 +23330,15 @@ def S4_pstorerinewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23267,7 +23348,7 @@ def S4_pstorerinewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -23276,10 +23357,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23294,7 +23375,7 @@ def S4_pstorerinewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000010101;
@@ -23303,8 +23384,8 @@ let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -23319,7 +23400,7 @@ def S4_pstorerinewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -23327,8 +23408,8 @@ let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23338,7 +23419,7 @@ def S4_pstorerinewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -23347,7 +23428,7 @@ def S4_pstorerit_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -23371,7 +23452,7 @@ def S4_pstorerit_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100100;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -23386,7 +23467,7 @@ def S4_pstoreritnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -23394,8 +23475,8 @@ let Inst{31-18} = 0b10101111100000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
@@ -23411,7 +23492,7 @@ def S4_pstoreritnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010100;
let isPredicated = 1;
@@ -23433,7 +23514,7 @@ def S4_pstoreritnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110100;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -23449,7 +23530,7 @@ def S4_pstoreritnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23457,20 +23538,20 @@ def S4_stored_locked : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"memd_locked($Rs32,$Pd4) = $Rtt32",
-ST_tc_ld_SLOT0, TypeST>, Enc_2921694 {
+tc_7d01cbdc, TypeST>, Enc_d7dc10 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100000111;
let accessSize = DoubleWordAccess;
+let isPredicateLate = 1;
let isSoloAX = 1;
let mayStore = 1;
-let isPredicateLate = 1;
}
def S4_storeirb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11282123, PredNewRel {
+tc_fcee8723, TypeST>, Enc_8203bb, PredNewRel {
let Inst{31-21} = 0b00111100000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -23489,7 +23570,7 @@ def S4_storeirb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23497,7 +23578,7 @@ def S4_storeirbf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111000100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23517,7 +23598,7 @@ def S4_storeirbf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23525,7 +23606,7 @@ def S4_storeirbfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111001100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23546,7 +23627,7 @@ def S4_storeirbfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23554,7 +23635,7 @@ def S4_storeirbt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111000000;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23573,7 +23654,7 @@ def S4_storeirbt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23581,7 +23662,7 @@ def S4_storeirbtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111001000;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23601,7 +23682,7 @@ def S4_storeirbtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23609,7 +23690,7 @@ def S4_storeirh_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_10282127, PredNewRel {
+tc_fcee8723, TypeST>, Enc_a803e0, PredNewRel {
let Inst{31-21} = 0b00111100001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
@@ -23628,7 +23709,7 @@ def S4_storeirh_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23636,7 +23717,7 @@ def S4_storeirhf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23656,7 +23737,7 @@ def S4_storeirhf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23664,7 +23745,7 @@ def S4_storeirhfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111001101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23685,7 +23766,7 @@ def S4_storeirhfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23693,7 +23774,7 @@ def S4_storeirht_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111000001;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23712,7 +23793,7 @@ def S4_storeirht_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23720,7 +23801,7 @@ def S4_storeirhtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111001001;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23740,7 +23821,7 @@ def S4_storeirhtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23748,7 +23829,7 @@ def S4_storeiri_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9282127, PredNewRel {
+tc_fcee8723, TypeST>, Enc_f37377, PredNewRel {
let Inst{31-21} = 0b00111100010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -23767,7 +23848,7 @@ def S4_storeiri_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23775,7 +23856,7 @@ def S4_storeirif_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111000110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23795,7 +23876,7 @@ def S4_storeirif_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23803,7 +23884,7 @@ def S4_storeirifnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111001110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23824,7 +23905,7 @@ def S4_storeirifnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23832,7 +23913,7 @@ def S4_storeirit_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111000010;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23851,7 +23932,7 @@ def S4_storeirit_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23859,7 +23940,7 @@ def S4_storeiritnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111001010;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23879,7 +23960,7 @@ def S4_storeiritnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23887,7 +23968,7 @@ def S4_storerb_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memb($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011000;
@@ -23910,7 +23991,7 @@ def S4_storerb_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011000;
let addrMode = BaseRegOffset;
@@ -23926,7 +24007,7 @@ def S4_storerb_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memb($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101000;
let addrMode = BaseLongOffset;
@@ -23948,7 +24029,7 @@ def S4_storerbnew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memb($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10101011101;
@@ -23957,9 +24038,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storerb_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -23973,14 +24054,14 @@ def S4_storerbnew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0000;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -23991,16 +24072,16 @@ def S4_storerbnew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memb($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S4_storerb_ur";
let DecoderNamespace = "MustExtend";
@@ -24015,7 +24096,7 @@ def S4_storerd_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, DoubleRegs:$Rtt32),
"memd($Re32=#$II) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_8131399 {
+tc_336e698c, TypeST>, Enc_c7a204 {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011110;
@@ -24037,7 +24118,7 @@ def S4_storerd_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9772987, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_55355c, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011110;
let addrMode = BaseRegOffset;
@@ -24052,7 +24133,7 @@ def S4_storerd_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, DoubleRegs:$Rtt32),
"memd($Ru32<<#$Ii+#$II) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_12848507, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_f79415, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101110;
let addrMode = BaseLongOffset;
@@ -24073,7 +24154,7 @@ def S4_storerf_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memh($Re32=#$II) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246 {
+tc_336e698c, TypeST>, Enc_8bcba4 {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011011;
@@ -24095,7 +24176,7 @@ def S4_storerf_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011011;
let addrMode = BaseRegOffset;
@@ -24110,7 +24191,7 @@ def S4_storerf_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memh($Ru32<<#$Ii+#$II) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101011;
let addrMode = BaseLongOffset;
@@ -24131,7 +24212,7 @@ def S4_storerh_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memh($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011010;
@@ -24154,7 +24235,7 @@ def S4_storerh_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011010;
let addrMode = BaseRegOffset;
@@ -24170,7 +24251,7 @@ def S4_storerh_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memh($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101010;
let addrMode = BaseLongOffset;
@@ -24192,7 +24273,7 @@ def S4_storerhnew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memh($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b001;
let Inst{31-21} = 0b10101011101;
@@ -24201,9 +24282,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storerh_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -24217,14 +24298,14 @@ def S4_storerhnew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0001;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -24235,16 +24316,16 @@ def S4_storerhnew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memh($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_ur";
let DecoderNamespace = "MustExtend";
@@ -24259,7 +24340,7 @@ def S4_storeri_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memw($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011100;
@@ -24282,7 +24363,7 @@ def S4_storeri_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011100;
let addrMode = BaseRegOffset;
@@ -24298,7 +24379,7 @@ def S4_storeri_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memw($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101100;
let addrMode = BaseLongOffset;
@@ -24320,7 +24401,7 @@ def S4_storerinew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memw($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b010;
let Inst{31-21} = 0b10101011101;
@@ -24329,9 +24410,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storeri_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -24345,14 +24426,14 @@ def S4_storerinew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0010;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -24363,16 +24444,16 @@ def S4_storerinew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memw($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_ur";
let DecoderNamespace = "MustExtend";
@@ -24387,7 +24468,7 @@ def S4_subaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Ru32),
"$Rd32 = add($Rs32,sub(#$Ii,$Ru32))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6495334 {
+tc_090485bb, TypeALU64>, Enc_8b8d61 {
let Inst{31-23} = 0b110110111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24402,7 +24483,7 @@ def S4_subi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = sub(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b110;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -24420,7 +24501,7 @@ def S4_subi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = sub(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b110;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -24438,7 +24519,7 @@ def S4_vrcrotate : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rdd32 = vrcrotate($Rss32,$Rt32,#$Ii)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_114098 {
+tc_6264c5e0, TypeS_3op>, Enc_645d54 {
let Inst{7-6} = 0b11;
let Inst{31-21} = 0b11000011110;
let prefersSlot3 = 1;
@@ -24447,7 +24528,7 @@ def S4_vrcrotate_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rxx32 += vrcrotate($Rss32,$Rt32,#$Ii)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_13114546 {
+tc_bc5561d8, TypeS_3op>, Enc_b72622 {
let Inst{7-6} = 0b00;
let Inst{31-21} = 0b11001011101;
let prefersSlot3 = 1;
@@ -24457,17 +24538,18 @@ def S4_vxaddsubh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubh($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxaddsubhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubh($Rss32,$Rtt32):rnd:>>1:sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_63cd9d2d, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -24478,27 +24560,29 @@ def S4_vxaddsubw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubw($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxsubaddh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddh($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxsubaddhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddh($Rss32,$Rtt32):rnd:>>1:sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_63cd9d2d, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -24509,17 +24593,18 @@ def S4_vxsubaddw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddw($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S5_asrhub_rnd_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):raw",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8038806, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_11a146, Requires<[HasV5T]> {
let Inst{7-5} = 0b100;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10001000011;
@@ -24532,7 +24617,7 @@ def S5_asrhub_rnd_sat_goodsyntax : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):rnd:sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -24541,7 +24626,7 @@ def S5_asrhub_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8038806, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_11a146, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10001000011;
@@ -24554,7 +24639,7 @@ def S5_popcountp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = popcount($Rss32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_ca280e8b, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
@@ -24565,7 +24650,7 @@ def S5_vasrhrnd : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_12b6e9, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000001;
@@ -24575,14 +24660,14 @@ def S5_vasrhrnd_goodsyntax : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii):rnd",
-S_2op_tc_1_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let isPseudo = 1;
}
def S6_rol_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995, Requires<[HasV60T]> {
+tc_9f518242, TypeS_2op>, Enc_5eac98, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000000000;
}
@@ -24590,7 +24675,7 @@ def S6_rol_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -24600,7 +24685,7 @@ def S6_rol_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -24610,7 +24695,7 @@ def S6_rol_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -24620,7 +24705,7 @@ def S6_rol_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -24630,7 +24715,7 @@ def S6_rol_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -24640,7 +24725,7 @@ def S6_rol_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456, Requires<[HasV60T]> {
+tc_9f518242, TypeS_2op>, Enc_a05677, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -24651,7 +24736,7 @@ def S6_rol_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -24664,7 +24749,7 @@ def S6_rol_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -24677,7 +24762,7 @@ def S6_rol_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -24690,7 +24775,7 @@ def S6_rol_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -24703,7 +24788,7 @@ def S6_rol_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -24716,7 +24801,7 @@ def S6_vsplatrbp : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsplatb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV62T]> {
+tc_78b3c689, TypeS_2op>, Enc_3a3d62, Requires<[HasV62T]> {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100010;
}
@@ -24724,7 +24809,7 @@ def S6_vtrunehb_ppp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunehb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> {
+tc_9f518242, TypeS_3op>, Enc_a56825, Requires<[HasV62T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -24733,7 +24818,7 @@ def S6_vtrunohb_ppp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunohb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> {
+tc_9f518242, TypeS_3op>, Enc_a56825, Requires<[HasV62T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -24742,7 +24827,7 @@ def SA1_addi : HInst<
(outs GeneralSubRegs:$Rx16),
(ins IntRegs:$Rx16in, s32_0Imm:$Ii),
"$Rx16 = add($Rx16in,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_3974695 {
+tc_821c4233, TypeSUBINSN>, Enc_93af4c {
let Inst{12-11} = 0b00;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24759,7 +24844,7 @@ def SA1_addrx : HInst<
(outs GeneralSubRegs:$Rx16),
(ins IntRegs:$Rx16in, GeneralSubRegs:$Rs16),
"$Rx16 = add($Rx16in,$Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_6135183 {
+tc_821c4233, TypeSUBINSN>, Enc_0527db {
let Inst{12-8} = 0b11000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24771,7 +24856,7 @@ def SA1_addsp : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u6_2Imm:$Ii),
"$Rd16 = add(r29,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_176263 {
+tc_d2609065, TypeSUBINSN>, Enc_2df31d {
let Inst{12-10} = 0b011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24783,7 +24868,7 @@ def SA1_and1 : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = and($Rs16,#1)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24794,7 +24879,7 @@ def SA1_clrf : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (!p0) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_7c2dcd4d, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -24808,7 +24893,7 @@ def SA1_clrfnew : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (!p0.new) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_f26aa619, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -24823,7 +24908,7 @@ def SA1_clrt : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (p0) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_7c2dcd4d, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100110;
let isPredicated = 1;
let hasNewValue = 1;
@@ -24836,7 +24921,7 @@ def SA1_clrtnew : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (p0.new) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_f26aa619, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -24850,7 +24935,7 @@ def SA1_cmpeqi : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u2_0Imm:$Ii),
"p0 = cmp.eq($Rs16,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_2079016 {
+tc_e8c7a357, TypeSUBINSN>, Enc_63eaeb {
let Inst{3-2} = 0b00;
let Inst{12-8} = 0b11001;
let AsmVariantName = "NonParsable";
@@ -24861,7 +24946,7 @@ def SA1_combine0i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#0,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b00;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24873,7 +24958,7 @@ def SA1_combine1i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#1,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b01;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24885,7 +24970,7 @@ def SA1_combine2i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#2,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b10;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24897,7 +24982,7 @@ def SA1_combine3i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#3,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b11;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24909,7 +24994,7 @@ def SA1_combinerz : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins GeneralSubRegs:$Rs16),
"$Rdd8 = combine($Rs16,#0)",
-PSEUDO, TypeSUBINSN>, Enc_10501894 {
+tc_d2609065, TypeSUBINSN>, Enc_399e12 {
let Inst{3-3} = 0b1;
let Inst{12-8} = 0b11101;
let hasNewValue = 1;
@@ -24921,7 +25006,7 @@ def SA1_combinezr : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins GeneralSubRegs:$Rs16),
"$Rdd8 = combine(#0,$Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_10501894 {
+tc_d2609065, TypeSUBINSN>, Enc_399e12 {
let Inst{3-3} = 0b0;
let Inst{12-8} = 0b11101;
let hasNewValue = 1;
@@ -24933,7 +25018,7 @@ def SA1_dec : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, n1Const:$n1),
"$Rd16 = add($Rs16,#$n1)",
-PSEUDO, TypeSUBINSN>, Enc_10597934 {
+tc_821c4233, TypeSUBINSN>, Enc_ee5ed0 {
let Inst{12-8} = 0b10011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24944,7 +25029,7 @@ def SA1_inc : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = add($Rs16,#1)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24955,7 +25040,7 @@ def SA1_seti : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u32_0Imm:$Ii),
"$Rd16 = #$Ii",
-PSEUDO, TypeSUBINSN>, Enc_2176383 {
+tc_d2609065, TypeSUBINSN>, Enc_e39bb2 {
let Inst{12-10} = 0b010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24971,7 +25056,7 @@ def SA1_setin1 : HInst<
(outs GeneralSubRegs:$Rd16),
(ins n1Const:$n1),
"$Rd16 = #$n1",
-PSEUDO, TypeSUBINSN>, Enc_13336212 {
+tc_d2609065, TypeSUBINSN>, Enc_7a0ea6 {
let Inst{12-4} = 0b110100000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24982,7 +25067,7 @@ def SA1_sxtb : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = sxtb($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24993,7 +25078,7 @@ def SA1_sxth : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = sxth($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10100;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25004,7 +25089,7 @@ def SA1_tfr : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = $Rs16",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25015,7 +25100,7 @@ def SA1_zxtb : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = and($Rs16,#255)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25026,7 +25111,7 @@ def SA1_zxth : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = zxth($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25037,7 +25122,7 @@ def SL1_loadri_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"$Rd16 = memw($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_13606251 {
+tc_bf6fa601, TypeSUBINSN>, Enc_53dca9 {
let Inst{12-12} = 0b0;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25051,7 +25136,7 @@ def SL1_loadrub_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"$Rd16 = memub($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15606259 {
+tc_bf6fa601, TypeSUBINSN>, Enc_c175d0 {
let Inst{12-12} = 0b1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25065,7 +25150,7 @@ def SL2_deallocframe : HInst<
(outs),
(ins),
"deallocframe",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_86442910, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111100000000;
let accessSize = DoubleWordAccess;
let AsmVariantName = "NonParsable";
@@ -25078,7 +25163,7 @@ def SL2_jumpr31 : HInst<
(outs),
(ins),
"jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000000;
let isTerminator = 1;
let isIndirectBranch = 1;
@@ -25093,7 +25178,7 @@ def SL2_jumpr31_f : HInst<
(outs),
(ins),
"if (!p0) jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25111,7 +25196,7 @@ def SL2_jumpr31_fnew : HInst<
(outs),
(ins),
"if (!p0.new) jumpr:nt r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25119,8 +25204,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
+let isReturn = 1;
let Uses = [P0, R31];
let Defs = [PC];
let isTaken = Inst{4};
@@ -25130,7 +25215,7 @@ def SL2_jumpr31_t : HInst<
(outs),
(ins),
"if (p0) jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000100;
let isPredicated = 1;
let isTerminator = 1;
@@ -25147,15 +25232,15 @@ def SL2_jumpr31_tnew : HInst<
(outs),
(ins),
"if (p0.new) jumpr:nt r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000110;
let isPredicated = 1;
let isTerminator = 1;
let isIndirectBranch = 1;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
+let isReturn = 1;
let Uses = [P0, R31];
let Defs = [PC];
let isTaken = Inst{4};
@@ -25165,7 +25250,7 @@ def SL2_loadrb_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_0Imm:$Ii),
"$Rd16 = memb($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_3135259 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2fbf3c {
let Inst{12-11} = 0b10;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25179,7 +25264,7 @@ def SL2_loadrd_sp : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u5_3Imm:$Ii),
"$Rdd8 = memd(r29+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_16479122 {
+tc_70cabf66, TypeSUBINSN>, Enc_86a14b {
let Inst{12-8} = 0b11110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25194,7 +25279,7 @@ def SL2_loadrh_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii),
"$Rd16 = memh($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_4135257 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2bae10 {
let Inst{12-11} = 0b00;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25208,7 +25293,7 @@ def SL2_loadri_sp : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u5_2Imm:$Ii),
"$Rd16 = memw(r29+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_64199 {
+tc_70cabf66, TypeSUBINSN>, Enc_51635c {
let Inst{12-9} = 0b1110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25223,7 +25308,7 @@ def SL2_loadruh_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii),
"$Rd16 = memuh($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_4135257 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2bae10 {
let Inst{12-11} = 0b01;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25237,15 +25322,15 @@ def SL2_return : HInst<
(outs),
(ins),
"dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000000;
let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R30, R29, R31];
let DecoderNamespace = "SUBINSN_L2";
@@ -25254,7 +25339,7 @@ def SL2_return_f : HInst<
(outs),
(ins),
"if (!p0) dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25263,8 +25348,8 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25274,7 +25359,7 @@ def SL2_return_fnew : HInst<
(outs),
(ins),
"if (!p0.new) dealloc_return:nt",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25283,9 +25368,9 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25295,7 +25380,7 @@ def SL2_return_t : HInst<
(outs),
(ins),
"if (p0) dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000100;
let isPredicated = 1;
let isTerminator = 1;
@@ -25303,8 +25388,8 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25314,7 +25399,7 @@ def SL2_return_tnew : HInst<
(outs),
(ins),
"if (p0.new) dealloc_return:nt",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000110;
let isPredicated = 1;
let isTerminator = 1;
@@ -25322,9 +25407,9 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25334,7 +25419,7 @@ def SS1_storeb_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii, GeneralSubRegs:$Rt16),
"memb($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_13204995 {
+tc_53ee6546, TypeSUBINSN>, Enc_b38ffc {
let Inst{12-12} = 0b1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25346,7 +25431,7 @@ def SS1_storew_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii, GeneralSubRegs:$Rt16),
"memw($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_11205051 {
+tc_53ee6546, TypeSUBINSN>, Enc_f55a0c {
let Inst{12-12} = 0b0;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25358,7 +25443,7 @@ def SS2_allocframe : HInst<
(outs),
(ins u5_3Imm:$Ii),
"allocframe(#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_7884306 {
+tc_f027ebe9, TypeSUBINSN>, Enc_6f70ca {
let Inst{3-0} = 0b0000;
let Inst{12-9} = 0b1110;
let addrMode = BaseImmOffset;
@@ -25373,7 +25458,7 @@ def SS2_storebi0 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"memb($Rs16+#$Ii) = #0",
-PSEUDO, TypeSUBINSN>, Enc_13536408 {
+tc_6c52d277, TypeSUBINSN>, Enc_84d359 {
let Inst{12-8} = 0b10010;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25385,7 +25470,7 @@ def SS2_storebi1 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"memb($Rs16+#$Ii) = #1",
-PSEUDO, TypeSUBINSN>, Enc_13536408 {
+tc_6c52d277, TypeSUBINSN>, Enc_84d359 {
let Inst{12-8} = 0b10011;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25397,7 +25482,7 @@ def SS2_stored_sp : HInst<
(outs),
(ins s6_3Imm:$Ii, GeneralDoubleLow8Regs:$Rtt8),
"memd(r29+#$Ii) = $Rtt8",
-PSEUDO, TypeSUBINSN>, Enc_9165078 {
+tc_c14739d5, TypeSUBINSN>, Enc_b8309d {
let Inst{12-9} = 0b0101;
let addrMode = BaseImmOffset;
let accessSize = DoubleWordAccess;
@@ -25410,7 +25495,7 @@ def SS2_storeh_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii, GeneralSubRegs:$Rt16),
"memh($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_1734121 {
+tc_53ee6546, TypeSUBINSN>, Enc_625deb {
let Inst{12-11} = 0b00;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
@@ -25422,7 +25507,7 @@ def SS2_storew_sp : HInst<
(outs),
(ins u5_2Imm:$Ii, GeneralSubRegs:$Rt16),
"memw(r29+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_6690615 {
+tc_c14739d5, TypeSUBINSN>, Enc_87c142 {
let Inst{12-9} = 0b0100;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25435,7 +25520,7 @@ def SS2_storewi0 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"memw($Rs16+#$Ii) = #0",
-PSEUDO, TypeSUBINSN>, Enc_15536400 {
+tc_6c52d277, TypeSUBINSN>, Enc_a6ce9c {
let Inst{12-8} = 0b10000;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25447,7 +25532,7 @@ def SS2_storewi1 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"memw($Rs16+#$Ii) = #1",
-PSEUDO, TypeSUBINSN>, Enc_15536400 {
+tc_6c52d277, TypeSUBINSN>, Enc_a6ce9c {
let Inst{12-8} = 0b10001;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25759,7 +25844,7 @@ def V6_extractw : HInst<
(outs IntRegs:$Rd32),
(ins VectorRegs:$Vu32, IntRegs:$Rs32),
"$Rd32 = vextract($Vu32,$Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_16601956, Requires<[HasV60T,UseHVX]> {
+tc_9777e6bf, TypeLD>, Enc_50e578, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10010010000;
@@ -25773,7 +25858,7 @@ def V6_extractw_128B : HInst<
(outs IntRegs:$Rd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rs32),
"$Rd32 = vextract($Vu32,$Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_16601956, Requires<[HasV60T,UseHVX]> {
+tc_9777e6bf, TypeLD>, Enc_50e578, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10010010000;
@@ -25851,6 +25936,144 @@ let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
+def V6_ldcnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
def V6_ldnt0 : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
@@ -25874,6 +26097,144 @@ let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
+def V6_ldp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
def V6_ldu0 : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
@@ -25922,7 +26283,7 @@ def V6_lvsplatb : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.b = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25933,7 +26294,7 @@ def V6_lvsplatb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.b = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25945,7 +26306,7 @@ def V6_lvsplath : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.h = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25956,7 +26317,7 @@ def V6_lvsplath_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.h = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25968,7 +26329,7 @@ def V6_lvsplatw : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32 = vsplat($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_9768377, Requires<[HasV60T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX_LATE>, Enc_a5ed8a, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -25979,7 +26340,7 @@ def V6_lvsplatw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32 = vsplat($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_9768377, Requires<[HasV60T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX_LATE>, Enc_a5ed8a, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -25991,7 +26352,7 @@ def V6_pred_and : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = and($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26004,7 +26365,7 @@ def V6_pred_and_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = and($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26018,7 +26379,7 @@ def V6_pred_and_n : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = and($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26031,7 +26392,7 @@ def V6_pred_and_n_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = and($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26045,7 +26406,7 @@ def V6_pred_not : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4),
"$Qd4 = not($Qs4)",
-CVI_VA, TypeCVI_VA>, Enc_4897205, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_bfbf03, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b0001111000000011;
@@ -26057,7 +26418,7 @@ def V6_pred_not_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4),
"$Qd4 = not($Qs4)",
-CVI_VA, TypeCVI_VA>, Enc_4897205, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_bfbf03, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b0001111000000011;
@@ -26070,7 +26431,7 @@ def V6_pred_or : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = or($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26083,7 +26444,7 @@ def V6_pred_or_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = or($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26097,7 +26458,7 @@ def V6_pred_or_n : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = or($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26110,7 +26471,7 @@ def V6_pred_or_n_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = or($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26124,7 +26485,7 @@ def V6_pred_scalar2 : HInst<
(outs VecPredRegs:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV60T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV60T,UseHVX]> {
let Inst{13-2} = 0b000000010001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26135,7 +26496,7 @@ def V6_pred_scalar2_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV60T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV60T,UseHVX]> {
let Inst{13-2} = 0b000000010001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26147,7 +26508,7 @@ def V6_pred_scalar2v2 : HInst<
(outs VecPredRegs:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq2($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV62T,UseHVX]> {
let Inst{13-2} = 0b000000010011;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26158,7 +26519,7 @@ def V6_pred_scalar2v2_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq2($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV62T,UseHVX]> {
let Inst{13-2} = 0b000000010011;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26170,7 +26531,7 @@ def V6_pred_xor : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = xor($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000011;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26183,7 +26544,7 @@ def V6_pred_xor_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = xor($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000011;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26197,7 +26558,7 @@ def V6_shuffeqh : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26210,7 +26571,7 @@ def V6_shuffeqh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26224,7 +26585,7 @@ def V6_shuffeqw : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000111;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26237,7 +26598,7 @@ def V6_shuffeqw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000111;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26540,7 +26901,7 @@ def V6_vL32Ub_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmemu($Rt32+#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_35e92f8e, TypeCVI_VM_VP_LDU>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26556,7 +26917,7 @@ def V6_vL32Ub_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmemu($Rt32+#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_35e92f8e, TypeCVI_VM_VP_LDU>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26573,7 +26934,7 @@ def V6_vL32Ub_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmemu($Rx32++#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26590,7 +26951,7 @@ def V6_vL32Ub_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmemu($Rx32++#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26608,7 +26969,7 @@ def V6_vL32Ub_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmemu($Rx32++$Mu2)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26624,7 +26985,7 @@ def V6_vL32Ub_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmemu($Rx32++$Mu2)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26641,7 +27002,7 @@ def V6_vL32b_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26658,7 +27019,7 @@ def V6_vL32b_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26676,7 +27037,7 @@ def V6_vL32b_cur_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26693,7 +27054,7 @@ def V6_vL32b_cur_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26711,7 +27072,7 @@ def V6_vL32b_cur_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26729,7 +27090,7 @@ def V6_vL32b_cur_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26748,7 +27109,7 @@ def V6_vL32b_cur_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26768,7 +27129,7 @@ def V6_vL32b_cur_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26789,7 +27150,7 @@ def V6_vL32b_cur_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26808,7 +27169,7 @@ def V6_vL32b_cur_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26828,7 +27189,7 @@ def V6_vL32b_cur_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26846,7 +27207,7 @@ def V6_vL32b_cur_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26865,7 +27226,7 @@ def V6_vL32b_cur_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26882,7 +27243,7 @@ def V6_vL32b_cur_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26900,7 +27261,7 @@ def V6_vL32b_cur_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26917,7 +27278,7 @@ def V6_vL32b_cur_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26935,7 +27296,7 @@ def V6_vL32b_cur_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26954,7 +27315,7 @@ def V6_vL32b_cur_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26974,7 +27335,7 @@ def V6_vL32b_cur_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26992,7 +27353,7 @@ def V6_vL32b_cur_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27011,7 +27372,7 @@ def V6_vL32b_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -27028,7 +27389,7 @@ def V6_vL32b_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -27046,7 +27407,7 @@ def V6_vL32b_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -27065,7 +27426,7 @@ def V6_vL32b_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -27085,7 +27446,7 @@ def V6_vL32b_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27103,7 +27464,7 @@ def V6_vL32b_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27122,7 +27483,7 @@ def V6_vL32b_nt_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27131,8 +27492,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
}
@@ -27140,7 +27501,7 @@ def V6_vL32b_nt_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27149,8 +27510,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27159,7 +27520,7 @@ def V6_vL32b_nt_cur_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27177,7 +27538,7 @@ def V6_vL32b_nt_cur_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27196,7 +27557,7 @@ def V6_vL32b_nt_cur_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27215,7 +27576,7 @@ def V6_vL32b_nt_cur_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27235,7 +27596,7 @@ def V6_vL32b_nt_cur_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27256,7 +27617,7 @@ def V6_vL32b_nt_cur_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27278,7 +27639,7 @@ def V6_vL32b_nt_cur_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27298,7 +27659,7 @@ def V6_vL32b_nt_cur_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27319,7 +27680,7 @@ def V6_vL32b_nt_cur_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27338,7 +27699,7 @@ def V6_vL32b_nt_cur_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27358,7 +27719,7 @@ def V6_vL32b_nt_cur_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27376,7 +27737,7 @@ def V6_vL32b_nt_cur_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27395,7 +27756,7 @@ def V6_vL32b_nt_cur_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27413,7 +27774,7 @@ def V6_vL32b_nt_cur_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27432,7 +27793,7 @@ def V6_vL32b_nt_cur_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27452,7 +27813,7 @@ def V6_vL32b_nt_cur_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27473,7 +27834,7 @@ def V6_vL32b_nt_cur_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27492,7 +27853,7 @@ def V6_vL32b_nt_cur_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27512,7 +27873,7 @@ def V6_vL32b_nt_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27522,15 +27883,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27540,8 +27901,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27549,7 +27910,7 @@ def V6_vL32b_nt_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27560,8 +27921,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27569,7 +27930,7 @@ def V6_vL32b_nt_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27580,8 +27941,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27590,7 +27951,7 @@ def V6_vL32b_nt_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27600,8 +27961,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27609,7 +27970,7 @@ def V6_vL32b_nt_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27619,8 +27980,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27629,7 +27990,7 @@ def V6_vL32b_nt_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27638,8 +27999,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
@@ -27648,7 +28009,7 @@ def V6_vL32b_nt_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27657,8 +28018,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27668,7 +28029,7 @@ def V6_vL32b_nt_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27676,8 +28037,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
@@ -27686,7 +28047,7 @@ def V6_vL32b_nt_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27694,8 +28055,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27705,7 +28066,7 @@ def V6_vL32b_nt_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27714,15 +28075,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27731,8 +28092,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27740,7 +28101,7 @@ def V6_vL32b_nt_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27750,8 +28111,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27759,7 +28120,7 @@ def V6_vL32b_nt_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27769,8 +28130,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27779,7 +28140,7 @@ def V6_vL32b_nt_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27788,8 +28149,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27797,7 +28158,7 @@ def V6_vL32b_nt_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27806,8 +28167,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27816,7 +28177,7 @@ def V6_vL32b_nt_tmp_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27825,15 +28186,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27842,8 +28203,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27851,7 +28212,7 @@ def V6_vL32b_nt_tmp_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27861,15 +28222,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27879,8 +28240,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27888,7 +28249,7 @@ def V6_vL32b_nt_tmp_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27899,8 +28260,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27908,7 +28269,7 @@ def V6_vL32b_nt_tmp_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27919,8 +28280,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27929,7 +28290,7 @@ def V6_vL32b_nt_tmp_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27939,8 +28300,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27948,7 +28309,7 @@ def V6_vL32b_nt_tmp_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27958,8 +28319,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27968,7 +28329,7 @@ def V6_vL32b_nt_tmp_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27977,8 +28338,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27986,7 +28347,7 @@ def V6_vL32b_nt_tmp_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27995,8 +28356,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28005,7 +28366,7 @@ def V6_vL32b_nt_tmp_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -28013,8 +28374,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28022,7 +28383,7 @@ def V6_vL32b_nt_tmp_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -28030,8 +28391,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28040,7 +28401,7 @@ def V6_vL32b_nt_tmp_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -28049,15 +28410,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -28066,8 +28427,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -28075,7 +28436,7 @@ def V6_vL32b_nt_tmp_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -28085,8 +28446,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28094,7 +28455,7 @@ def V6_vL32b_nt_tmp_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -28104,8 +28465,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28114,7 +28475,7 @@ def V6_vL32b_nt_tmp_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -28123,8 +28484,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28132,7 +28493,7 @@ def V6_vL32b_nt_tmp_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -28141,8 +28502,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28151,7 +28512,7 @@ def V6_vL32b_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28169,7 +28530,7 @@ def V6_vL32b_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28188,7 +28549,7 @@ def V6_vL32b_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28205,7 +28566,7 @@ def V6_vL32b_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28223,7 +28584,7 @@ def V6_vL32b_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28239,7 +28600,7 @@ def V6_vL32b_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28256,7 +28617,7 @@ def V6_vL32b_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28274,7 +28635,7 @@ def V6_vL32b_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28293,7 +28654,7 @@ def V6_vL32b_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28310,7 +28671,7 @@ def V6_vL32b_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28328,7 +28689,7 @@ def V6_vL32b_tmp_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -28344,7 +28705,7 @@ def V6_vL32b_tmp_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -28361,7 +28722,7 @@ def V6_vL32b_tmp_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28378,7 +28739,7 @@ def V6_vL32b_tmp_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28396,7 +28757,7 @@ def V6_vL32b_tmp_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28415,7 +28776,7 @@ def V6_vL32b_tmp_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28435,7 +28796,7 @@ def V6_vL32b_tmp_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28453,7 +28814,7 @@ def V6_vL32b_tmp_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28472,7 +28833,7 @@ def V6_vL32b_tmp_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28489,7 +28850,7 @@ def V6_vL32b_tmp_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28507,7 +28868,7 @@ def V6_vL32b_tmp_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28523,7 +28884,7 @@ def V6_vL32b_tmp_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28540,7 +28901,7 @@ def V6_vL32b_tmp_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28556,7 +28917,7 @@ def V6_vL32b_tmp_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28573,7 +28934,7 @@ def V6_vL32b_tmp_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28591,7 +28952,7 @@ def V6_vL32b_tmp_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28610,7 +28971,7 @@ def V6_vL32b_tmp_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28627,7 +28988,7 @@ def V6_vL32b_tmp_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28645,7 +29006,7 @@ def V6_vS32Ub_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_354299ad, TypeCVI_VM_STU>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28660,7 +29021,7 @@ def V6_vS32Ub_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_354299ad, TypeCVI_VM_STU>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28676,7 +29037,7 @@ def V6_vS32Ub_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28691,7 +29052,7 @@ def V6_vS32Ub_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28707,7 +29068,7 @@ def V6_vS32Ub_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28724,7 +29085,7 @@ def V6_vS32Ub_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28742,7 +29103,7 @@ def V6_vS32Ub_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28758,7 +29119,7 @@ def V6_vS32Ub_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28775,7 +29136,7 @@ def V6_vS32Ub_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -28791,7 +29152,7 @@ def V6_vS32Ub_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -28808,7 +29169,7 @@ def V6_vS32Ub_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -28823,7 +29184,7 @@ def V6_vS32Ub_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -28839,7 +29200,7 @@ def V6_vS32Ub_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28853,7 +29214,7 @@ def V6_vS32Ub_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28868,7 +29229,7 @@ def V6_vS32Ub_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28884,7 +29245,7 @@ def V6_vS32Ub_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28901,7 +29262,7 @@ def V6_vS32Ub_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28916,7 +29277,7 @@ def V6_vS32Ub_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28932,7 +29293,7 @@ def V6_vS32b_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28948,7 +29309,7 @@ def V6_vS32b_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28965,7 +29326,7 @@ def V6_vS32b_new_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_6608821, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28984,7 +29345,7 @@ def V6_vS32b_new_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2152247, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -29004,7 +29365,7 @@ def V6_vS32b_new_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29023,7 +29384,7 @@ def V6_vS32b_new_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29043,7 +29404,7 @@ def V6_vS32b_new_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29064,7 +29425,7 @@ def V6_vS32b_new_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29086,7 +29447,7 @@ def V6_vS32b_new_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001101;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29106,7 +29467,7 @@ def V6_vS32b_new_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001101;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29127,7 +29488,7 @@ def V6_vS32b_new_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_12244921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -29147,7 +29508,7 @@ def V6_vS32b_new_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_11244923, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -29168,7 +29529,7 @@ def V6_vS32b_new_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -29187,7 +29548,7 @@ def V6_vS32b_new_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -29207,7 +29568,7 @@ def V6_vS32b_new_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29225,7 +29586,7 @@ def V6_vS32b_new_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29244,7 +29605,7 @@ def V6_vS32b_new_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29264,7 +29625,7 @@ def V6_vS32b_new_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29285,7 +29646,7 @@ def V6_vS32b_new_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29304,7 +29665,7 @@ def V6_vS32b_new_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29324,7 +29685,7 @@ def V6_vS32b_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29340,7 +29701,7 @@ def V6_vS32b_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29357,7 +29718,7 @@ def V6_vS32b_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29375,7 +29736,7 @@ def V6_vS32b_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29394,7 +29755,7 @@ def V6_vS32b_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29411,7 +29772,7 @@ def V6_vS32b_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29429,7 +29790,7 @@ def V6_vS32b_nqpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -29441,7 +29802,7 @@ def V6_vS32b_nqpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -29454,7 +29815,7 @@ def V6_vS32b_nqpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -29468,7 +29829,7 @@ def V6_vS32b_nqpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -29483,7 +29844,7 @@ def V6_vS32b_nqpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -29496,7 +29857,7 @@ def V6_vS32b_nqpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -29510,14 +29871,14 @@ def V6_vS32b_nt_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let isPredicable = 1;
@@ -29527,14 +29888,14 @@ def V6_vS32b_nt_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -29545,7 +29906,7 @@ def V6_vS32b_nt_new_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_6608821, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
@@ -29565,7 +29926,7 @@ def V6_vS32b_nt_new_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2152247, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
@@ -29586,7 +29947,7 @@ def V6_vS32b_nt_new_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29606,7 +29967,7 @@ def V6_vS32b_nt_new_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29627,7 +29988,7 @@ def V6_vS32b_nt_new_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29649,7 +30010,7 @@ def V6_vS32b_nt_new_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29672,7 +30033,7 @@ def V6_vS32b_nt_new_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001111;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29693,7 +30054,7 @@ def V6_vS32b_nt_new_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001111;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29715,7 +30076,7 @@ def V6_vS32b_nt_new_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_12244921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
@@ -29736,7 +30097,7 @@ def V6_vS32b_nt_new_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_11244923, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
@@ -29758,7 +30119,7 @@ def V6_vS32b_nt_new_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
@@ -29778,7 +30139,7 @@ def V6_vS32b_nt_new_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
@@ -29799,7 +30160,7 @@ def V6_vS32b_nt_new_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29818,7 +30179,7 @@ def V6_vS32b_nt_new_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29838,7 +30199,7 @@ def V6_vS32b_nt_new_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29859,7 +30220,7 @@ def V6_vS32b_nt_new_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29881,7 +30242,7 @@ def V6_vS32b_nt_new_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001010;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29901,7 +30262,7 @@ def V6_vS32b_nt_new_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001010;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29922,15 +30283,15 @@ def V6_vS32b_nt_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29939,15 +30300,15 @@ def V6_vS32b_nt_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29957,7 +30318,7 @@ def V6_vS32b_nt_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29965,8 +30326,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29976,7 +30337,7 @@ def V6_vS32b_nt_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29984,8 +30345,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29996,15 +30357,15 @@ def V6_vS32b_nt_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30014,15 +30375,15 @@ def V6_vS32b_nt_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30033,26 +30394,26 @@ def V6_vS32b_nt_nqpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vS32b_nt_nqpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -30060,14 +30421,14 @@ def V6_vS32b_nt_nqpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30075,14 +30436,14 @@ def V6_vS32b_nt_nqpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30091,13 +30452,13 @@ def V6_vS32b_nt_nqpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30105,13 +30466,13 @@ def V6_vS32b_nt_nqpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30120,14 +30481,14 @@ def V6_vS32b_nt_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30138,14 +30499,14 @@ def V6_vS32b_nt_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30157,13 +30518,13 @@ def V6_vS32b_nt_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30174,13 +30535,13 @@ def V6_vS32b_nt_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30192,14 +30553,14 @@ def V6_vS32b_nt_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30208,14 +30569,14 @@ def V6_vS32b_nt_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30225,15 +30586,15 @@ def V6_vS32b_nt_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30243,15 +30604,15 @@ def V6_vS32b_nt_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30262,14 +30623,14 @@ def V6_vS32b_nt_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30279,14 +30640,14 @@ def V6_vS32b_nt_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30297,26 +30658,26 @@ def V6_vS32b_nt_qpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vS32b_nt_qpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -30324,14 +30685,14 @@ def V6_vS32b_nt_qpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30339,14 +30700,14 @@ def V6_vS32b_nt_qpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30355,13 +30716,13 @@ def V6_vS32b_nt_qpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30369,13 +30730,13 @@ def V6_vS32b_nt_qpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30384,7 +30745,7 @@ def V6_vS32b_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -30401,7 +30762,7 @@ def V6_vS32b_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -30419,7 +30780,7 @@ def V6_vS32b_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -30434,7 +30795,7 @@ def V6_vS32b_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -30450,7 +30811,7 @@ def V6_vS32b_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -30465,7 +30826,7 @@ def V6_vS32b_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -30481,7 +30842,7 @@ def V6_vS32b_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -30498,7 +30859,7 @@ def V6_vS32b_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -30516,7 +30877,7 @@ def V6_vS32b_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -30531,7 +30892,7 @@ def V6_vS32b_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -30547,7 +30908,7 @@ def V6_vS32b_qpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -30559,7 +30920,7 @@ def V6_vS32b_qpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -30572,7 +30933,7 @@ def V6_vS32b_qpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -30586,7 +30947,7 @@ def V6_vS32b_qpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -30601,7 +30962,7 @@ def V6_vS32b_qpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -30614,7 +30975,7 @@ def V6_vS32b_qpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -30628,7 +30989,7 @@ def V6_vabsdiffh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30640,7 +31001,7 @@ def V6_vabsdiffh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30676,7 +31037,7 @@ def V6_vabsdiffub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vabsdiff($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30688,7 +31049,7 @@ def V6_vabsdiffub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vabsdiff($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30724,7 +31085,7 @@ def V6_vabsdiffuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.uh,$Vv32.uh)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30736,7 +31097,7 @@ def V6_vabsdiffuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.uh,$Vv32.uh)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30772,7 +31133,7 @@ def V6_vabsdiffw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vabsdiff($Vu32.w,$Vv32.w)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30784,7 +31145,7 @@ def V6_vabsdiffw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vabsdiff($Vu32.w,$Vv32.w)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30820,7 +31181,7 @@ def V6_vabsh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vabs($Vu32.h)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30832,7 +31193,7 @@ def V6_vabsh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vabs($Vu32.h)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30868,7 +31229,7 @@ def V6_vabsh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vabs($Vu32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30880,7 +31241,7 @@ def V6_vabsh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vabs($Vu32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30916,7 +31277,7 @@ def V6_vabsw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vabs($Vu32.w)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30928,7 +31289,7 @@ def V6_vabsw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vabs($Vu32.w)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30964,7 +31325,7 @@ def V6_vabsw_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vabs($Vu32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30976,7 +31337,7 @@ def V6_vabsw_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vabs($Vu32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -31012,7 +31373,7 @@ def V6_vaddb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31024,7 +31385,7 @@ def V6_vaddb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31060,7 +31421,7 @@ def V6_vaddb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31072,7 +31433,7 @@ def V6_vaddb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31108,7 +31469,7 @@ def V6_vaddbnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31123,7 +31484,7 @@ def V6_vaddbnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31166,7 +31527,7 @@ def V6_vaddbq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31181,7 +31542,7 @@ def V6_vaddbq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31224,7 +31585,7 @@ def V6_vaddbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -31236,7 +31597,7 @@ def V6_vaddbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -31272,7 +31633,7 @@ def V6_vaddbsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -31284,7 +31645,7 @@ def V6_vaddbsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -31320,7 +31681,7 @@ def V6_vaddcarry : HInst<
(outs VectorRegs:$Vd32, VecPredRegs:$Qx4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in),
"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -31335,7 +31696,7 @@ def V6_vaddcarry_128B : HInst<
(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in),
"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -31351,7 +31712,7 @@ def V6_vaddclbh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31363,7 +31724,7 @@ def V6_vaddclbh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31376,7 +31737,7 @@ def V6_vaddclbw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31388,7 +31749,7 @@ def V6_vaddclbw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31401,7 +31762,7 @@ def V6_vaddh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31413,7 +31774,7 @@ def V6_vaddh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31449,7 +31810,7 @@ def V6_vaddh_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31461,7 +31822,7 @@ def V6_vaddh_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31497,7 +31858,7 @@ def V6_vaddhnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31512,7 +31873,7 @@ def V6_vaddhnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31555,7 +31916,7 @@ def V6_vaddhq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31570,7 +31931,7 @@ def V6_vaddhq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31613,7 +31974,7 @@ def V6_vaddhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31625,7 +31986,7 @@ def V6_vaddhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31661,7 +32022,7 @@ def V6_vaddhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -31673,7 +32034,7 @@ def V6_vaddhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -31709,7 +32070,7 @@ def V6_vaddhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31721,7 +32082,7 @@ def V6_vaddhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31734,7 +32095,7 @@ def V6_vaddhw_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -31748,7 +32109,7 @@ def V6_vaddhw_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -31813,7 +32174,7 @@ def V6_vaddubh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31825,7 +32186,7 @@ def V6_vaddubh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31838,7 +32199,7 @@ def V6_vaddubh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -31852,7 +32213,7 @@ def V6_vaddubh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -31917,7 +32278,7 @@ def V6_vaddubsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31929,7 +32290,7 @@ def V6_vaddubsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31965,7 +32326,7 @@ def V6_vaddubsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.ub = vadd($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31977,7 +32338,7 @@ def V6_vaddubsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.ub = vadd($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32013,7 +32374,7 @@ def V6_vaddububb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32025,7 +32386,7 @@ def V6_vaddububb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32038,7 +32399,7 @@ def V6_vadduhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vadd($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32050,7 +32411,7 @@ def V6_vadduhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vadd($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32086,7 +32447,7 @@ def V6_vadduhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uh = vadd($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32098,7 +32459,7 @@ def V6_vadduhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uh = vadd($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32134,7 +32495,7 @@ def V6_vadduhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -32146,7 +32507,7 @@ def V6_vadduhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -32159,7 +32520,7 @@ def V6_vadduhw_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -32173,7 +32534,7 @@ def V6_vadduhw_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -32238,7 +32599,7 @@ def V6_vadduwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -32250,7 +32611,7 @@ def V6_vadduwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -32286,7 +32647,7 @@ def V6_vadduwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32298,7 +32659,7 @@ def V6_vadduwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32334,7 +32695,7 @@ def V6_vaddw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32346,7 +32707,7 @@ def V6_vaddw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32382,7 +32743,7 @@ def V6_vaddw_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32394,7 +32755,7 @@ def V6_vaddw_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32430,7 +32791,7 @@ def V6_vaddwnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32445,7 +32806,7 @@ def V6_vaddwnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32488,7 +32849,7 @@ def V6_vaddwq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32503,7 +32864,7 @@ def V6_vaddwq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32546,7 +32907,7 @@ def V6_vaddwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32558,7 +32919,7 @@ def V6_vaddwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32594,7 +32955,7 @@ def V6_vaddwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32606,7 +32967,7 @@ def V6_vaddwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32642,7 +33003,7 @@ def V6_valignb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = valign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -32654,7 +33015,7 @@ def V6_valignb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = valign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -32667,7 +33028,7 @@ def V6_valignbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32 = valign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -32678,7 +33039,7 @@ def V6_valignbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32 = valign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -32690,7 +33051,7 @@ def V6_vand : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vand($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -32702,7 +33063,7 @@ def V6_vand_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vand($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -32715,7 +33076,7 @@ def V6_vandnqrt : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX>, Enc_7b7ba8, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0001;
let Inst{31-21} = 0b00011001101;
@@ -32727,7 +33088,7 @@ def V6_vandnqrt_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX>, Enc_7b7ba8, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0001;
let Inst{31-21} = 0b00011001101;
@@ -32740,7 +33101,7 @@ def V6_vandnqrt_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX>, Enc_895bd9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b00011001011;
@@ -32754,7 +33115,7 @@ def V6_vandnqrt_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX>, Enc_895bd9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b00011001011;
@@ -32819,7 +33180,7 @@ def V6_vandqrt : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4711514, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_7b7ba8, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b00011001101;
@@ -32831,7 +33192,7 @@ def V6_vandqrt_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4711514, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_7b7ba8, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b00011001101;
@@ -32844,7 +33205,7 @@ def V6_vandqrt_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4944558, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_895bd9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b00011001011;
@@ -32858,7 +33219,7 @@ def V6_vandqrt_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4944558, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_895bd9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b00011001011;
@@ -32923,7 +33284,7 @@ def V6_vandvnqv : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vu32),
"$Vd32 = vand(!$Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32936,7 +33297,7 @@ def V6_vandvnqv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32),
"$Vd32 = vand(!$Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32950,7 +33311,7 @@ def V6_vandvqv : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vu32),
"$Vd32 = vand($Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32963,7 +33324,7 @@ def V6_vandvqv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32),
"$Vd32 = vand($Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32977,7 +33338,7 @@ def V6_vandvrt : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Qd4 = vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_11498120, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_0f8bab, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -32989,7 +33350,7 @@ def V6_vandvrt_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Qd4 = vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_11498120, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_0f8bab, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -33002,7 +33363,7 @@ def V6_vandvrt_acc : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Qx4 |= vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_10612292, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_adf111, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33016,7 +33377,7 @@ def V6_vandvrt_acc_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Qx4 |= vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_10612292, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_adf111, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33081,7 +33442,7 @@ def V6_vaslh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasl($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -33093,7 +33454,7 @@ def V6_vaslh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasl($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -33129,7 +33490,7 @@ def V6_vaslhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vasl($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33141,7 +33502,7 @@ def V6_vaslhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vasl($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33177,7 +33538,7 @@ def V6_vaslw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33189,7 +33550,7 @@ def V6_vaslw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33202,7 +33563,7 @@ def V6_vaslw_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33216,7 +33577,7 @@ def V6_vaslw_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33281,7 +33642,7 @@ def V6_vaslwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vasl($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33293,7 +33654,7 @@ def V6_vaslwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vasl($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33329,7 +33690,7 @@ def V6_vasrh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasr($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33341,7 +33702,7 @@ def V6_vasrh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasr($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33377,7 +33738,7 @@ def V6_vasrhbrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -33389,7 +33750,7 @@ def V6_vasrhbrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -33402,7 +33763,7 @@ def V6_vasrhbrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhb($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33412,7 +33773,7 @@ def V6_vasrhbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33424,7 +33785,7 @@ def V6_vasrhbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33437,7 +33798,7 @@ def V6_vasrhubrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33449,7 +33810,7 @@ def V6_vasrhubrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33462,7 +33823,7 @@ def V6_vasrhubrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhub($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33472,7 +33833,7 @@ def V6_vasrhubsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33484,7 +33845,7 @@ def V6_vasrhubsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33497,7 +33858,7 @@ def V6_vasrhubsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhub($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33507,7 +33868,7 @@ def V6_vasrhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vasr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33519,7 +33880,7 @@ def V6_vasrhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vasr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33555,7 +33916,7 @@ def V6_vasruwuhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33567,7 +33928,7 @@ def V6_vasruwuhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33580,7 +33941,7 @@ def V6_vasrw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33592,7 +33953,7 @@ def V6_vasrw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33605,7 +33966,7 @@ def V6_vasrw_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33619,7 +33980,7 @@ def V6_vasrw_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33684,7 +34045,7 @@ def V6_vasrwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8)",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33696,7 +34057,7 @@ def V6_vasrwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8)",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33709,7 +34070,7 @@ def V6_vasrwh_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8)",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33719,7 +34080,7 @@ def V6_vasrwhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33731,7 +34092,7 @@ def V6_vasrwhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33744,7 +34105,7 @@ def V6_vasrwhrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33754,7 +34115,7 @@ def V6_vasrwhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33766,7 +34127,7 @@ def V6_vasrwhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33779,7 +34140,7 @@ def V6_vasrwhsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33789,7 +34150,7 @@ def V6_vasrwuhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33801,7 +34162,7 @@ def V6_vasrwuhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33814,7 +34175,7 @@ def V6_vasrwuhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33826,7 +34187,7 @@ def V6_vasrwuhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33839,7 +34200,7 @@ def V6_vasrwuhsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwuh($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33849,7 +34210,7 @@ def V6_vasrwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vasr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33861,7 +34222,7 @@ def V6_vasrwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vasr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33897,7 +34258,7 @@ def V6_vassign : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000011;
@@ -33909,7 +34270,7 @@ def V6_vassign_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000011;
@@ -33922,7 +34283,7 @@ def V6_vassignp : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32),
"$Vdd32 = $Vuu32",
-CVI_VA, TypeCVI_VA>, Requires<[HasV60T,UseHVX]> {
+CVI_VA, TypeCVI_VA_DV>, Requires<[HasV60T,UseHVX]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33932,7 +34293,7 @@ def V6_vassignp_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32),
"$Vdd32 = $Vuu32",
-CVI_VA, TypeCVI_VA>, Requires<[HasV60T,UseHVX]> {
+CVI_VA, TypeCVI_VA_DV>, Requires<[HasV60T,UseHVX]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33943,7 +34304,7 @@ def V6_vavgh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -33955,7 +34316,7 @@ def V6_vavgh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -33991,7 +34352,7 @@ def V6_vavghrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34003,7 +34364,7 @@ def V6_vavghrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34039,7 +34400,7 @@ def V6_vavgub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34051,7 +34412,7 @@ def V6_vavgub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34087,7 +34448,7 @@ def V6_vavgubrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34099,7 +34460,7 @@ def V6_vavgubrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34135,7 +34496,7 @@ def V6_vavguh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34147,7 +34508,7 @@ def V6_vavguh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34183,7 +34544,7 @@ def V6_vavguhrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34195,7 +34556,7 @@ def V6_vavguhrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34231,7 +34592,7 @@ def V6_vavgw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34243,7 +34604,7 @@ def V6_vavgw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34279,7 +34640,7 @@ def V6_vavgwrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34291,7 +34652,7 @@ def V6_vavgwrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34327,7 +34688,7 @@ def V6_vccombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"if ($Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010011;
@@ -34340,7 +34701,7 @@ def V6_vccombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"if ($Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010011;
@@ -34354,7 +34715,7 @@ def V6_vcl0h : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.uh = vcl0($Vu32.uh)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34366,7 +34727,7 @@ def V6_vcl0h_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.uh = vcl0($Vu32.uh)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34402,7 +34763,7 @@ def V6_vcl0w : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.uw = vcl0($Vu32.uw)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34414,7 +34775,7 @@ def V6_vcl0w_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.uw = vcl0($Vu32.uw)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34450,7 +34811,7 @@ def V6_vcmov : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32),
"if ($Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000000000;
@@ -34463,7 +34824,7 @@ def V6_vcmov_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32),
"if ($Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000000000;
@@ -34477,7 +34838,7 @@ def V6_vcombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -34490,7 +34851,7 @@ def V6_vcombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -34527,7 +34888,7 @@ def V6_vdeal : HInst<
(outs VectorRegs:$Vy32, VectorRegs:$Vx32),
(ins VectorRegs:$Vy32in, VectorRegs:$Vx32in, IntRegs:$Rt32),
"vdeal($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -34542,7 +34903,7 @@ def V6_vdeal_128B : HInst<
(outs VectorRegs128B:$Vy32, VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vy32in, VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"vdeal($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -34558,7 +34919,7 @@ def V6_vdealb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.b = vdeal($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34570,7 +34931,7 @@ def V6_vdealb4w : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vdeale($Vu32.b,$Vv32.b)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34582,7 +34943,7 @@ def V6_vdealb4w_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vdeale($Vu32.b,$Vv32.b)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34618,7 +34979,7 @@ def V6_vdealb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.b = vdeal($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34654,7 +35015,7 @@ def V6_vdealh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vdeal($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34666,7 +35027,7 @@ def V6_vdealh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vdeal($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34702,7 +35063,7 @@ def V6_vdealvdd : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vdeal($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -34714,7 +35075,7 @@ def V6_vdealvdd_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vdeal($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -34727,7 +35088,7 @@ def V6_vdelta : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34739,7 +35100,7 @@ def V6_vdelta_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34752,7 +35113,7 @@ def V6_vdmpybus : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34764,7 +35125,7 @@ def V6_vdmpybus_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34777,7 +35138,7 @@ def V6_vdmpybus_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34791,7 +35152,7 @@ def V6_vdmpybus_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34856,7 +35217,7 @@ def V6_vdmpybus_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34868,7 +35229,7 @@ def V6_vdmpybus_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34881,7 +35242,7 @@ def V6_vdmpybus_dv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34895,7 +35256,7 @@ def V6_vdmpybus_dv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34960,7 +35321,7 @@ def V6_vdmpyhb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34972,7 +35333,7 @@ def V6_vdmpyhb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34985,7 +35346,7 @@ def V6_vdmpyhb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34999,7 +35360,7 @@ def V6_vdmpyhb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -35064,7 +35425,7 @@ def V6_vdmpyhb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35076,7 +35437,7 @@ def V6_vdmpyhb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35089,7 +35450,7 @@ def V6_vdmpyhb_dv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35103,7 +35464,7 @@ def V6_vdmpyhb_dv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35168,7 +35529,7 @@ def V6_vdmpyhisat : HInst<
(outs VectorRegs:$Vd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35180,7 +35541,7 @@ def V6_vdmpyhisat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35193,7 +35554,7 @@ def V6_vdmpyhisat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35207,7 +35568,7 @@ def V6_vdmpyhisat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35272,7 +35633,7 @@ def V6_vdmpyhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35284,7 +35645,7 @@ def V6_vdmpyhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35297,7 +35658,7 @@ def V6_vdmpyhsat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35311,7 +35672,7 @@ def V6_vdmpyhsat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35376,7 +35737,7 @@ def V6_vdmpyhsuisat : HInst<
(outs VectorRegs:$Vd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35388,7 +35749,7 @@ def V6_vdmpyhsuisat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35401,7 +35762,7 @@ def V6_vdmpyhsuisat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35415,7 +35776,7 @@ def V6_vdmpyhsuisat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35480,7 +35841,7 @@ def V6_vdmpyhsusat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35492,7 +35853,7 @@ def V6_vdmpyhsusat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35505,7 +35866,7 @@ def V6_vdmpyhsusat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35519,7 +35880,7 @@ def V6_vdmpyhsusat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35584,7 +35945,7 @@ def V6_vdmpyhvsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -35596,7 +35957,7 @@ def V6_vdmpyhvsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -35609,7 +35970,7 @@ def V6_vdmpyhvsat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -35623,7 +35984,7 @@ def V6_vdmpyhvsat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -35688,7 +36049,7 @@ def V6_vdsaduh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.uw = vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -35700,7 +36061,7 @@ def V6_vdsaduh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.uw = vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -35713,7 +36074,7 @@ def V6_vdsaduh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.uw += vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -35727,7 +36088,7 @@ def V6_vdsaduh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.uw += vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -35792,7 +36153,7 @@ def V6_veqb : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35804,7 +36165,7 @@ def V6_veqb_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35817,7 +36178,7 @@ def V6_veqb_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35830,7 +36191,7 @@ def V6_veqb_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35844,7 +36205,7 @@ def V6_veqb_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35858,7 +36219,7 @@ def V6_veqb_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35873,7 +36234,7 @@ def V6_veqb_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35886,7 +36247,7 @@ def V6_veqb_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35900,7 +36261,7 @@ def V6_veqh : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35912,7 +36273,7 @@ def V6_veqh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35925,7 +36286,7 @@ def V6_veqh_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35938,7 +36299,7 @@ def V6_veqh_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35952,7 +36313,7 @@ def V6_veqh_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35966,7 +36327,7 @@ def V6_veqh_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35981,7 +36342,7 @@ def V6_veqh_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35994,7 +36355,7 @@ def V6_veqh_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36008,7 +36369,7 @@ def V6_veqw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36020,7 +36381,7 @@ def V6_veqw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36033,7 +36394,7 @@ def V6_veqw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36046,7 +36407,7 @@ def V6_veqw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36060,7 +36421,7 @@ def V6_veqw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36074,7 +36435,7 @@ def V6_veqw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36089,7 +36450,7 @@ def V6_veqw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36102,7 +36463,7 @@ def V6_veqw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36116,7 +36477,7 @@ def V6_vgtb : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36128,7 +36489,7 @@ def V6_vgtb_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36141,7 +36502,7 @@ def V6_vgtb_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36154,7 +36515,7 @@ def V6_vgtb_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36168,7 +36529,7 @@ def V6_vgtb_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36182,7 +36543,7 @@ def V6_vgtb_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36197,7 +36558,7 @@ def V6_vgtb_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36210,7 +36571,7 @@ def V6_vgtb_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36224,7 +36585,7 @@ def V6_vgth : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36236,7 +36597,7 @@ def V6_vgth_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36249,7 +36610,7 @@ def V6_vgth_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36262,7 +36623,7 @@ def V6_vgth_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36276,7 +36637,7 @@ def V6_vgth_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36290,7 +36651,7 @@ def V6_vgth_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36305,7 +36666,7 @@ def V6_vgth_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36318,7 +36679,7 @@ def V6_vgth_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36332,7 +36693,7 @@ def V6_vgtub : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36344,7 +36705,7 @@ def V6_vgtub_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36357,7 +36718,7 @@ def V6_vgtub_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36370,7 +36731,7 @@ def V6_vgtub_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36384,7 +36745,7 @@ def V6_vgtub_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36398,7 +36759,7 @@ def V6_vgtub_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36413,7 +36774,7 @@ def V6_vgtub_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36426,7 +36787,7 @@ def V6_vgtub_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36440,7 +36801,7 @@ def V6_vgtuh : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36452,7 +36813,7 @@ def V6_vgtuh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36465,7 +36826,7 @@ def V6_vgtuh_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36478,7 +36839,7 @@ def V6_vgtuh_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36492,7 +36853,7 @@ def V6_vgtuh_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36506,7 +36867,7 @@ def V6_vgtuh_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36521,7 +36882,7 @@ def V6_vgtuh_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36534,7 +36895,7 @@ def V6_vgtuh_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36548,7 +36909,7 @@ def V6_vgtuw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36560,7 +36921,7 @@ def V6_vgtuw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36573,7 +36934,7 @@ def V6_vgtuw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36586,7 +36947,7 @@ def V6_vgtuw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36600,7 +36961,7 @@ def V6_vgtuw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36614,7 +36975,7 @@ def V6_vgtuw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36629,7 +36990,7 @@ def V6_vgtuw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36642,7 +37003,7 @@ def V6_vgtuw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36656,7 +37017,7 @@ def V6_vgtw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36668,7 +37029,7 @@ def V6_vgtw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36681,7 +37042,7 @@ def V6_vgtw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36694,7 +37055,7 @@ def V6_vgtw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36708,7 +37069,7 @@ def V6_vgtw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36722,7 +37083,7 @@ def V6_vgtw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36737,7 +37098,7 @@ def V6_vgtw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36750,7 +37111,7 @@ def V6_vgtw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36764,7 +37125,7 @@ def V6_vhist : HInst<
(outs),
(ins),
"vhist",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV60T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -36773,7 +37134,7 @@ def V6_vhist_128B : HInst<
(outs),
(ins),
"vhist",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV60T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -36783,7 +37144,7 @@ def V6_vhistq : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vhist($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV60T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -36793,7 +37154,7 @@ def V6_vhistq_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vhist($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV60T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -36804,7 +37165,7 @@ def V6_vinsertwr : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, IntRegs:$Rt32),
"$Vx32.w = vinsert($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_313333, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_569cfe, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b100000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -36816,7 +37177,7 @@ def V6_vinsertwr_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"$Vx32.w = vinsert($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_313333, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_569cfe, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b100000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -36829,7 +37190,7 @@ def V6_vlalignb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vlalign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -36841,7 +37202,7 @@ def V6_vlalignb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vlalign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -36854,7 +37215,7 @@ def V6_vlalignbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32 = vlalign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -36865,7 +37226,7 @@ def V6_vlalignbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32 = vlalign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -36877,7 +37238,7 @@ def V6_vlsrb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.ub = vlsr($Vu32.ub,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36889,7 +37250,7 @@ def V6_vlsrb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.ub = vlsr($Vu32.ub,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36902,7 +37263,7 @@ def V6_vlsrh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uh = vlsr($Vu32.uh,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36914,7 +37275,7 @@ def V6_vlsrh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uh = vlsr($Vu32.uh,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36950,7 +37311,7 @@ def V6_vlsrhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vlsr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -36962,7 +37323,7 @@ def V6_vlsrhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vlsr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -36998,7 +37359,7 @@ def V6_vlsrw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vlsr($Vu32.uw,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -37010,7 +37371,7 @@ def V6_vlsrw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vlsr($Vu32.uw,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -37046,7 +37407,7 @@ def V6_vlsrwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vlsr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -37058,7 +37419,7 @@ def V6_vlsrwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vlsr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -37094,7 +37455,7 @@ def V6_vlutvvb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37106,7 +37467,7 @@ def V6_vlutvvb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37119,7 +37480,7 @@ def V6_vlutvvb_nm : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37131,7 +37492,7 @@ def V6_vlutvvb_nm_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37144,7 +37505,7 @@ def V6_vlutvvb_oracc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8877260, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_245865, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37158,7 +37519,7 @@ def V6_vlutvvb_oracc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8877260, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_245865, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37173,7 +37534,7 @@ def V6_vlutvvb_oracci : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_cd4705, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100110;
let hasNewValue = 1;
@@ -37186,7 +37547,7 @@ def V6_vlutvvb_oracci_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_cd4705, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100110;
let hasNewValue = 1;
@@ -37200,7 +37561,7 @@ def V6_vlutvvbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -37211,7 +37572,7 @@ def V6_vlutvvbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -37223,7 +37584,7 @@ def V6_vlutvwh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37235,7 +37596,7 @@ def V6_vlutvwh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37248,7 +37609,7 @@ def V6_vlutvwh_nm : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37260,7 +37621,7 @@ def V6_vlutvwh_nm_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37273,7 +37634,7 @@ def V6_vlutvwh_oracc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_16213761, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_7b523d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37287,7 +37648,7 @@ def V6_vlutvwh_oracc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_16213761, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_7b523d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37302,7 +37663,7 @@ def V6_vlutvwh_oracci : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_1178da, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100111;
let hasNewValue = 1;
@@ -37315,7 +37676,7 @@ def V6_vlutvwh_oracci_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_1178da, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100111;
let hasNewValue = 1;
@@ -37329,7 +37690,7 @@ def V6_vlutvwhi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_4b39e4, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -37340,7 +37701,7 @@ def V6_vlutvwhi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_4b39e4, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -37352,7 +37713,7 @@ def V6_vmaxb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vmax($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37364,7 +37725,7 @@ def V6_vmaxb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vmax($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37400,7 +37761,7 @@ def V6_vmaxh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmax($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37412,7 +37773,7 @@ def V6_vmaxh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmax($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37448,7 +37809,7 @@ def V6_vmaxub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vmax($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37460,7 +37821,7 @@ def V6_vmaxub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vmax($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37496,7 +37857,7 @@ def V6_vmaxuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vmax($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37508,7 +37869,7 @@ def V6_vmaxuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vmax($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37544,7 +37905,7 @@ def V6_vmaxw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmax($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37556,7 +37917,7 @@ def V6_vmaxw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmax($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37592,7 +37953,7 @@ def V6_vminb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vmin($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37604,7 +37965,7 @@ def V6_vminb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vmin($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37640,7 +38001,7 @@ def V6_vminh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmin($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37652,7 +38013,7 @@ def V6_vminh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmin($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37688,7 +38049,7 @@ def V6_vminub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vmin($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37700,7 +38061,7 @@ def V6_vminub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vmin($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37736,7 +38097,7 @@ def V6_vminuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vmin($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37748,7 +38109,7 @@ def V6_vminuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vmin($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37784,7 +38145,7 @@ def V6_vminw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmin($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37796,7 +38157,7 @@ def V6_vminw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmin($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37832,7 +38193,7 @@ def V6_vmpabus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -37844,7 +38205,7 @@ def V6_vmpabus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -37857,7 +38218,7 @@ def V6_vmpabus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -37871,7 +38232,7 @@ def V6_vmpabus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -37936,7 +38297,7 @@ def V6_vmpabusv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -37948,7 +38309,7 @@ def V6_vmpabusv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -37984,7 +38345,7 @@ def V6_vmpabuuv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -37996,7 +38357,7 @@ def V6_vmpabuuv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -38032,7 +38393,7 @@ def V6_vmpahb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38044,7 +38405,7 @@ def V6_vmpahb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38057,7 +38418,7 @@ def V6_vmpahb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38071,7 +38432,7 @@ def V6_vmpahb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38136,7 +38497,7 @@ def V6_vmpauhb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -38148,7 +38509,7 @@ def V6_vmpauhb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -38161,7 +38522,7 @@ def V6_vmpauhb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -38175,7 +38536,7 @@ def V6_vmpauhb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -38240,7 +38601,7 @@ def V6_vmpybus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.h = vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38252,7 +38613,7 @@ def V6_vmpybus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.h = vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38265,7 +38626,7 @@ def V6_vmpybus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.h += vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38279,7 +38640,7 @@ def V6_vmpybus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.h += vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38344,7 +38705,7 @@ def V6_vmpybusv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38356,7 +38717,7 @@ def V6_vmpybusv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38369,7 +38730,7 @@ def V6_vmpybusv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38383,7 +38744,7 @@ def V6_vmpybusv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38448,7 +38809,7 @@ def V6_vmpybv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38460,7 +38821,7 @@ def V6_vmpybv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38473,7 +38834,7 @@ def V6_vmpybv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38487,7 +38848,7 @@ def V6_vmpybv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38552,7 +38913,7 @@ def V6_vmpyewuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -38564,7 +38925,7 @@ def V6_vmpyewuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -38577,7 +38938,7 @@ def V6_vmpyewuh_64 : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -38589,7 +38950,7 @@ def V6_vmpyewuh_64_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -38625,7 +38986,7 @@ def V6_vmpyh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.w = vmpy($Vu32.h,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38637,7 +38998,7 @@ def V6_vmpyh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.w = vmpy($Vu32.h,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38673,7 +39034,7 @@ def V6_vmpyhsat_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.w += vmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -38687,7 +39048,7 @@ def V6_vmpyhsat_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.w += vmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -38729,7 +39090,7 @@ def V6_vmpyhsrs : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38741,7 +39102,7 @@ def V6_vmpyhsrs_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38777,7 +39138,7 @@ def V6_vmpyhss : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38789,7 +39150,7 @@ def V6_vmpyhss_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38825,7 +39186,7 @@ def V6_vmpyhus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -38837,7 +39198,7 @@ def V6_vmpyhus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -38850,7 +39211,7 @@ def V6_vmpyhus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -38864,7 +39225,7 @@ def V6_vmpyhus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -38929,7 +39290,7 @@ def V6_vmpyhv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38941,7 +39302,7 @@ def V6_vmpyhv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38954,7 +39315,7 @@ def V6_vmpyhv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38968,7 +39329,7 @@ def V6_vmpyhv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -39033,7 +39394,7 @@ def V6_vmpyhvsrs : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmpy($Vu32.h,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39045,7 +39406,7 @@ def V6_vmpyhvsrs_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmpy($Vu32.h,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39081,7 +39442,7 @@ def V6_vmpyieoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyieo($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -39093,7 +39454,7 @@ def V6_vmpyieoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyieo($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -39106,7 +39467,7 @@ def V6_vmpyiewh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -39120,7 +39481,7 @@ def V6_vmpyiewh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -39162,7 +39523,7 @@ def V6_vmpyiewuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39174,7 +39535,7 @@ def V6_vmpyiewuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39187,7 +39548,7 @@ def V6_vmpyiewuh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39201,7 +39562,7 @@ def V6_vmpyiewuh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39266,7 +39627,7 @@ def V6_vmpyih : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39278,7 +39639,7 @@ def V6_vmpyih_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39291,7 +39652,7 @@ def V6_vmpyih_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.h += vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39305,7 +39666,7 @@ def V6_vmpyih_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.h += vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39370,7 +39731,7 @@ def V6_vmpyihb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -39382,7 +39743,7 @@ def V6_vmpyihb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -39395,7 +39756,7 @@ def V6_vmpyihb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -39409,7 +39770,7 @@ def V6_vmpyihb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -39474,7 +39835,7 @@ def V6_vmpyiowh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyio($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39486,7 +39847,7 @@ def V6_vmpyiowh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyio($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39522,7 +39883,7 @@ def V6_vmpyiwb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -39534,7 +39895,7 @@ def V6_vmpyiwb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -39547,7 +39908,7 @@ def V6_vmpyiwb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39561,7 +39922,7 @@ def V6_vmpyiwb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39626,7 +39987,7 @@ def V6_vmpyiwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39638,7 +39999,7 @@ def V6_vmpyiwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39651,7 +40012,7 @@ def V6_vmpyiwh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39665,7 +40026,7 @@ def V6_vmpyiwh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39730,7 +40091,7 @@ def V6_vmpyiwub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39742,7 +40103,7 @@ def V6_vmpyiwub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39755,7 +40116,7 @@ def V6_vmpyiwub_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -39769,7 +40130,7 @@ def V6_vmpyiwub_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -39834,7 +40195,7 @@ def V6_vmpyowh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -39846,7 +40207,7 @@ def V6_vmpyowh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -39859,7 +40220,7 @@ def V6_vmpyowh_64_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39873,7 +40234,7 @@ def V6_vmpyowh_64_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39911,7 +40272,7 @@ def V6_vmpyowh_rnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -39923,7 +40284,7 @@ def V6_vmpyowh_rnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -39959,7 +40320,7 @@ def V6_vmpyowh_rnd_sacc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39973,7 +40334,7 @@ def V6_vmpyowh_rnd_sacc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40013,7 +40374,7 @@ def V6_vmpyowh_sacc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40027,7 +40388,7 @@ def V6_vmpyowh_sacc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40067,7 +40428,7 @@ def V6_vmpyub : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.uh = vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001110;
@@ -40079,7 +40440,7 @@ def V6_vmpyub_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.uh = vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001110;
@@ -40092,7 +40453,7 @@ def V6_vmpyub_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.uh += vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -40106,7 +40467,7 @@ def V6_vmpyub_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.uh += vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -40171,7 +40532,7 @@ def V6_vmpyubv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.uh = vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -40183,7 +40544,7 @@ def V6_vmpyubv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.uh = vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -40196,7 +40557,7 @@ def V6_vmpyubv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.uh += vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -40210,7 +40571,7 @@ def V6_vmpyubv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.uh += vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -40275,7 +40636,7 @@ def V6_vmpyuh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.uw = vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -40287,7 +40648,7 @@ def V6_vmpyuh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.uw = vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -40300,7 +40661,7 @@ def V6_vmpyuh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.uw += vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -40314,7 +40675,7 @@ def V6_vmpyuh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.uw += vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -40379,7 +40740,7 @@ def V6_vmpyuhv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.uw = vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40391,7 +40752,7 @@ def V6_vmpyuhv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.uw = vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40404,7 +40765,7 @@ def V6_vmpyuhv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.uw += vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40418,7 +40779,7 @@ def V6_vmpyuhv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.uw += vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40483,7 +40844,7 @@ def V6_vmux : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qt4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vmux($Qt4,$Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_1572239, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_31db33, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110111;
@@ -40495,7 +40856,7 @@ def V6_vmux_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qt4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vmux($Qt4,$Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_1572239, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_31db33, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110111;
@@ -40508,7 +40869,7 @@ def V6_vnavgh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vnavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40520,7 +40881,7 @@ def V6_vnavgh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vnavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40556,7 +40917,7 @@ def V6_vnavgub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vnavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40568,7 +40929,7 @@ def V6_vnavgub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vnavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40604,7 +40965,7 @@ def V6_vnavgw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vnavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40616,7 +40977,7 @@ def V6_vnavgw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vnavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40652,7 +41013,7 @@ def V6_vnccombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"if (!$Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010010;
@@ -40666,7 +41027,7 @@ def V6_vnccombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"if (!$Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010010;
@@ -40681,7 +41042,7 @@ def V6_vncmov : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32),
"if (!$Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000100000;
@@ -40695,7 +41056,7 @@ def V6_vncmov_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32),
"if (!$Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000100000;
@@ -40710,7 +41071,7 @@ def V6_vnormamth : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vnormamt($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40722,7 +41083,7 @@ def V6_vnormamth_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vnormamt($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40758,7 +41119,7 @@ def V6_vnormamtw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vnormamt($Vu32.w)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40770,7 +41131,7 @@ def V6_vnormamtw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vnormamt($Vu32.w)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40806,7 +41167,7 @@ def V6_vnot : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32 = vnot($Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -40818,7 +41179,7 @@ def V6_vnot_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32 = vnot($Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -40831,7 +41192,7 @@ def V6_vor : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40843,7 +41204,7 @@ def V6_vor_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40856,7 +41217,7 @@ def V6_vpackeb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpacke($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40868,7 +41229,7 @@ def V6_vpackeb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpacke($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40904,7 +41265,7 @@ def V6_vpackeh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpacke($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40916,7 +41277,7 @@ def V6_vpackeh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpacke($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40952,7 +41313,7 @@ def V6_vpackhb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40964,7 +41325,7 @@ def V6_vpackhb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41000,7 +41361,7 @@ def V6_vpackhub_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41012,7 +41373,7 @@ def V6_vpackhub_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41048,7 +41409,7 @@ def V6_vpackob : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpacko($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41060,7 +41421,7 @@ def V6_vpackob_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpacko($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41096,7 +41457,7 @@ def V6_vpackoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpacko($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41108,7 +41469,7 @@ def V6_vpackoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpacko($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41144,7 +41505,7 @@ def V6_vpackwh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41156,7 +41517,7 @@ def V6_vpackwh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41192,7 +41553,7 @@ def V6_vpackwuh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41204,7 +41565,7 @@ def V6_vpackwuh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41240,7 +41601,7 @@ def V6_vpopcounth : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vpopcount($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -41252,7 +41613,7 @@ def V6_vpopcounth_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vpopcount($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -41288,7 +41649,7 @@ def V6_vrdelta : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vrdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -41300,7 +41661,7 @@ def V6_vrdelta_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vrdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -41313,7 +41674,7 @@ def V6_vrmpybus : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41325,7 +41686,7 @@ def V6_vrmpybus_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41338,7 +41699,7 @@ def V6_vrmpybus_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41352,7 +41713,7 @@ def V6_vrmpybus_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41417,7 +41778,7 @@ def V6_vrmpybusi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.w = vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -41429,7 +41790,7 @@ def V6_vrmpybusi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.w = vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -41442,7 +41803,7 @@ def V6_vrmpybusi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.w += vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -41456,7 +41817,7 @@ def V6_vrmpybusi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.w += vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -41521,7 +41882,7 @@ def V6_vrmpybusv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41533,7 +41894,7 @@ def V6_vrmpybusv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41546,7 +41907,7 @@ def V6_vrmpybusv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41560,7 +41921,7 @@ def V6_vrmpybusv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41625,7 +41986,7 @@ def V6_vrmpybv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41637,7 +41998,7 @@ def V6_vrmpybv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41650,7 +42011,7 @@ def V6_vrmpybv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41664,7 +42025,7 @@ def V6_vrmpybv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41729,7 +42090,7 @@ def V6_vrmpyub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41741,7 +42102,7 @@ def V6_vrmpyub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41754,7 +42115,7 @@ def V6_vrmpyub_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.uw += vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41768,7 +42129,7 @@ def V6_vrmpyub_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.uw += vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41833,7 +42194,7 @@ def V6_vrmpyubi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -41845,7 +42206,7 @@ def V6_vrmpyubi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -41858,7 +42219,7 @@ def V6_vrmpyubi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -41872,7 +42233,7 @@ def V6_vrmpyubi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -41937,7 +42298,7 @@ def V6_vrmpyubv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41949,7 +42310,7 @@ def V6_vrmpyubv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41962,7 +42323,7 @@ def V6_vrmpyubv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.uw += vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41976,7 +42337,7 @@ def V6_vrmpyubv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.uw += vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -42041,7 +42402,7 @@ def V6_vror : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32 = vror($Vu32,$Rt32)",
-CVI_VP, TypeCVI_VP>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_bf142ae2, TypeCVI_VP>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -42053,7 +42414,7 @@ def V6_vror_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32 = vror($Vu32,$Rt32)",
-CVI_VP, TypeCVI_VP>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_bf142ae2, TypeCVI_VP>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -42066,7 +42427,7 @@ def V6_vroundhb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42078,7 +42439,7 @@ def V6_vroundhb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42114,7 +42475,7 @@ def V6_vroundhub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42126,7 +42487,7 @@ def V6_vroundhub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42162,7 +42523,7 @@ def V6_vrounduhub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42174,7 +42535,7 @@ def V6_vrounduhub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42210,7 +42571,7 @@ def V6_vrounduwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42222,7 +42583,7 @@ def V6_vrounduwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42258,7 +42619,7 @@ def V6_vroundwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42270,7 +42631,7 @@ def V6_vroundwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42306,7 +42667,7 @@ def V6_vroundwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42318,7 +42679,7 @@ def V6_vroundwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42354,7 +42715,7 @@ def V6_vrsadubi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -42366,7 +42727,7 @@ def V6_vrsadubi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -42379,7 +42740,7 @@ def V6_vrsadubi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -42393,7 +42754,7 @@ def V6_vrsadubi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -42458,7 +42819,7 @@ def V6_vsathub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsat($Vu32.h,$Vv32.h)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42470,7 +42831,7 @@ def V6_vsathub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsat($Vu32.h,$Vv32.h)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42506,7 +42867,7 @@ def V6_vsatuwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -42518,7 +42879,7 @@ def V6_vsatuwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -42554,7 +42915,7 @@ def V6_vsatwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsat($Vu32.w,$Vv32.w)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42566,7 +42927,7 @@ def V6_vsatwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsat($Vu32.w,$Vv32.w)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42602,7 +42963,7 @@ def V6_vsb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.h = vsxt($Vu32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42614,7 +42975,7 @@ def V6_vsb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.h = vsxt($Vu32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42650,7 +43011,7 @@ def V6_vsh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.w = vsxt($Vu32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42662,7 +43023,7 @@ def V6_vsh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.w = vsxt($Vu32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42698,7 +43059,7 @@ def V6_vshufeh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vshuffe($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42710,7 +43071,7 @@ def V6_vshufeh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vshuffe($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42746,7 +43107,7 @@ def V6_vshuff : HInst<
(outs VectorRegs:$Vy32, VectorRegs:$Vx32),
(ins VectorRegs:$Vy32in, VectorRegs:$Vx32in, IntRegs:$Rt32),
"vshuff($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -42761,7 +43122,7 @@ def V6_vshuff_128B : HInst<
(outs VectorRegs128B:$Vy32, VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vy32in, VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"vshuff($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -42777,7 +43138,7 @@ def V6_vshuffb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.b = vshuff($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42789,7 +43150,7 @@ def V6_vshuffb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.b = vshuff($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42825,7 +43186,7 @@ def V6_vshuffeb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vshuffe($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42837,7 +43198,7 @@ def V6_vshuffeb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vshuffe($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42873,7 +43234,7 @@ def V6_vshuffh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vshuff($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -42885,7 +43246,7 @@ def V6_vshuffh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vshuff($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -42921,7 +43282,7 @@ def V6_vshuffob : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vshuffo($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42933,7 +43294,7 @@ def V6_vshuffob_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vshuffo($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42969,7 +43330,7 @@ def V6_vshuffvdd : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vshuff($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -42981,7 +43342,7 @@ def V6_vshuffvdd_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vshuff($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -42994,7 +43355,7 @@ def V6_vshufoeb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.b = vshuffoe($Vu32.b,$Vv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43006,7 +43367,7 @@ def V6_vshufoeb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.b = vshuffoe($Vu32.b,$Vv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43042,7 +43403,7 @@ def V6_vshufoeh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vshuffoe($Vu32.h,$Vv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43054,7 +43415,7 @@ def V6_vshufoeh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vshuffoe($Vu32.h,$Vv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43090,7 +43451,7 @@ def V6_vshufoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vshuffo($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43102,7 +43463,7 @@ def V6_vshufoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vshuffo($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43138,7 +43499,7 @@ def V6_vsubb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43150,7 +43511,7 @@ def V6_vsubb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43186,7 +43547,7 @@ def V6_vsubb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43198,7 +43559,7 @@ def V6_vsubb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43234,7 +43595,7 @@ def V6_vsubbnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43248,7 +43609,7 @@ def V6_vsubbnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43288,7 +43649,7 @@ def V6_vsubbq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43302,7 +43663,7 @@ def V6_vsubbq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43342,7 +43703,7 @@ def V6_vsubbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -43354,7 +43715,7 @@ def V6_vsubbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -43390,7 +43751,7 @@ def V6_vsubbsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43402,7 +43763,7 @@ def V6_vsubbsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43438,7 +43799,7 @@ def V6_vsubcarry : HInst<
(outs VectorRegs:$Vd32, VecPredRegs:$Qx4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in),
"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -43453,7 +43814,7 @@ def V6_vsubcarry_128B : HInst<
(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in),
"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -43469,7 +43830,7 @@ def V6_vsubh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43481,7 +43842,7 @@ def V6_vsubh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43517,7 +43878,7 @@ def V6_vsubh_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43529,7 +43890,7 @@ def V6_vsubh_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43565,7 +43926,7 @@ def V6_vsubhnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43579,7 +43940,7 @@ def V6_vsubhnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43619,7 +43980,7 @@ def V6_vsubhq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43633,7 +43994,7 @@ def V6_vsubhq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43673,7 +44034,7 @@ def V6_vsubhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43685,7 +44046,7 @@ def V6_vsubhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43721,7 +44082,7 @@ def V6_vsubhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43733,7 +44094,7 @@ def V6_vsubhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43769,7 +44130,7 @@ def V6_vsubhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vsub($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43781,7 +44142,7 @@ def V6_vsubhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vsub($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43817,7 +44178,7 @@ def V6_vsububh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vsub($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43829,7 +44190,7 @@ def V6_vsububh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vsub($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43865,7 +44226,7 @@ def V6_vsububsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43877,7 +44238,7 @@ def V6_vsububsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43913,7 +44274,7 @@ def V6_vsububsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.ub = vsub($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43925,7 +44286,7 @@ def V6_vsububsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.ub = vsub($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43961,7 +44322,7 @@ def V6_vsubububb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43973,7 +44334,7 @@ def V6_vsubububb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43986,7 +44347,7 @@ def V6_vsubuhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vsub($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43998,7 +44359,7 @@ def V6_vsubuhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vsub($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44034,7 +44395,7 @@ def V6_vsubuhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uh = vsub($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44046,7 +44407,7 @@ def V6_vsubuhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uh = vsub($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44082,7 +44443,7 @@ def V6_vsubuhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vsub($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44094,7 +44455,7 @@ def V6_vsubuhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vsub($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44130,7 +44491,7 @@ def V6_vsubuwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -44142,7 +44503,7 @@ def V6_vsubuwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -44178,7 +44539,7 @@ def V6_vsubuwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -44190,7 +44551,7 @@ def V6_vsubuwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -44226,7 +44587,7 @@ def V6_vsubw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -44238,7 +44599,7 @@ def V6_vsubw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -44274,7 +44635,7 @@ def V6_vsubw_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44286,7 +44647,7 @@ def V6_vsubw_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44322,7 +44683,7 @@ def V6_vsubwnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44336,7 +44697,7 @@ def V6_vsubwnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44376,7 +44737,7 @@ def V6_vsubwq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44390,7 +44751,7 @@ def V6_vsubwq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44430,7 +44791,7 @@ def V6_vsubwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44442,7 +44803,7 @@ def V6_vsubwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44478,7 +44839,7 @@ def V6_vsubwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44490,7 +44851,7 @@ def V6_vsubwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44526,7 +44887,7 @@ def V6_vswap : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecPredRegs:$Qt4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vswap($Qt4,$Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_11424254, Requires<[HasV60T,UseHVX]> {
+tc_316c637c, TypeCVI_VA_DV>, Enc_3dac0b, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110101;
@@ -44538,7 +44899,7 @@ def V6_vswap_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecPredRegs128B:$Qt4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vswap($Qt4,$Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_11424254, Requires<[HasV60T,UseHVX]> {
+tc_316c637c, TypeCVI_VA_DV>, Enc_3dac0b, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110101;
@@ -44551,7 +44912,7 @@ def V6_vtmpyb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44563,7 +44924,7 @@ def V6_vtmpyb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44576,7 +44937,7 @@ def V6_vtmpyb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44590,7 +44951,7 @@ def V6_vtmpyb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44655,7 +45016,7 @@ def V6_vtmpybus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44667,7 +45028,7 @@ def V6_vtmpybus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44680,7 +45041,7 @@ def V6_vtmpybus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44694,7 +45055,7 @@ def V6_vtmpybus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44759,7 +45120,7 @@ def V6_vtmpyhb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -44771,7 +45132,7 @@ def V6_vtmpyhb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -44784,7 +45145,7 @@ def V6_vtmpyhb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44798,7 +45159,7 @@ def V6_vtmpyhb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44892,7 +45253,7 @@ def V6_vunpackb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.h = vunpack($Vu32.b)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44904,7 +45265,7 @@ def V6_vunpackb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.h = vunpack($Vu32.b)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44940,7 +45301,7 @@ def V6_vunpackh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.w = vunpack($Vu32.h)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44952,7 +45313,7 @@ def V6_vunpackh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.w = vunpack($Vu32.h)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44988,7 +45349,7 @@ def V6_vunpackob : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32),
"$Vxx32.h |= vunpacko($Vu32.b)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45002,7 +45363,7 @@ def V6_vunpackob_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32),
"$Vxx32.h |= vunpacko($Vu32.b)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45042,7 +45403,7 @@ def V6_vunpackoh : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32),
"$Vxx32.w |= vunpacko($Vu32.h)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45056,7 +45417,7 @@ def V6_vunpackoh_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32),
"$Vxx32.w |= vunpacko($Vu32.h)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45098,7 +45459,7 @@ def V6_vunpackub : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uh = vunpack($Vu32.ub)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45110,7 +45471,7 @@ def V6_vunpackub_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uh = vunpack($Vu32.ub)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45146,7 +45507,7 @@ def V6_vunpackuh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uw = vunpack($Vu32.uh)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45158,7 +45519,7 @@ def V6_vunpackuh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uw = vunpack($Vu32.uh)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45194,7 +45555,7 @@ def V6_vwhist128 : HInst<
(outs),
(ins),
"vwhist128",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45203,7 +45564,7 @@ def V6_vwhist128_128B : HInst<
(outs),
(ins),
"vwhist128",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45213,7 +45574,7 @@ def V6_vwhist128m : HInst<
(outs),
(ins u1_0Imm:$Ii),
"vwhist128(#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> {
+tc_b77635b4, TypeCVI_HIST>, Enc_efaed8, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{31-16} = 0b0001111000000000;
@@ -45223,7 +45584,7 @@ def V6_vwhist128m_128B : HInst<
(outs),
(ins u1_0Imm:$Ii),
"vwhist128(#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> {
+tc_b77635b4, TypeCVI_HIST>, Enc_efaed8, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{31-16} = 0b0001111000000000;
@@ -45234,7 +45595,7 @@ def V6_vwhist128q : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist128($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45244,7 +45605,7 @@ def V6_vwhist128q_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist128($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45255,7 +45616,7 @@ def V6_vwhist128qm : HInst<
(outs),
(ins VecPredRegs:$Qv4, u1_0Imm:$Ii),
"vwhist128($Qv4,#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> {
+tc_28978789, TypeCVI_HIST>, Enc_802dc0, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{21-16} = 0b000010;
@@ -45266,7 +45627,7 @@ def V6_vwhist128qm_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, u1_0Imm:$Ii),
"vwhist128($Qv4,#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> {
+tc_28978789, TypeCVI_HIST>, Enc_802dc0, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{21-16} = 0b000010;
@@ -45278,7 +45639,7 @@ def V6_vwhist256 : HInst<
(outs),
(ins),
"vwhist256",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45287,7 +45648,7 @@ def V6_vwhist256_128B : HInst<
(outs),
(ins),
"vwhist256",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45297,7 +45658,7 @@ def V6_vwhist256_sat : HInst<
(outs),
(ins),
"vwhist256:sat",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45306,7 +45667,7 @@ def V6_vwhist256_sat_128B : HInst<
(outs),
(ins),
"vwhist256:sat",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45316,7 +45677,7 @@ def V6_vwhist256q : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist256($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45326,7 +45687,7 @@ def V6_vwhist256q_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist256($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45337,7 +45698,7 @@ def V6_vwhist256q_sat : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist256($Qv4):sat",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45347,7 +45708,7 @@ def V6_vwhist256q_sat_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist256($Qv4):sat",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45358,7 +45719,7 @@ def V6_vxor : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vxor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -45370,7 +45731,7 @@ def V6_vxor_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vxor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -45383,7 +45744,7 @@ def V6_vzb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uh = vzxt($Vu32.ub)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45395,7 +45756,7 @@ def V6_vzb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uh = vzxt($Vu32.ub)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45431,7 +45792,7 @@ def V6_vzh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uw = vzxt($Vu32.uh)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45443,7 +45804,7 @@ def V6_vzh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uw = vzxt($Vu32.uh)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45479,7 +45840,7 @@ def Y2_barrier : HInst<
(outs),
(ins),
"barrier",
-ST_tc_3stall_SLOT0, TypeST>, Enc_0 {
+tc_ef2676fd, TypeST>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b1010100000000000;
let isSoloAX = 1;
@@ -45489,7 +45850,7 @@ def Y2_break : HInst<
(outs),
(ins),
"brkpt",
-CR_tc_3x_SLOT3, TypeCR>, Enc_0 {
+tc_bcf0e36e, TypeCR>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b0110110000100000;
let isSolo = 1;
@@ -45498,7 +45859,7 @@ def Y2_dccleana : HInst<
(outs),
(ins IntRegs:$Rs32),
"dccleana($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000000;
let isSoloAin1 = 1;
@@ -45507,7 +45868,7 @@ def Y2_dccleaninva : HInst<
(outs),
(ins IntRegs:$Rs32),
"dccleaninva($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000010;
let isSoloAin1 = 1;
@@ -45516,7 +45877,7 @@ def Y2_dcfetch : HInst<
(outs),
(ins IntRegs:$Rs32),
"dcfetch($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_34e882a4, TypeMAPPING> {
let hasSideEffects = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -45525,7 +45886,7 @@ def Y2_dcfetchbo : HInst<
(outs),
(ins IntRegs:$Rs32, u11_3Imm:$Ii),
"dcfetch($Rs32+#$Ii)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4983213 {
+tc_ef0ebaaa, TypeLD>, Enc_2d829e {
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10010100000;
let addrMode = BaseImmOffset;
@@ -45535,7 +45896,7 @@ def Y2_dcinva : HInst<
(outs),
(ins IntRegs:$Rs32),
"dcinva($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000001;
let isSoloAin1 = 1;
@@ -45544,17 +45905,17 @@ def Y2_dczeroa : HInst<
(outs),
(ins IntRegs:$Rs32),
"dczeroa($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000110;
-let mayStore = 1;
let isSoloAin1 = 1;
+let mayStore = 1;
}
def Y2_icinva : HInst<
(outs),
(ins IntRegs:$Rs32),
"icinva($Rs32)",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_049dfb74, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010110110;
let isSolo = 1;
@@ -45563,7 +45924,7 @@ def Y2_isync : HInst<
(outs),
(ins),
"isync",
-J_tc_2early_SLOT2, TypeJ>, Enc_0 {
+tc_d267fa19, TypeJ>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000010;
let Inst{31-16} = 0b0101011111000000;
let isSolo = 1;
@@ -45572,7 +45933,7 @@ def Y2_syncht : HInst<
(outs),
(ins),
"syncht",
-ST_tc_ld_SLOT0, TypeST>, Enc_0 {
+tc_ef2676fd, TypeST>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b1010100001000000;
let isSolo = 1;
@@ -45581,7 +45942,7 @@ def Y4_l2fetch : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"l2fetch($Rs32,$Rt32)",
-ST_tc_3stall_SLOT0, TypeST>, Enc_14620934 {
+tc_f4608adc, TypeST>, Enc_ca3887 {
let Inst{7-0} = 0b00000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100110000;
@@ -45593,7 +45954,7 @@ def Y4_trace : HInst<
(outs),
(ins IntRegs:$Rs32),
"trace($Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_11704059 {
+tc_4997da4a, TypeCR>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01100010010;
let isSoloAX = 1;
@@ -45602,7 +45963,7 @@ def Y5_l2fetch : HInst<
(outs),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"l2fetch($Rs32,$Rtt32)",
-ST_tc_3stall_SLOT0, TypeST>, Enc_8943121, Requires<[HasV5T]> {
+tc_f4608adc, TypeST>, Enc_e6abcf, Requires<[HasV5T]> {
let Inst{7-0} = 0b00000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100110100;
@@ -45614,31 +45975,33 @@ def dep_A2_addsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32):sat:deprecated",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def dep_A2_subsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32):sat:deprecated",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def dep_S2_packhl : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = packhl($Rs32,$Rt32):deprecated",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_1997594 {
+tc_9c18c9a5, TypeALU64>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010100000;
diff --git a/lib/Target/Hexagon/HexagonDepTimingClasses.h b/lib/Target/Hexagon/HexagonDepTimingClasses.h
new file mode 100644
index 000000000000..52963034543d
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonDepTimingClasses.h
@@ -0,0 +1,132 @@
+//===--- HexagonDepTimingClasses.h ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+static bool is_TC3x(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_1000eb10:
+ case Hexagon::Sched::tc_2aaab1e0:
+ case Hexagon::Sched::tc_4997da4a:
+ case Hexagon::Sched::tc_5d806107:
+ case Hexagon::Sched::tc_6264c5e0:
+ case Hexagon::Sched::tc_69bb508b:
+ case Hexagon::Sched::tc_8c8041e6:
+ case Hexagon::Sched::tc_8cb685d9:
+ case Hexagon::Sched::tc_a12a5971:
+ case Hexagon::Sched::tc_ae0722f7:
+ case Hexagon::Sched::tc_ae2c2dc2:
+ case Hexagon::Sched::tc_bc5561d8:
+ case Hexagon::Sched::tc_d6a805a8:
+ case Hexagon::Sched::tc_f055fbb6:
+ case Hexagon::Sched::tc_feb4974b:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC2early(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_35fb9d13:
+ case Hexagon::Sched::tc_cbe45117:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC4x(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_09c86199:
+ case Hexagon::Sched::tc_2d1e6f5c:
+ case Hexagon::Sched::tc_2e55aa16:
+ case Hexagon::Sched::tc_3bea1824:
+ case Hexagon::Sched::tc_e836c161:
+ case Hexagon::Sched::tc_f1aa2cdb:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC2(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_090485bb:
+ case Hexagon::Sched::tc_1fe8323c:
+ case Hexagon::Sched::tc_37326008:
+ case Hexagon::Sched::tc_3c10f809:
+ case Hexagon::Sched::tc_47ab9233:
+ case Hexagon::Sched::tc_485bb57c:
+ case Hexagon::Sched::tc_511f28f6:
+ case Hexagon::Sched::tc_583510c7:
+ case Hexagon::Sched::tc_63cd9d2d:
+ case Hexagon::Sched::tc_76c4c5ef:
+ case Hexagon::Sched::tc_7ca2ea10:
+ case Hexagon::Sched::tc_87601822:
+ case Hexagon::Sched::tc_88fa2da6:
+ case Hexagon::Sched::tc_94e6ffd9:
+ case Hexagon::Sched::tc_ab1b5e74:
+ case Hexagon::Sched::tc_b0f50e3c:
+ case Hexagon::Sched::tc_bd16579e:
+ case Hexagon::Sched::tc_c0cd91a8:
+ case Hexagon::Sched::tc_ca280e8b:
+ case Hexagon::Sched::tc_cd321066:
+ case Hexagon::Sched::tc_d95f4e98:
+ case Hexagon::Sched::tc_e17ce9ad:
+ case Hexagon::Sched::tc_f1240c08:
+ case Hexagon::Sched::tc_faab1248:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC1(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_07ac815d:
+ case Hexagon::Sched::tc_1b6011fb:
+ case Hexagon::Sched::tc_1b834fe7:
+ case Hexagon::Sched::tc_1e062b18:
+ case Hexagon::Sched::tc_1f9668cc:
+ case Hexagon::Sched::tc_43068634:
+ case Hexagon::Sched::tc_47f0b7ad:
+ case Hexagon::Sched::tc_537e2013:
+ case Hexagon::Sched::tc_548f402d:
+ case Hexagon::Sched::tc_5fa2857c:
+ case Hexagon::Sched::tc_5fe9fcd0:
+ case Hexagon::Sched::tc_78b3c689:
+ case Hexagon::Sched::tc_7c2dcd4d:
+ case Hexagon::Sched::tc_81a23d44:
+ case Hexagon::Sched::tc_821c4233:
+ case Hexagon::Sched::tc_92d1833c:
+ case Hexagon::Sched::tc_9a13af9d:
+ case Hexagon::Sched::tc_9c18c9a5:
+ case Hexagon::Sched::tc_9df8b0dc:
+ case Hexagon::Sched::tc_9f518242:
+ case Hexagon::Sched::tc_a1fb80e1:
+ case Hexagon::Sched::tc_a333d2a9:
+ case Hexagon::Sched::tc_a87879e8:
+ case Hexagon::Sched::tc_aad55963:
+ case Hexagon::Sched::tc_b08b653e:
+ case Hexagon::Sched::tc_b324366f:
+ case Hexagon::Sched::tc_b5bfaa60:
+ case Hexagon::Sched::tc_b86c7e8b:
+ case Hexagon::Sched::tc_c58f771a:
+ case Hexagon::Sched::tc_d108a090:
+ case Hexagon::Sched::tc_d1b5a4b6:
+ case Hexagon::Sched::tc_d2609065:
+ case Hexagon::Sched::tc_d63b71d1:
+ case Hexagon::Sched::tc_e2c31426:
+ case Hexagon::Sched::tc_e8c7a357:
+ case Hexagon::Sched::tc_eb07ef6f:
+ case Hexagon::Sched::tc_f16d5b17:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/lib/Target/Hexagon/HexagonIICHVX.td b/lib/Target/Hexagon/HexagonIICHVX.td
index 4081a225832b..1493d52f08e8 100644
--- a/lib/Target/Hexagon/HexagonIICHVX.td
+++ b/lib/Target/Hexagon/HexagonIICHVX.td
@@ -7,96 +7,12 @@
//
//===----------------------------------------------------------------------===//
-//
-// Though all these itinerary classes exist for V60 onwards, they are being
-// listed here as 'HVXV62Itin' because itinerary class description prior to V62
-// doesn't include operand cycle info. In future, I plan to merge them
-// together and call it 'HVXItin'.
-//
-class HVXV62Itin {
- list<InstrItinData> HVXV62Itin_list = [
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23,
- [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_SLOT23,
- [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CVI_VA, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE,CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VA_DV, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF, CVI_MPY01]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_LATE, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_SLOT2, [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_SLOT2_LONG_EARLY,
- [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_EARLY, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_LONG_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_DV, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VS, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VINLANESAT, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_LD, [InstrStage<1, [SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_TMP_LD, [InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD]>],[1, 1, 1, 1, 10]>,
- InstrItinData<CVI_VM_CUR_LD, [InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_VP_LDU, [InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_ST, [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_NEW_ST, [InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [CVI_ST]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_STU, [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_HIST, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_ALL]>], [1, 1, 1, 1]>];
+def CVI_VA : InstrItinClass;
+
+class HVXItin {
+ list<InstrItinData> HVXItin_list = [
+ InstrItinData<CVI_VA,
+ [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE,CVI_SHIFT, CVI_MPY0, CVI_MPY1]>],
+ [9, 7, 7, 7], [HVX_FWD, HVX_FWD, HVX_FWD]>];
}
diff --git a/lib/Target/Hexagon/HexagonIICScalar.td b/lib/Target/Hexagon/HexagonIICScalar.td
index e69cfbdad688..5fe713346e38 100644
--- a/lib/Target/Hexagon/HexagonIICScalar.td
+++ b/lib/Target/Hexagon/HexagonIICScalar.td
@@ -11,154 +11,22 @@
// classes as per V62. Currently, they are just extracted from
// HexagonScheduleV62.td but will soon be auto-generated by HexagonGen.py.
+class PseudoItin {
+ list<InstrItinData> PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>], [2]>
+ ];
+}
+
class ScalarItin {
list<InstrItinData> ScalarItin_list = [
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123, [InstrStage<1,
- [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [4, 1]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [3, 1]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [3, 1, 1]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [3, 1, 1]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60.
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
- [3, 1, 1, 1]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
- [2]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
-
- // Duplex and Compound
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
- // Misc
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDOM , [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>];
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [3, 1], [Hex_FWD, Hex_FWD]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1], [Hex_FWD, Hex_FWD, Hex_FWD]>
+ ];
}
diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
index 709d64585c0b..636a439ba6a9 100644
--- a/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -188,30 +188,10 @@ class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-class PseudoLDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-
class CONSTLDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : PseudoLDInst<outs, ins, asmstr, pattern, cstr>;
-
-// LD Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayLoad = 1 in
-class LD0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
+ string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-let mayLoad = 1 in
-class LD1Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>;
-
// ST Instruction Class in V2/V3 can take SLOT0 only.
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
@@ -220,124 +200,9 @@ class STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>, OpcodeHexagon;
-let mayStore = 1 in
-class STInst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
-
-class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayStore = 1 in
-class ST0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_ld_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>, OpcodeHexagon;
-
-// Same as ST0Inst but doesn't derive from OpcodeHexagon.
-let mayStore = 1 in
-class ST1Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
-
-// ST Instruction Class in V2/V3 can take SLOT0 only.
-// ST Instruction Class in V4 can take SLOT0 & SLOT1.
-// Definition of the instruction class CHANGED from V2/V3 to V4.
-class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
- : STInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// ALU64 Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
-class ALU64Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU64>,
- OpcodeHexagon;
-
-// ALU64 Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
-class ALU64Inst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU64>;
-
-
-class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-
-// M Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
-class MInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeM>,
- OpcodeHexagon;
-
-// Same as above but doesn't derive from OpcodeHexagon
-class MInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeM>;
-
-// M Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
-class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_2_SLOT23>
- : MInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// S Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
-class SInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>,
- OpcodeHexagon;
-
-class SInst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>;
-
-class SInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>;
-
-// S Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
-class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_3op_tc_1_SLOT23>
- : SInst<outs, ins, asmstr, pattern, cstr, itin> {
- let Type = TypeS_3op;
-}
-
-// J Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class JInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
-
-class JInst_CJUMP_UCJUMP<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
-
-// CR Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CR_tc_2early_SLOT3>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCR>, OpcodeHexagon;
-
let isCodeGenOnly = 1, isPseudo = 1 in
class Endloop<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_SLOT0123>
+ string cstr = "", InstrItinClass itin = tc_ENDLOOP>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeENDLOOP>,
OpcodeHexagon;
@@ -357,27 +222,6 @@ class PseudoM<dag outs, dag ins, string asmstr, list<dag> pattern = [],
// Instruction Classes Definitions -
//===----------------------------------------------------------------------===//
-//
-// ALU64 patterns.
-//
-class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Post increment ST Instruction.
-class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
-// Post increment LD Instruction.
-class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
//===----------------------------------------------------------------------===//
// V4 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
@@ -385,7 +229,7 @@ class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
include "HexagonInstrFormatsV4.td"
//===----------------------------------------------------------------------===//
-// V4 Instruction Format Definitions +
+// V55 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -395,5 +239,5 @@ include "HexagonInstrFormatsV4.td"
include "HexagonInstrFormatsV60.td"
//===----------------------------------------------------------------------===//
-// V60 Instruction Format Definitions +
+// V62 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index 1fdf930c62fd..c5fa25995212 100644
--- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -1,4 +1,4 @@
-//==- HexagonInstrFormats.td - Hexagon Instruction Formats --*- tablegen -*-==//
+//==- HexagonInstrFormatsV4.td - Hexagon Instruction Formats --*- tablegen -==//
//
// The LLVM Compiler Infrastructure
//
@@ -85,64 +85,3 @@ class InstDuplex<bits<4> iClass, list<dag> pattern = [],
bits<2> opExtentAlign = 0;
let TSFlags{28-27} = opExtentAlign; // Alignment exponent before extending.
}
-
-//----------------------------------------------------------------------------//
-// Instruction Classes Definitions
-//----------------------------------------------------------------------------//
-
-//
-// NV type instructions.
-//
-class NVInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeNCJ>, OpcodeHexagon;
-
-class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Definition of Post increment new value store.
-class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Post increment ST Instruction.
-let mayStore = 1 in
-class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// New-value conditional branch.
-class NCJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : NVInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayLoad = 1, mayStore = 1 in
-class MEMInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeV4LDST>,
- OpcodeHexagon;
-
-class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
- : MEMInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-class EXTENDERInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
- : InstHexagon<outs, ins, asmstr, pattern, "", EXTENDER_tc_1_SLOT0123,
- TypeEXTENDER>, OpcodeHexagon;
-
-class SUBInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypeDUPLEX>,
- OpcodeHexagon;
-
-class CJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>,
- OpcodeHexagon;
-
-class CJInst_JMPSET<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND, TypeCJ>,
- OpcodeHexagon;
-
diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV60.td b/lib/Target/Hexagon/HexagonInstrFormatsV60.td
index b913727972e5..14bda0e0107d 100644
--- a/lib/Target/Hexagon/HexagonInstrFormatsV60.td
+++ b/lib/Target/Hexagon/HexagonInstrFormatsV60.td
@@ -20,183 +20,3 @@ class CVI_VA_Resource<dag outs, dag ins, string asmstr,
InstrItinClass itin = CVI_VA>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA>,
OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VA_DV_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VA_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource_late<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_LATE>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Slot2_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV_SLOT2>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_early<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_EARLY>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_long_early<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_LONG_EARLY>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VS_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VS>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VINLANESAT_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VINLANESAT>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VINLANESAT>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VS_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VS>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_LD_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_LD_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_TMP_LD_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_TMP_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_TMP_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_TMP_LD_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_TMP_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_TMP_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_VP_LDU_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_VP_LDU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_VP_LDU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_VP_LDU_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_VP_LDU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_VP_LDU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_ST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_ST_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_NEW_ST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_NEW_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_NEW_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_NEW_ST_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_NEW_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_NEW_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_STU_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_STU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_STU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_STU_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_STU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_STU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_HIST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_HIST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_HIST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VA_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VA>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_HIST_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_HIST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_HIST>,
- Requires<[HasV60T, UseHVX]>;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 852bfb1b4f54..03794511414e 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -59,6 +59,7 @@ using namespace llvm;
#define GET_INSTRMAP_INFO
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
+#include "HexagonDepTimingClasses.h"
cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
@@ -1466,7 +1467,15 @@ bool HexagonInstrInfo::DefinesPredicate(
}
bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
- return MI.getDesc().isPredicable();
+ if (!MI.getDesc().isPredicable())
+ return false;
+
+ if (MI.isCall() || isTailCall(MI)) {
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ if (!MF.getSubtarget<HexagonSubtarget>().usePredicatedCalls())
+ return false;
+ }
+ return true;
}
bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
@@ -1643,6 +1652,7 @@ unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return getInstrTimingClassLatency(ItinData, MI);
}
+
DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
const TargetSubtargetInfo &STI) const {
const InstrItineraryData *II = STI.getInstrItineraryData();
@@ -2047,9 +2057,7 @@ bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const {
// Multiply
unsigned SchedClass = MI.getDesc().getSchedClass();
- if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
- return true;
- return false;
+ return is_TC4x(SchedClass) || is_TC3x(SchedClass);
}
bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
@@ -2117,7 +2125,7 @@ bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
// No V60 HVX VMEM with A_INDIRECT.
bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
const MachineInstr &J) const {
- if (!isV60VectorInstruction(I))
+ if (!isHVXVec(I))
return false;
if (!I.mayLoad() && !I.mayStore())
return false;
@@ -2241,30 +2249,13 @@ bool HexagonInstrInfo::isLateResultInstr(const MachineInstr &MI) const {
}
unsigned SchedClass = MI.getDesc().getSchedClass();
-
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
- case Hexagon::Sched::ALU64_tc_1_SLOT23:
- case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
- case Hexagon::Sched::S_2op_tc_1_SLOT23:
- case Hexagon::Sched::S_3op_tc_1_SLOT23:
- case Hexagon::Sched::V2LDST_tc_ld_SLOT01:
- case Hexagon::Sched::V2LDST_tc_st_SLOT0:
- case Hexagon::Sched::V2LDST_tc_st_SLOT01:
- case Hexagon::Sched::V4LDST_tc_ld_SLOT01:
- case Hexagon::Sched::V4LDST_tc_st_SLOT0:
- case Hexagon::Sched::V4LDST_tc_st_SLOT01:
- return false;
- }
- return true;
+ return !is_TC1(SchedClass);
}
bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
// Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
// resource, but all operands can be received late like an ALU instruction.
- return MI.getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
+ return getType(MI) == HexagonII::TypeCVI_VX_LATE;
}
bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
@@ -2507,61 +2498,22 @@ bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
// Returns true when SU has a timing class TC1.
bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
- case Hexagon::Sched::ALU64_tc_1_SLOT23:
- case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
- //case Hexagon::Sched::M_tc_1_SLOT23:
- case Hexagon::Sched::S_2op_tc_1_SLOT23:
- case Hexagon::Sched::S_3op_tc_1_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC1(SchedClass);
}
bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
- case Hexagon::Sched::ALU64_tc_2_SLOT23:
- case Hexagon::Sched::CR_tc_2_SLOT3:
- case Hexagon::Sched::M_tc_2_SLOT23:
- case Hexagon::Sched::S_2op_tc_2_SLOT23:
- case Hexagon::Sched::S_3op_tc_2_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC2(SchedClass);
}
bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
- case Hexagon::Sched::ALU64_tc_2early_SLOT23:
- case Hexagon::Sched::CR_tc_2early_SLOT23:
- case Hexagon::Sched::CR_tc_2early_SLOT3:
- case Hexagon::Sched::J_tc_2early_SLOT0123:
- case Hexagon::Sched::J_tc_2early_SLOT2:
- case Hexagon::Sched::J_tc_2early_SLOT23:
- case Hexagon::Sched::S_2op_tc_2early_SLOT23:
- case Hexagon::Sched::S_3op_tc_2early_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC2early(SchedClass);
}
bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
+ return is_TC4x(SchedClass);
}
// Schedule this ASAP.
@@ -2583,7 +2535,7 @@ bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
return false;
}
-bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr &MI) const {
+bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
const uint64_t V = getType(MI);
return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
}
@@ -2782,7 +2734,7 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
}
bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
- return isV60VectorInstruction(MI) && isAccumulator(MI);
+ return isHVXVec(MI) && isAccumulator(MI);
}
bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
@@ -2888,7 +2840,7 @@ bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
// Add latency to instruction.
bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
const MachineInstr &MI2) const {
- if (isV60VectorInstruction(MI1) && isV60VectorInstruction(MI2))
+ if (isHVXVec(MI1) && isHVXVec(MI2))
if (!isVecUsableNextPacket(MI1, MI2))
return true;
return false;
@@ -3013,7 +2965,7 @@ bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
const MachineInstr &ConsMI) const {
// There is no stall when ProdMI is not a V60 vector.
- if (!isV60VectorInstruction(ProdMI))
+ if (!isHVXVec(ProdMI))
return false;
// There is no stall when ProdMI and ConsMI are not dependent.
@@ -3031,7 +2983,7 @@ bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
MachineBasicBlock::const_instr_iterator BII) const {
// There is no stall when I is not a V60 vector.
- if (!isV60VectorInstruction(MI))
+ if (!isHVXVec(MI))
return false;
MachineBasicBlock::const_instr_iterator MII = BII;
@@ -3415,7 +3367,6 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
// p.old store
// [if (p0)memw(R0+#0)=R2]
//
-//
// The following set of instructions further explains the scenario where
// conditional new-value store becomes invalid when promoted to .new predicate
// form.
@@ -4025,18 +3976,53 @@ unsigned HexagonInstrInfo::getInstrTimingClassLatency(
if (!ItinData)
return getInstrLatency(ItinData, MI);
- // Get the latency embedded in the itinerary. If we're not using timing class
- // latencies or if we using BSB scheduling, then restrict the maximum latency
- // to 1 (that is, either 0 or 1).
if (MI.isTransient())
return 0;
- unsigned Latency = ItinData->getStageLatency(MI.getDesc().getSchedClass());
- if (!EnableTimingClassLatency ||
- MI.getParent()->getParent()->getSubtarget<HexagonSubtarget>().
- useBSBScheduling())
- if (Latency > 1)
- Latency = 1;
- return Latency;
+ return ItinData->getStageLatency(MI.getDesc().getSchedClass());
+}
+
+/// getOperandLatency - Compute and return the use operand latency of a given
+/// pair of def and use.
+/// In most cases, the static scheduling itinerary is enough to determine the
+/// operand latency, but that may not be possible for instructions with a
+/// variable number of defs / uses.
+///
+/// This is a raw interface to the itinerary that may be directly overridden by
+/// a target. Use computeOperandLatency to get the best estimate of latency.
+int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
+ auto &RI = getRegisterInfo();
+ // Get DefIdx and UseIdx for super registers.
+ MachineOperand DefMO = DefMI.getOperand(DefIdx);
+
+ if (RI.isPhysicalRegister(DefMO.getReg())) {
+ if (DefMO.isImplicit()) {
+ for (MCSuperRegIterator SR(DefMO.getReg(), &RI); SR.isValid(); ++SR) {
+ int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &RI);
+ if (Idx != -1) {
+ DefIdx = Idx;
+ break;
+ }
+ }
+ }
+
+ MachineOperand UseMO = UseMI.getOperand(UseIdx);
+ if (UseMO.isImplicit()) {
+ for (MCSuperRegIterator SR(UseMO.getReg(), &RI); SR.isValid(); ++SR) {
+ int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &RI);
+ if (Idx != -1) {
+ UseIdx = Idx;
+ break;
+ }
+ }
+ }
+ }
+
+ return TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
+ UseMI, UseIdx);
}
// inverts the predication logic.
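// A minimal usage sketch of the getOperandLatency hook added above, assuming
// TII, STI, DefMI/UseMI and DefIdx/UseIdx are provided by the caller (e.g. a
// machine scheduler); a negative result means the itinerary has no answer and
// a default latency applies:
//   const InstrItineraryData *Itin = STI.getInstrItineraryData();
//   int Lat = TII->getOperandLatency(Itin, *DefMI, DefIdx, *UseMI, UseIdx);
//   if (Lat < 0)
//     Lat = TII->defaultDefLatency(STI.getSchedModel(), *DefMI);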
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 21b4f738f6e8..97b9bc954688 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -288,6 +288,19 @@ public:
/// If the instruction is an increment of a constant value, return the amount.
bool getIncrementValue(const MachineInstr &MI, int &Value) const override;
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// pair of def and use.
+ /// In most cases, the static scheduling itinerary is enough to determine the
+ /// operand latency, but that may not be possible for instructions with a
+ /// variable number of defs / uses.
+ ///
+ /// This is a raw interface to the itinerary that may be directly overridden by
+ /// a target. Use computeOperandLatency to get the best estimate of latency.
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const override;
+
bool isTailCall(const MachineInstr &MI) const override;
/// HexagonInstrInfo specifics.
@@ -356,7 +369,7 @@ public:
bool isTC4x(const MachineInstr &MI) const;
bool isToBeScheduledASAP(const MachineInstr &MI1,
const MachineInstr &MI2) const;
- bool isV60VectorInstruction(const MachineInstr &MI) const;
+ bool isHVXVec(const MachineInstr &MI) const;
bool isValidAutoIncImm(const EVT VT, const int Offset) const;
bool isValidOffset(unsigned Opcode, int Offset, bool Extend = true) const;
bool isVecAcc(const MachineInstr &MI) const;
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 20dc9b0da1db..324108284a9a 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -744,7 +744,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
// Give less preference to an instruction that will cause a stall with
// an instruction in the previous packet.
- if (QII.isV60VectorInstruction(Instr)) {
+ if (QII.isHVXVec(Instr)) {
// Check for stalls in the previous packet.
if (Q.getID() == TopQID) {
for (auto J : Top.ResourceModel->OldPacket)
diff --git a/lib/Target/Hexagon/HexagonPatterns.td b/lib/Target/Hexagon/HexagonPatterns.td
index b8c3bf0745ce..32503d111c24 100644
--- a/lib/Target/Hexagon/HexagonPatterns.td
+++ b/lib/Target/Hexagon/HexagonPatterns.td
@@ -1,13 +1,5 @@
// Pattern fragment that combines the value type and the register class
// into a single parameter.
-// The pat frags in the definitions below need to have a named register,
-// otherwise i32 will be assumed regardless of the register class. The
-// name of the register does not matter.
-def I1 : PatLeaf<(i1 PredRegs:$R)>;
-def I32 : PatLeaf<(i32 IntRegs:$R)>;
-def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
-def F32 : PatLeaf<(f32 IntRegs:$R)>;
-def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
// Pattern fragments to extract the low and high subregisters from a
// 64-bit value.
diff --git a/lib/Target/Hexagon/HexagonPseudo.td b/lib/Target/Hexagon/HexagonPseudo.td
index 2e8def572c4b..8c2caea2d5c5 100644
--- a/lib/Target/Hexagon/HexagonPseudo.td
+++ b/lib/Target/Hexagon/HexagonPseudo.td
@@ -7,6 +7,15 @@
//
//===----------------------------------------------------------------------===//
+// The pat frags in the definitions below need to have a named register,
+// otherwise i32 will be assumed regardless of the register class. The
+// name of the register does not matter.
+def I1 : PatLeaf<(i1 PredRegs:$R)>;
+def I32 : PatLeaf<(i32 IntRegs:$R)>;
+def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
+def F32 : PatLeaf<(f32 IntRegs:$R)>;
+def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
+
let PrintMethod = "printGlobalOperand" in {
def globaladdress : Operand<i32>;
def globaladdressExt : Operand<i32>;
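// A minimal sketch of how the typed pat leaves above are meant to be used in
// selection patterns; A2_add and A2_addp are assumed from the Hexagon
// instruction set and the patterns are illustrative only:
//   def: Pat<(add I32:$Rs, I32:$Rt), (A2_add I32:$Rs, I32:$Rt)>;
//   def: Pat<(add I64:$Rs, I64:$Rt), (A2_addp I64:$Rs, I64:$Rt)>;
// Without the named register ($Rs, $Rt) in each PatLeaf, the pattern type
// would be inferred as i32 regardless of the register class.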
@@ -23,17 +32,20 @@ def DUPLEX_Pseudo : InstHexagon<(outs),
let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
isAsmParserOnly = 1 in
-def TFRI64_V2_ext : ALU64_rr<(outs DoubleRegs:$dst),
- (ins s32_0Imm:$src1, s8_0Imm:$src2),
- "$dst=combine(#$src1,#$src2)">;
+def TFRI64_V2_ext : InstHexagon<(outs DoubleRegs:$dst),
+ (ins s32_0Imm:$src1, s8_0Imm:$src2),
+ "$dst=combine(#$src1,#$src2)", [], "",
+ A2_combineii.Itinerary, TypeALU32_2op>, OpcodeHexagon;
// HI/LO Instructions
let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
hasNewValue = 1, opNewValue = 0 in
-class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
+class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp,
+ InstHexagon rootInst>
: InstHexagon<(outs IntRegs:$dst),
- (ins u16_0Imm:$imm_value),
- "$dst"#RegHalf#"=#$imm_value", [], "", ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, OpcodeHexagon {
+ (ins u16_0Imm:$imm_value),
+ "$dst"#RegHalf#"=#$imm_value", [], "",
+ rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<5> dst;
bits<32> imm_value;
@@ -46,8 +58,8 @@ class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
}
let isAsmParserOnly = 1 in {
- def LO : REG_IMMED<".l", 0b0, 0b001, 0b1>;
- def HI : REG_IMMED<".h", 0b0, 0b010, 0b1>;
+ def LO : REG_IMMED<".l", 0b0, 0b001, 0b1, A2_tfril>;
+ def HI : REG_IMMED<".h", 0b0, 0b010, 0b1, A2_tfrih>;
}
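// The recurring idiom in this change: instead of naming an itinerary class
// directly, a pseudo class takes a "root" InstHexagon and inherits its
// Itinerary and Type, so the pseudo stays in sync with the auto-generated
// per-instruction timing data. A minimal sketch, where MyPseudo and
// PS_example are hypothetical names and A2_tfr is an existing root
// instruction:
//   class MyPseudo<InstHexagon rootInst>
//     : InstHexagon<(outs IntRegs:$Rd), (ins IntRegs:$Rs), "", [], "",
//                   rootInst.Itinerary, rootInst.Type>, OpcodeHexagon;
//   def PS_example : MyPseudo<A2_tfr>;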
let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in {
@@ -59,11 +71,13 @@ let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in {
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def PS_true : SInst<(outs PredRegs:$dst), (ins), "", []>;
+def PS_true : InstHexagon<(outs PredRegs:$dst), (ins), "",
+ [(set I1:$dst, 1)], "", C2_orn.Itinerary, TypeCR>;
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def PS_false : SInst<(outs PredRegs:$dst), (ins), "", []>;
+def PS_false : InstHexagon<(outs PredRegs:$dst), (ins), "",
+ [(set I1:$dst, 0)], "", C2_andn.Itinerary, TypeCR>;
let Defs = [R29, R30], Uses = [R31, R30, R29], isPseudo = 1 in
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
@@ -90,10 +104,10 @@ def ENDLOOP1 : Endloop<(outs), (ins b30_2Imm:$offset),
let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
opExtendable = 0, hasSideEffects = 0 in
-class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
- : CRInst<(outs), (ins brOp:$offset, u10_0Imm:$src2),
+class LOOP_iBase<string mnemonic, InstHexagon rootInst>
+ : InstHexagon <(outs), (ins b30_2Imm:$offset, u10_0Imm:$src2),
#mnemonic#"($offset,#$src2)",
- [], "" , CR_tc_3x_SLOT3> {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<9> offset;
bits<10> src2;
@@ -110,10 +124,10 @@ class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
opExtendable = 0, hasSideEffects = 0 in
-class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
- : CRInst<(outs), (ins brOp:$offset, IntRegs:$src2),
+class LOOP_rBase<string mnemonic, InstHexagon rootInst>
+ : InstHexagon<(outs), (ins b30_2Imm:$offset, IntRegs:$src2),
#mnemonic#"($offset,$src2)",
- [], "" ,CR_tc_3x_SLOT3> {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<9> offset;
bits<5> src2;
@@ -126,27 +140,25 @@ class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
let Inst{4-3} = offset{3-2};
}
-multiclass LOOP_ri<string mnemonic> {
- let isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in {
- def iext: LOOP_iBase<mnemonic, b30_2Imm, 1>;
- def rext: LOOP_rBase<mnemonic, b30_2Imm, 1>;
- }
+let Defs = [SA0, LC0, USR], isCodeGenOnly = 1, isExtended = 1,
+ opExtendable = 0 in {
+ def J2_loop0iext : LOOP_iBase<"loop0", J2_loop0i>;
+ def J2_loop1iext : LOOP_iBase<"loop1", J2_loop1i>;
}
-
-let Defs = [SA0, LC0, USR] in
-defm J2_loop0 : LOOP_ri<"loop0">;
-
// Interestingly only loop0's appear to set usr.lpcfg
-let Defs = [SA1, LC1] in
-defm J2_loop1 : LOOP_ri<"loop1">;
+let Defs = [SA1, LC1], isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in {
+ def J2_loop0rext : LOOP_rBase<"loop0", J2_loop0r>;
+ def J2_loop1rext : LOOP_rBase<"loop1", J2_loop1r>;
+}
let isCall = 1, hasSideEffects = 1, isPredicable = 0,
isExtended = 0, isExtendable = 1, opExtendable = 0,
isExtentSigned = 1, opExtentBits = 24, opExtentAlign = 2 in
class T_Call<string ExtStr>
- : JInst<(outs), (ins a30_2Imm:$dst),
- "call " # ExtStr # "$dst", [], "", J_tc_2early_SLOT23> {
+ : InstHexagon<(outs), (ins a30_2Imm:$dst),
+ "call " # ExtStr # "$dst", [], "", J2_call.Itinerary, TypeJ>,
+ OpcodeHexagon {
let BaseOpcode = "call";
bits<24> dst;
@@ -164,38 +176,24 @@ let isCodeGenOnly = 1, isCall = 1, hasSideEffects = 1,
Defs = [PC, R31, R6, R7, P0] in
def PS_call_stk : T_Call<"">;
-let isCall = 1, hasSideEffects = 1, cofMax1 = 1 in
-class JUMPR_MISC_CALLR<bit isPred, bit isPredNot,
- dag InputDag = (ins IntRegs:$Rs)>
- : JInst<(outs), InputDag,
- !if(isPred, !if(isPredNot, "if (!$Pu) callr $Rs",
- "if ($Pu) callr $Rs"),
- "callr $Rs"),
- [], "", J_tc_2early_SLOT2> {
+// Call, no return.
+let isCall = 1, hasSideEffects = 1, cofMax1 = 1, isCodeGenOnly = 1 in
+def PS_callr_nr: InstHexagon<(outs), (ins IntRegs:$Rs),
+ "callr $Rs", [], "", J2_callr.Itinerary, TypeJ>, OpcodeHexagon {
bits<5> Rs;
bits<2> Pu;
- let isPredicated = isPred;
- let isPredicatedFalse = isPredNot;
+ let isPredicatedFalse = 1;
let IClass = 0b0101;
- let Inst{27-25} = 0b000;
- let Inst{24-23} = !if (isPred, 0b10, 0b01);
- let Inst{22} = 0;
- let Inst{21} = isPredNot;
- let Inst{9-8} = !if (isPred, Pu, 0b00);
+ let Inst{27-21} = 0b0000101;
let Inst{20-16} = Rs;
-
}
-let isCodeGenOnly = 1 in {
- def PS_callr_nr : JUMPR_MISC_CALLR<0, 1>; // Call, no return.
-}
-
let isCall = 1, hasSideEffects = 1,
isExtended = 0, isExtendable = 1, opExtendable = 0, isCodeGenOnly = 1,
- BaseOpcode = "PS_call_nr", isExtentSigned = 1, opExtentAlign = 2,
- Itinerary = J_tc_2early_SLOT23 in
-class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops>
+ BaseOpcode = "PS_call_nr", isExtentSigned = 1, opExtentAlign = 2 in
+class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops,
+ InstrItinClass itin>
: Pseudo<(outs), iops, "">, PredRel {
bits<2> Pu;
bits<17> dst;
@@ -205,16 +203,18 @@ class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops>
let isPredicatedFalse = isFalse;
}
-def PS_call_nr : Call_nr<24, 0, 0, (ins s32_0Imm:$Ii)>;
-//def PS_call_nrt: Call_nr<17, 1, 0, (ins PredRegs:$Pu, s32_0Imm:$dst)>;
-//def PS_call_nrf: Call_nr<17, 1, 1, (ins PredRegs:$Pu, s32_0Imm:$dst)>;
+def PS_call_nr : Call_nr<24, 0, 0, (ins s32_0Imm:$Ii), J2_call.Itinerary>;
+//def PS_call_nrt: Call_nr<17, 1, 0, (ins PredRegs:$Pu, s32_0Imm:$dst),
+// J2_callt.Itinerary>;
+//def PS_call_nrf: Call_nr<17, 1, 1, (ins PredRegs:$Pu, s32_0Imm:$dst),
+// J2_callf.Itinerary>;
let isBranch = 1, isIndirectBranch = 1, isBarrier = 1, Defs = [PC],
isPredicable = 1, hasSideEffects = 0, InputType = "reg",
cofMax1 = 1 in
-class T_JMPr
+class T_JMPr <InstHexagon rootInst>
: InstHexagon<(outs), (ins IntRegs:$dst), "jumpr $dst", [],
- "", J_tc_2early_SLOT2, TypeJ>, OpcodeHexagon {
+ "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<5> dst;
let IClass = 0b0101;
@@ -225,12 +225,12 @@ class T_JMPr
// A return through builtin_eh_return.
let isReturn = 1, isTerminator = 1, isBarrier = 1, hasSideEffects = 0,
isCodeGenOnly = 1, Defs = [PC], Uses = [R28], isPredicable = 0 in
-def EH_RETURN_JMPR : T_JMPr;
+def EH_RETURN_JMPR : T_JMPr<J2_jumpr>;
// Indirect tail-call.
let isPseudo = 1, isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0,
isTerminator = 1, isCodeGenOnly = 1 in
-def PS_tailcall_r : T_JMPr;
+def PS_tailcall_r : T_JMPr<J2_jumpr>;
//
// Direct tail-calls.
@@ -262,11 +262,11 @@ class JumpOpcStr<string Mnemonic, bit New, bit Taken> {
}
let isBranch = 1, isIndirectBranch = 1, Defs = [PC], isPredicated = 1,
hasSideEffects = 0, InputType = "reg", cofMax1 = 1 in
-class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
+class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak, InstHexagon rootInst>
: InstHexagon<(outs), (ins PredRegs:$src, IntRegs:$dst),
CondStr<"$src", !if(PredNot,0,1), isPredNew>.S #
JumpOpcStr<"jumpr", isPredNew, isTak>.S # " $dst",
- [], "", J_tc_2early_SLOT2, TypeJ>, OpcodeHexagon {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
let isTaken = isTak;
let isPredicatedFalse = PredNot;
@@ -283,30 +283,25 @@ class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
let Inst{11} = isPredNew;
let Inst{9-8} = src;
}
-multiclass JMPR_Pred<bit PredNot> {
- def NAME : T_JMPr_c<PredNot, 0, 0>; // not taken
- // Predicate new
- def NAME#newpt : T_JMPr_c<PredNot, 1, 1>; // taken
- def NAME#new : T_JMPr_c<PredNot, 1, 0>; // not taken
-}
-multiclass JMPR_base<string BaseOp> {
- let BaseOpcode = BaseOp in {
- def NAME : T_JMPr;
- defm t : JMPR_Pred<0>;
- defm f : JMPR_Pred<1>;
- }
+
+let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1,
+ isBarrier = 1, BaseOpcode = "JMPret" in {
+ def PS_jmpret : T_JMPr<J2_jumpr>, PredNewRel;
+ def PS_jmprett : T_JMPr_c<0, 0, 0, J2_jumprt>, PredNewRel;
+ def PS_jmpretf : T_JMPr_c<1, 0, 0, J2_jumprf>, PredNewRel;
+ def PS_jmprettnew : T_JMPr_c<0, 1, 0, J2_jumprtnew>, PredNewRel;
+ def PS_jmpretfnew : T_JMPr_c<1, 1, 0, J2_jumprfnew>, PredNewRel;
+ def PS_jmprettnewpt : T_JMPr_c<0, 1, 1, J2_jumprtnewpt>, PredNewRel;
+ def PS_jmpretfnewpt : T_JMPr_c<1, 1, 1, J2_jumprfnewpt>, PredNewRel;
}
-let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1, isBarrier = 1 in
-defm PS_jmpret : JMPR_base<"JMPret">, PredNewRel;
//defm V6_vtran2x2_map : HexagonMapping<(outs VectorRegs:$Vy32, VectorRegs:$Vx32), (ins VectorRegs:$Vx32in, IntRegs:$Rt32), "vtrans2x2(${Vy32},${Vx32},${Rt32})", (V6_vshuff VectorRegs:$Vy32, VectorRegs:$Vx32, VectorRegs:$Vx32in, IntRegs:$Rt32)>;
// The reason for the custom inserter is to record all ALLOCA instructions
// in MachineFunctionInfo.
-let Defs = [R29], isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 1 in
-def PS_alloca: InstHexagon<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, u32_0Imm:$A), "",
- [], "", ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>;
+let Defs = [R29], hasSideEffects = 1 in
+def PS_alloca: Pseudo <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, u32_0Imm:$A), "", []>;
// Load predicate.
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
@@ -322,35 +317,19 @@ def LDriw_mod : LDInst<(outs ModRegs:$dst),
(ins IntRegs:$addr, s32_0Imm:$off),
".error \"should not emit\"", []>;
-// Vector load
-let Predicates = [HasV60T, UseHVX] in
-let mayLoad = 1, hasSideEffects = 0 in
- class V6_LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VM_LD,
- IType type = TypeCVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
-
-// Vector store
-let Predicates = [HasV60T, UseHVX] in
-let mayStore = 1, hasSideEffects = 0 in
-class V6_STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VM_ST,
- IType type = TypeCVI_VM_ST>
-: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
let isCodeGenOnly = 1, isPseudo = 1 in
-def PS_pselect : ALU64_rr<(outs DoubleRegs:$Rd),
+def PS_pselect: InstHexagon<(outs DoubleRegs:$Rd),
(ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
- ".error \"should not emit\" ", []>;
+ ".error \"should not emit\" ", [], "", A2_tfrpt.Itinerary, TypeALU32_2op>;
let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0,
isPredicable = 1,
isExtendable = 1, opExtendable = 0, isExtentSigned = 1,
opExtentBits = 24, opExtentAlign = 2, InputType = "imm" in
-class T_JMP<string ExtStr>
- : JInst_CJUMP_UCJUMP<(outs), (ins b30_2Imm:$dst),
- "jump " # ExtStr # "$dst",
- [], "", J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT> {
+class T_JMP: InstHexagon<(outs), (ins b30_2Imm:$dst),
+ "jump $dst",
+ [], "", J2_jump.Itinerary, TypeJ>, OpcodeHexagon {
bits<24> dst;
let IClass = 0b0101;
@@ -362,16 +341,16 @@ class T_JMP<string ExtStr>
// Restore registers and dealloc return function call.
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC], isPredicable = 0, isAsmParserOnly = 1 in {
- def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP;
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP;
let Defs = [R14, R15, R28, R29, R30, R31, PC] in {
- def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP;
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP;
}
}
@@ -416,33 +395,38 @@ let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in {
def SAVE_REGISTERS_CALL_V4STK_EXT_PIC : T_Call<"">, PredRel;
}
-// Vector load/store pseudos
+// Vector store pseudos
+let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
+ mayStore = 1, hasSideEffects = 0 in
+class STrivv_template<RegisterClass RC, InstHexagon rootInst>
+ : InstHexagon<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src),
+ "", [], "", rootInst.Itinerary, rootInst.Type>;
-let isPseudo = 1, isCodeGenOnly = 1 in
-class STrivv_template<RegisterClass RC>
- : V6_STInst<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src), "", []>;
-
-def PS_vstorerw_ai: STrivv_template<VecDblRegs>,
- Requires<[HasV60T,UseHVXSgl]>;
-def PS_vstorerwu_ai: STrivv_template<VecDblRegs>,
+def PS_vstorerw_ai: STrivv_template<VecDblRegs, V6_vS32b_ai>,
Requires<[HasV60T,UseHVXSgl]>;
-def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B>,
- Requires<[HasV60T,UseHVXDbl]>;
-def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B>,
+def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vstorerwu_ai: STrivv_template<VecDblRegs, V6_vS32Ub_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32Ub_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
-let isPseudo = 1, isCodeGenOnly = 1 in
-class LDrivv_template<RegisterClass RC>
- : V6_LDInst<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off), "", []>;
+// Vector load pseudos
+let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
+ mayLoad = 1, hasSideEffects = 0 in
+class LDrivv_template<RegisterClass RC, InstHexagon rootInst>
+ : InstHexagon<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off),
+ "", [], "", rootInst.Itinerary, rootInst.Type>;
-def PS_vloadrw_ai: LDrivv_template<VecDblRegs>,
- Requires<[HasV60T,UseHVXSgl]>;
-def PS_vloadrwu_ai: LDrivv_template<VecDblRegs>,
+def PS_vloadrw_ai: LDrivv_template<VecDblRegs, V6_vL32b_ai>,
Requires<[HasV60T,UseHVXSgl]>;
-def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B>,
+def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
-def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B>,
+
+def PS_vloadrwu_ai: LDrivv_template<VecDblRegs, V6_vL32Ub_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32Ub_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
// Store vector predicate pseudo.
@@ -469,25 +453,23 @@ let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
Requires<[HasV60T,UseHVXDbl]>;
}
-class VSELInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VA_DV,
- IType type = TypeCVI_VA_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
-
-let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in {
- def PS_vselect: VSELInst<(outs VectorRegs:$dst),
- (ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3), "", []>,
- Requires<[HasV60T,UseHVXSgl]>;
- def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
- (ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
- "", []>, Requires<[HasV60T,UseHVXDbl]>;
- def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
- (ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3), "", []>,
- Requires<[HasV60T,UseHVXSgl]>;
- def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
- (ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
- "", []>, Requires<[HasV60T,UseHVXDbl]>;
-}
+let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+class VSELInst<dag outs, dag ins, InstHexagon rootInst>
+ : InstHexagon<outs, ins, "", [], "", rootInst.Itinerary, rootInst.Type>;
+
+def PS_vselect: VSELInst<(outs VectorRegs:$dst),
+ (ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
+ V6_vcmov>, Requires<[HasV60T,UseHVXSgl]>;
+def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
+ (ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
+ V6_vcmov>, Requires<[HasV60T,UseHVXDbl]>;
+
+def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
+ (ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3),
+ V6_vccombine>, Requires<[HasV60T,UseHVXSgl]>;
+def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
+ (ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
+ V6_vccombine>, Requires<[HasV60T,UseHVXDbl]>;
// Store predicate.
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
@@ -504,8 +486,10 @@ def STriw_mod : STInst<(outs),
let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
isAsmParserOnly = 1 in
-def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u64_0Imm:$src1),
- "$dst = #$src1">;
+def TFRI64_V4 : InstHexagon<(outs DoubleRegs:$dst),
+ (ins u64_0Imm:$src1),
+ "$dst = #$src1", [], "",
+ A2_combineii.Itinerary, TypeALU32_2op>, OpcodeHexagon;
// Hexagon doesn't have a vector multiply with C semantics.
// Instead, generate a pseudo instruction that gets expanded into two
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td
index 2519b7c40062..45dbb3a6d218 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -122,12 +122,6 @@ let Namespace = "Hexagon" in {
def P2 : Rp<2, "p2">, DwarfRegNum<[65]>;
def P3 : Rp<3, "p3">, DwarfRegNum<[66]>;
- // Modifier registers.
- // C6 and C7 can also be M0 and M1, but register names must be unique, even
- // if belonging to different register classes.
- def M0 : Mx<0, "m0">, DwarfRegNum<[72]>;
- def M1 : Mx<1, "m1">, DwarfRegNum<[73]>;
-
// Fake register to represent USR.OVF bit. Arithmetic/saturating instruc-
// tions modify this bit, and multiple such instructions are allowed in the
// same packet. We need to ignore output dependencies on this bit, but not
@@ -149,8 +143,8 @@ let Namespace = "Hexagon" in {
// When defining more Cn registers, make sure to explicitly mark them
// as reserved in HexagonRegisterInfo.cpp.
def C5: Rc<5, "c5", ["c5"]>, DwarfRegNum<[72]>;
- def C6: Rc<6, "c6", [], [M0]>, DwarfRegNum<[73]>;
- def C7: Rc<7, "c7", [], [M1]>, DwarfRegNum<[74]>;
+ def M0: Rc<6, "m0", ["c6"]>, DwarfRegNum<[73]>;
+ def M1: Rc<7, "m1", ["c7"]>, DwarfRegNum<[74]>;
// Define C8 separately and make it aliased with USR.
// The problem is that USR has subregisters (e.g. overflow). If USR was
// specified as a subregister of C9_8, it would imply that subreg_overflow
@@ -177,7 +171,7 @@ let Namespace = "Hexagon" in {
def C1_0: Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>;
def C3_2: Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>;
def C5_4: Rcc<4, "c5:4", [P3_0, C5]>, DwarfRegNum<[71]>;
- def C7_6: Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>;
+ def C7_6: Rcc<6, "c7:6", [M0, M1], ["m1:0"]>, DwarfRegNum<[72]>;
// Use C8 instead of USR as a subregister of C9_8.
def C9_8: Rcc<8, "c9:8", [C8, PC]>, DwarfRegNum<[74]>;
def C11_10: Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>;
@@ -280,8 +274,8 @@ def ModRegs : RegisterClass<"Hexagon", [i32], 32, (add M0, M1)>;
let Size = 32, isAllocatable = 0 in
def CtrRegs : RegisterClass<"Hexagon", [i32], 32,
- (add LC0, SA0, LC1, SA1, P3_0, C5, C6, C7,
- C8, PC, UGP, GP, CS0, CS1, UPCYCLELO, UPCYCLEHI,
+ (add LC0, SA0, LC1, SA1, P3_0, C5, C8, PC, UGP, GP, CS0, CS1,
+ UPCYCLELO, UPCYCLEHI,
FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI, UTIMERLO, UTIMERHI,
M0, M1, USR)>;
diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td
index 9b5fbea04d18..ffee03e72639 100644
--- a/lib/Target/Hexagon/HexagonSchedule.td
+++ b/lib/Target/Hexagon/HexagonSchedule.td
@@ -7,6 +7,55 @@
//
//===----------------------------------------------------------------------===//
+def Hex_FWD : Bypass;
+def HVX_FWD : Bypass;
+
+// Functional Units.
+def SLOT0 : FuncUnit;
+def SLOT1 : FuncUnit;
+def SLOT2 : FuncUnit;
+def SLOT3 : FuncUnit;
+// Endloop is a pseudo instruction that is encoded with 2 bits in a packet
+// rather than taking an execution slot. This special unit is needed
+// to schedule an ENDLOOP with 4 other instructions.
+def SLOT_ENDLOOP: FuncUnit;
+
+// CVI pipes from the "Hexagon Multimedia Co-Processor Extensions Arch Spec".
+def CVI_ST : FuncUnit;
+def CVI_XLANE : FuncUnit;
+def CVI_SHIFT : FuncUnit;
+def CVI_MPY0 : FuncUnit;
+def CVI_MPY1 : FuncUnit;
+def CVI_LD : FuncUnit;
+
+// Combined functional units.
+def CVI_XLSHF : FuncUnit;
+def CVI_MPY01 : FuncUnit;
+def CVI_ALL : FuncUnit;
+def CVI_ALL_NOMEM : FuncUnit;
+
+// Combined functional unit data.
+def HexagonComboFuncsV60 :
+ ComboFuncUnits<[
+ ComboFuncData<CVI_XLSHF , [CVI_XLANE, CVI_SHIFT]>,
+ ComboFuncData<CVI_MPY01 , [CVI_MPY0, CVI_MPY1]>,
+ ComboFuncData<CVI_ALL , [CVI_ST, CVI_XLANE, CVI_SHIFT,
+ CVI_MPY0, CVI_MPY1, CVI_LD]>,
+ ComboFuncData<CVI_ALL_NOMEM, [CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1]>
+ ]>;
+
+// Itinerary classes.
+def PSEUDO : InstrItinClass;
+def PSEUDOM : InstrItinClass;
+def DUPLEX : InstrItinClass;
+def tc_ENDLOOP : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Auto-generated itinerary classes
+//===----------------------------------------------------------------------===//
+include "HexagonDepIICScalar.td"
+include "HexagonDepIICHVX.td"
+
//===----------------------------------------------------------------------===//
// V4 Machine Info +
//===----------------------------------------------------------------------===//
@@ -20,9 +69,9 @@ include "HexagonScheduleV55.td"
// V60 Machine Info -
//===----------------------------------------------------------------------===//
-include "HexagonScheduleV60.td"
include "HexagonIICScalar.td"
include "HexagonIICHVX.td"
+include "HexagonScheduleV60.td"
//===----------------------------------------------------------------------===//
// V62 Machine Info +
diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td
index 880cc0a02b6a..69b704a805b8 100644
--- a/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -7,200 +7,31 @@
//
//===----------------------------------------------------------------------===//
-// There are four SLOTS (four parallel pipelines) in Hexagon V4 machine.
-// This file describes that machine information.
-
-//
-// |===========|==================================================|
-// | PIPELINE | Instruction Classes |
-// |===========|==================================================|
-// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
-// |-----------|--------------------------------------------------|
-// | SLOT1 | LD ST ALU32 |
-// |-----------|--------------------------------------------------|
-// | SLOT2 | XTYPE ALU32 J JR |
-// |-----------|--------------------------------------------------|
-// | SLOT3 | XTYPE ALU32 J CR |
-// |===========|==================================================|
-
-// Functional Units.
-def SLOT0 : FuncUnit;
-def SLOT1 : FuncUnit;
-def SLOT2 : FuncUnit;
-def SLOT3 : FuncUnit;
-// Endloop is a pseudo instruction that is encoded with 2 bits in a packet
-// rather than taking an execution slot. This special unit is needed
-// to schedule an ENDLOOP with 4 other instructions.
-def SLOT_ENDLOOP: FuncUnit;
-
-// Itinerary classes.
-def PSEUDO : InstrItinClass;
-def PSEUDOM : InstrItinClass;
-// ALU64/M/S Instruction classes of V2 are collectively knownn as XTYPE in V4.
-def DUPLEX : InstrItinClass;
-def PREFIX : InstrItinClass;
-def COMPOUND_CJ_ARCHDEPSLOT : InstrItinClass;
-def COMPOUND : InstrItinClass;
+def LD_tc_ld_SLOT01 : InstrItinClass;
+def ST_tc_st_SLOT01 : InstrItinClass;
+
+class HexagonV4PseudoItin {
+ list<InstrItinData> V4PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>]>
+ ];
+}
-def ALU32_2op_tc_1_SLOT0123 : InstrItinClass;
-def ALU32_2op_tc_2early_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_2early_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_1_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_2_SLOT0123 : InstrItinClass;
-def ALU32_ADDI_tc_1_SLOT0123 : InstrItinClass;
-def ALU64_tc_1_SLOT23 : InstrItinClass;
-def ALU64_tc_2_SLOT23 : InstrItinClass;
-def ALU64_tc_2early_SLOT23 : InstrItinClass;
-def ALU64_tc_3x_SLOT23 : InstrItinClass;
-def CR_tc_2_SLOT3 : InstrItinClass;
-def CR_tc_2early_SLOT23 : InstrItinClass;
-def CR_tc_2early_SLOT3 : InstrItinClass;
-def CR_tc_3x_SLOT23 : InstrItinClass;
-def CR_tc_3x_SLOT3 : InstrItinClass;
-def J_tc_2early_SLOT23 : InstrItinClass;
-def J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT : InstrItinClass;
-def J_tc_2early_SLOT2 : InstrItinClass;
-def LD_tc_ld_SLOT01 : InstrItinClass;
-def LD_tc_ld_pi_SLOT01 : InstrItinClass;
-def LD_tc_ld_SLOT0 : InstrItinClass;
-def LD_tc_3or4stall_SLOT0 : InstrItinClass;
-def M_tc_2_SLOT23 : InstrItinClass;
-def M_tc_2_acc_SLOT23 : InstrItinClass;
-def M_tc_3_SLOT23 : InstrItinClass;
-def M_tc_1_SLOT23 : InstrItinClass;
-def M_tc_3x_SLOT23 : InstrItinClass;
-def M_tc_3x_acc_SLOT23 : InstrItinClass;
-def M_tc_3or4x_SLOT23 : InstrItinClass;
-def M_tc_3or4x_acc_SLOT23 : InstrItinClass;
-def ST_tc_st_SLOT01 : InstrItinClass;
-def ST_tc_st_pi_SLOT01 : InstrItinClass;
-def ST_tc_st_SLOT0 : InstrItinClass;
-def ST_tc_st_pi_SLOT0 : InstrItinClass;
-def ST_tc_ld_SLOT0 : InstrItinClass;
-def ST_tc_3stall_SLOT0 : InstrItinClass;
-def S_2op_tc_1_SLOT23 : InstrItinClass;
-def S_2op_tc_2_SLOT23 : InstrItinClass;
-def S_2op_tc_2early_SLOT23 : InstrItinClass;
-def S_2op_tc_3or4x_SLOT23 : InstrItinClass;
-def S_3op_tc_1_SLOT23 : InstrItinClass;
-def S_3op_tc_2_SLOT23 : InstrItinClass;
-def S_3op_tc_2early_SLOT23 : InstrItinClass;
-def S_3op_tc_3_SLOT23 : InstrItinClass;
-def S_3op_tc_3x_SLOT23 : InstrItinClass;
-def NCJ_tc_3or4stall_SLOT0 : InstrItinClass;
-def V2LDST_tc_ld_SLOT01 : InstrItinClass;
-def V2LDST_tc_st_SLOT0 : InstrItinClass;
-def V2LDST_tc_st_SLOT01 : InstrItinClass;
-def V4LDST_tc_ld_SLOT01 : InstrItinClass;
-def V4LDST_tc_st_SLOT0 : InstrItinClass;
-def V4LDST_tc_st_SLOT01 : InstrItinClass;
-def J_tc_2early_SLOT0123 : InstrItinClass;
-def EXTENDER_tc_1_SLOT0123 : InstrItinClass;
-def S_3op_tc_3stall_SLOT23 : InstrItinClass;
+def HexagonV4ItinList : DepScalarItinV4, HexagonV4PseudoItin {
+ list<InstrItinData> V4Itin_list = [
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>
+ ];
+ list<InstrItinData> ItinList =
+ !listconcat(V4Itin_list, DepScalarItinV4_list, V4PseudoItin_list);
+}
def HexagonItinerariesV4 :
- ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>]>,
-
- // Jump (conditional/unconditional/return etc)
- // CR
- InstrItinData<CR_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // J
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>]>,
-
- //Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // Store
- // ST
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_st_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- // ST0
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // SYS
- InstrItinData<ST_tc_3stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // Mem ops - MEM_V4
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
-
- // ENDLOOP
- InstrItinData<J_tc_2early_SLOT0123 , [InstrStage<1, [SLOT_ENDLOOP]>]>,
-
- // Extender/PREFIX
- InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>
- ]>;
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP],
+ [Hex_FWD], HexagonV4ItinList.ItinList>;
def HexagonModelV4 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/lib/Target/Hexagon/HexagonScheduleV55.td b/lib/Target/Hexagon/HexagonScheduleV55.td
index 06cbcb16abb7..ca738be5d6ef 100644
--- a/lib/Target/Hexagon/HexagonScheduleV55.td
+++ b/lib/Target/Hexagon/HexagonScheduleV55.td
@@ -1,4 +1,4 @@
-//=-HexagonScheduleV4.td - HexagonV4 Scheduling Definitions --*- tablegen -*-=//
+//=-HexagonScheduleV55.td - HexagonV55 Scheduling Definitions -*- tablegen -*=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,190 +7,33 @@
//
//===----------------------------------------------------------------------===//
-// There are four SLOTS (four parallel pipelines) in Hexagon V4 machine.
-// This file describes that machine information.
-//
-// |===========|==================================================|
-// | PIPELINE | Instruction Classes |
-// |===========|==================================================|
-// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
-// |-----------|--------------------------------------------------|
-// | SLOT1 | LD ST ALU32 |
-// |-----------|--------------------------------------------------|
-// | SLOT2 | XTYPE ALU32 J JR |
-// |-----------|--------------------------------------------------|
-// | SLOT3 | XTYPE ALU32 J CR |
-// |===========|==================================================|
+class HexagonV55PseudoItin {
+ list<InstrItinData> V55PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>], [2]>
+ ];
+}
-def CJ_tc_1_SLOT23 : InstrItinClass;
-def CJ_tc_2early_SLOT23 : InstrItinClass;
-def COPROC_VMEM_vtc_long_SLOT01 : InstrItinClass;
-def COPROC_VX_vtc_long_SLOT23 : InstrItinClass;
-def COPROC_VX_vtc_SLOT23 : InstrItinClass;
-def J_tc_3stall_SLOT2 : InstrItinClass;
-def MAPPING_tc_1_SLOT0123 : InstrItinClass;
-def M_tc_3stall_SLOT23 : InstrItinClass;
+def HexagonV55ItinList : DepScalarItinV55,
+ HexagonV55PseudoItin {
+ list<InstrItinData> V55Itin_list = [
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>], [2, 1]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1]>
+ ];
+ list<InstrItinData> ItinList =
+ !listconcat(V55Itin_list, DepScalarItinV55_list,
+ V55PseudoItin_list);
+}
def HexagonItinerariesV55 :
- ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
- [InstrStage<1, [SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
- [3, 1, 1, 1]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
- [2]>,
-
- // Vector
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
-
- // Misc
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>
- ]>;
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP],
+ [Hex_FWD], HexagonV55ItinList.ItinList>;
def HexagonModelV55 : SchedMachineModel {
// Max issue per cycle == bundle width.
@@ -201,5 +44,5 @@ def HexagonModelV55 : SchedMachineModel {
}
//===----------------------------------------------------------------------===//
-// Hexagon V4 Resource Definitions -
+// Hexagon V55 Resource Definitions -
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonScheduleV60.td b/lib/Target/Hexagon/HexagonScheduleV60.td
index 63784710f52b..a2544c92a72c 100644
--- a/lib/Target/Hexagon/HexagonScheduleV60.td
+++ b/lib/Target/Hexagon/HexagonScheduleV60.td
@@ -7,61 +7,6 @@
//
//===----------------------------------------------------------------------===//
-// CVI pipes from the "Hexagon Multimedia Co-Processor Extensions Arch Spec".
-def CVI_ST : FuncUnit;
-def CVI_XLANE : FuncUnit;
-def CVI_SHIFT : FuncUnit;
-def CVI_MPY0 : FuncUnit;
-def CVI_MPY1 : FuncUnit;
-def CVI_LD : FuncUnit;
-
-// Combined functional units.
-def CVI_XLSHF : FuncUnit;
-def CVI_MPY01 : FuncUnit;
-def CVI_ALL : FuncUnit;
-def CVI_XLMPY0 : FuncUnit;
-def CVI_SHFMPY1: FuncUnit;
-
-// Combined functional unit data.
-def HexagonComboFuncsV60 :
- ComboFuncUnits<[
- ComboFuncData<CVI_XLSHF , [CVI_XLANE, CVI_SHIFT]>,
- ComboFuncData<CVI_MPY01 , [CVI_MPY0, CVI_MPY1]>,
- ComboFuncData<CVI_ALL , [CVI_ST, CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1, CVI_LD]>,
- ComboFuncData<CVI_XLMPY0 , [CVI_XLANE, CVI_MPY0]>,
- ComboFuncData<CVI_SHFMPY1 , [CVI_SHIFT, CVI_MPY1]>
- ]>;
-
-// Note: When adding additional vector scheduling classes, add the
-// corresponding methods to the class HexagonInstrInfo.
-def CVI_VA : InstrItinClass;
-def CVI_VA_DV : InstrItinClass;
-def CVI_VX_LONG : InstrItinClass;
-def CVI_VX_LATE : InstrItinClass;
-def CVI_VX : InstrItinClass;
-def CVI_VX_DV_LONG : InstrItinClass;
-def CVI_VX_DV : InstrItinClass;
-def CVI_VX_DV_SLOT2 : InstrItinClass;
-def CVI_VX_DV_SLOT2_LONG_EARLY : InstrItinClass;
-def CVI_VP : InstrItinClass;
-def CVI_VP_LONG : InstrItinClass;
-def CVI_VP_VS_EARLY : InstrItinClass;
-def CVI_VP_VS_LONG_EARLY : InstrItinClass;
-def CVI_VP_VS_LONG : InstrItinClass;
-def CVI_VP_VS : InstrItinClass;
-def CVI_VP_DV : InstrItinClass;
-def CVI_VS : InstrItinClass;
-def CVI_VINLANESAT : InstrItinClass;
-def CVI_VM_LD : InstrItinClass;
-def CVI_VM_TMP_LD : InstrItinClass;
-def CVI_VM_CUR_LD : InstrItinClass;
-def CVI_VM_VP_LDU : InstrItinClass;
-def CVI_VM_ST : InstrItinClass;
-def CVI_VM_NEW_ST : InstrItinClass;
-def CVI_VM_STU : InstrItinClass;
-def CVI_HIST : InstrItinClass;
-def CVI_VA_EXT : InstrItinClass;
// There are four SLOTS (four parallel pipelines) in Hexagon V60 machine.
// This file describes that machine information.
@@ -108,196 +53,20 @@ def CVI_VA_EXT : InstrItinClass;
// S0123| CVI_VA_EXT Extract |
// |=====================================================================|
+def HexagonV60ItinList : DepScalarItinV60, ScalarItin,
+ DepHVXItinV60,
+ HVXItin, PseudoItin {
+ list<InstrItinData> ItinList =
+ !listconcat(DepScalarItinV60_list, ScalarItin_list,
+ DepHVXItinV60_list, HVXItin_list, PseudoItin_list);
+}
+
def HexagonItinerariesV60 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP,
CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1,
- CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<3, [SLOT3]>]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<2, [SLOT2]>]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<3, [SLOT2]>]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123, [InstrStage<1,
- [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<4, [SLOT0]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60.
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<4, [SLOT0]>]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<2, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<2, [SLOT_ENDLOOP]>]>,
-
- // Vector
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // Duplex and Compound
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // Misc
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDOM , [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // Latest CVI spec definitions.
- InstrItinData<CVI_VA,[InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE,CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VA_DV,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF, CVI_MPY01]>]>,
- InstrItinData<CVI_VX_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX_LATE, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX,[InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX_DV_LONG,
- [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VX_DV,
- [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VX_DV_SLOT2,
- [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VP, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VP_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VP_VS_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS_LONG,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS_LONG_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_DV , [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VS,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>]>,
- InstrItinData<CVI_VINLANESAT,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>]>,
- InstrItinData<CVI_VM_LD , [InstrStage<1, [SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_TMP_LD,[InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD]>]>,
- InstrItinData<CVI_VM_CUR_LD,[InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_VP_LDU,[InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VM_ST , [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_NEW_ST,[InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [CVI_ST]>]>,
- InstrItinData<CVI_VM_STU , [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_HIST , [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_ALL]>]>
- ]>;
+ CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL,
+ CVI_ALL_NOMEM],
+ [Hex_FWD, HVX_FWD], HexagonV60ItinList.ItinList>;
def HexagonModelV60 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/lib/Target/Hexagon/HexagonScheduleV62.td b/lib/Target/Hexagon/HexagonScheduleV62.td
index 0758788a600b..a0a8595f185f 100644
--- a/lib/Target/Hexagon/HexagonScheduleV62.td
+++ b/lib/Target/Hexagon/HexagonScheduleV62.td
@@ -6,115 +6,23 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
+// ScalarItin contains some old itineraries still used by a
+// handful of instructions. Hopefully, we will be able to get rid of them soon.
-// V62 follows the same schedule as V60 with following exceptions:
-// Following instructions are permissible on any slot on V62:
-// V4_J4_cmpeq_fp0_jump_nt
-// V4_J4_cmpeq_fp0_jump_t
-// V4_J4_cmpeq_fp1_jump_nt
-// V4_J4_cmpeq_fp1_jump_t
-// V4_J4_cmpeq_tp0_jump_nt
-// V4_J4_cmpeq_tp0_jump_t
-// V4_J4_cmpeq_tp1_jump_nt
-// V4_J4_cmpeq_tp1_jump_t
-// V4_J4_cmpeqi_fp0_jump_nt
-// V4_J4_cmpeqi_fp0_jump_t
-// V4_J4_cmpeqi_fp1_jump_nt
-// V4_J4_cmpeqi_fp1_jump_t
-// V4_J4_cmpeqi_tp0_jump_nt
-// V4_J4_cmpeqi_tp0_jump_t
-// V4_J4_cmpeqi_tp1_jump_nt
-// V4_J4_cmpeqi_tp1_jump_t
-// V4_J4_cmpeqn1_fp0_jump_nt
-// V4_J4_cmpeqn1_fp0_jump_t
-// V4_J4_cmpeqn1_fp1_jump_nt
-// V4_J4_cmpeqn1_fp1_jump_t
-// V4_J4_cmpeqn1_tp0_jump_nt
-// V4_J4_cmpeqn1_tp0_jump_t
-// V4_J4_cmpeqn1_tp1_jump_nt
-// V4_J4_cmpeqn1_tp1_jump_t
-// V4_J4_cmpgt_fp0_jump_nt
-// V4_J4_cmpgt_fp0_jump_t
-// V4_J4_cmpgt_fp1_jump_nt
-// V4_J4_cmpgt_fp1_jump_t
-// V4_J4_cmpgt_tp0_jump_nt
-// V4_J4_cmpgt_tp0_jump_t
-// V4_J4_cmpgt_tp1_jump_nt
-// V4_J4_cmpgt_tp1_jump_t
-// V4_J4_cmpgti_fp0_jump_nt
-// V4_J4_cmpgti_fp0_jump_t
-// V4_J4_cmpgti_fp1_jump_nt
-// V4_J4_cmpgti_fp1_jump_t
-// V4_J4_cmpgti_tp0_jump_nt
-// V4_J4_cmpgti_tp0_jump_t
-// V4_J4_cmpgti_tp1_jump_nt
-// V4_J4_cmpgti_tp1_jump_t
-// V4_J4_cmpgtn1_fp0_jump_nt
-// V4_J4_cmpgtn1_fp0_jump_t
-// V4_J4_cmpgtn1_fp1_jump_nt
-// V4_J4_cmpgtn1_fp1_jump_t
-// V4_J4_cmpgtn1_tp0_jump_nt
-// V4_J4_cmpgtn1_tp0_jump_t
-// V4_J4_cmpgtn1_tp1_jump_nt
-// V4_J4_cmpgtn1_tp1_jump_t
-// V4_J4_cmpgtu_fp0_jump_nt
-// V4_J4_cmpgtu_fp0_jump_t
-// V4_J4_cmpgtu_fp1_jump_nt
-// V4_J4_cmpgtu_fp1_jump_t
-// V4_J4_cmpgtu_tp0_jump_nt
-// V4_J4_cmpgtu_tp0_jump_t
-// V4_J4_cmpgtu_tp1_jump_nt
-// V4_J4_cmpgtu_tp1_jump_t
-// V4_J4_cmpgtui_fp0_jump_nt
-// V4_J4_cmpgtui_fp0_jump_t
-// V4_J4_cmpgtui_fp1_jump_nt
-// V4_J4_cmpgtui_fp1_jump_t
-// V4_J4_cmpgtui_tp0_jump_nt
-// V4_J4_cmpgtui_tp0_jump_t
-// V4_J4_cmpgtui_tp1_jump_nt
-// V4_J4_cmpgtui_tp1_jump_t
-// V4_J4_tstbit0_fp0_jump_nt
-// V4_J4_tstbit0_fp0_jump_t
-// V4_J4_tstbit0_fp1_jump_nt
-// V4_J4_tstbit0_fp1_jump_t
-// V4_J4_tstbit0_tp0_jump_nt
-// V4_J4_tstbit0_tp0_jump_t
-// V4_J4_tstbit0_tp1_jump_nt
-// V4_J4_tstbit0_tp1_jump_t
-// JMP
-// JMPEXT
-// JMPEXT_f
-// JMPEXT_fnew_nt
-// JMPEXT_fnew_t
-// JMPEXT_t
-// JMPEXT_tnew_nt
-// JMPEXT_tnew_t
-// JMPNOTEXT
-// JMPNOTEXT_f
-// JMPNOTEXT_fnew_nt
-// JMPNOTEXT_fnew_t
-// JMPNOTEXT_t
-// JMPNOTEXT_tnew_nt
-// JMPNOTEXT_tnew_t
-// JMP_f
-// JMP_fnew_nt
-// JMP_fnew_t
-// JMP_t
-// JMP_tnew_nt
-// JMP_tnew_t
-// RESTORE_DEALLOC_RET_JMP_V4
-// RESTORE_DEALLOC_RET_JMP_V4_EXT
-
-def HexagonV62ItinList : ScalarItin, HVXV62Itin {
+def HexagonV62ItinList : DepScalarItinV62, ScalarItin,
+ DepHVXItinV62, HVXItin, PseudoItin {
list<InstrItinData> ItinList =
- !listconcat(ScalarItin_list, HVXV62Itin_list);
+ !listconcat(DepScalarItinV62_list, ScalarItin_list,
+ DepHVXItinV62_list, HVXItin_list, PseudoItin_list);
}
def HexagonItinerariesV62 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP,
CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1,
- CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL],
- [], HexagonV62ItinList.ItinList>;
+ CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL,
+ CVI_ALL_NOMEM],
+ [Hex_FWD, HVX_FWD], HexagonV62ItinList.ItinList>;
def HexagonModelV62 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp
index 033b93fc910a..8851a23ae8ac 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -73,6 +73,10 @@ static cl::opt<bool> OverrideLongCalls("hexagon-long-calls",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("If present, forces/disables the use of long calls"));
+static cl::opt<bool> EnablePredicatedCalls("hexagon-pred-calls",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Consider calls to be predicable"));
+
void HexagonSubtarget::initializeEnvironment() {
UseMemOps = false;
ModeIEEERndNear = false;
@@ -139,6 +143,59 @@ HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
UseBSBScheduling = hasV60TOps() && EnableBSBSched;
}
+/// \brief Perform target specific adjustments to the latency of a schedule
+/// dependency.
+void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
+ SDep &Dep) const {
+ MachineInstr *SrcInst = Src->getInstr();
+ MachineInstr *DstInst = Dst->getInstr();
+ if (!Src->isInstr() || !Dst->isInstr())
+ return;
+
+ const HexagonInstrInfo *QII = getInstrInfo();
+
+ // Instructions with .new operands have zero latency.
+ SmallSet<SUnit *, 4> ExclSrc;
+ SmallSet<SUnit *, 4> ExclDst;
+ if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
+ isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ if (!hasV60TOps())
+ return;
+
+ // If it's a REG_SEQUENCE, use its destination instruction to determine
+ // the correct latency.
+ if (DstInst->isRegSequence() && Dst->NumSuccs == 1) {
+ unsigned RSeqReg = DstInst->getOperand(0).getReg();
+ MachineInstr *RSeqDst = Dst->Succs[0].getSUnit()->getInstr();
+ unsigned UseIdx = -1;
+ for (unsigned OpNum = 0; OpNum < RSeqDst->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = RSeqDst->getOperand(OpNum);
+ if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == RSeqReg) {
+ UseIdx = OpNum;
+ break;
+ }
+ }
+ unsigned RSeqLatency = (InstrInfo.getOperandLatency(&InstrItins, *SrcInst,
+ 0, *RSeqDst, UseIdx));
+ Dep.setLatency(RSeqLatency);
+ }
+
+ // Try to schedule uses near definitions to generate .cur.
+ ExclSrc.clear();
+ ExclDst.clear();
+ if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
+ isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ updateLatency(*SrcInst, *DstInst, Dep);
+}
+
void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
for (auto &SU : DAG->SUnits) {
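A rough standalone sketch (not part of the patch) of what the adjustSchedDependency() hook added above decides for each dependence edge: the edge becomes zero latency when the producer/consumer pair can share a packet and is the best zero-latency candidate, otherwise the ordinary latency update applies. Edge, CanShareBundle, BestZeroLatencyPair and adjustLatency are hypothetical stand-ins for SDep, canExecuteInBundle(), isBestZeroLatency() and updateLatency(); this is a simplified model, not the hook itself.

#include <iostream>

struct Edge {
  unsigned Latency;         // itinerary-derived latency in cycles
  bool CanShareBundle;      // stand-in for canExecuteInBundle(Src, Dst)
  bool BestZeroLatencyPair; // stand-in for isBestZeroLatency(Src, Dst, ...)
};

// Zero the edge only when the pair can go in one packet and is the best
// zero-latency candidate; otherwise fall through to the ordinary update,
// where BSB scheduling halves the rounded-up latency ((L + 1) >> 1).
void adjustLatency(Edge &E, bool UseBSBScheduling) {
  if (E.CanShareBundle && E.BestZeroLatencyPair) {
    E.Latency = 0;
    return;
  }
  if (UseBSBScheduling)
    E.Latency = (E.Latency + 1) >> 1;
}

int main() {
  Edge NewValue{2, true, true};   // forwarded as a .new operand
  Edge Ordinary{3, false, false}; // plain register dependence
  adjustLatency(NewValue, /*UseBSBScheduling=*/true);
  adjustLatency(Ordinary, /*UseBSBScheduling=*/true);
  std::cout << NewValue.Latency << ' ' << Ordinary.Latency << '\n'; // prints "0 2"
  return 0;
}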
@@ -154,19 +211,19 @@ void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
for (auto &SU : DAG->SUnits) {
// Update the latency of chain edges between v60 vector load or store
- // instructions to be 1. These instructions cannot be scheduled in the
+    // instructions to be 1. These instructions cannot be scheduled in the
// same packet.
MachineInstr &MI1 = *SU.getInstr();
auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
bool IsStoreMI1 = MI1.mayStore();
bool IsLoadMI1 = MI1.mayLoad();
- if (!QII->isV60VectorInstruction(MI1) || !(IsStoreMI1 || IsLoadMI1))
+ if (!QII->isHVXVec(MI1) || !(IsStoreMI1 || IsLoadMI1))
continue;
for (auto &SI : SU.Succs) {
if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
continue;
MachineInstr &MI2 = *SI.getSUnit()->getInstr();
- if (!QII->isV60VectorInstruction(MI2))
+ if (!QII->isHVXVec(MI2))
continue;
if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
SI.setLatency(1);
@@ -204,69 +261,99 @@ bool HexagonSubtarget::enableMachineScheduler() const {
return true;
}
-bool HexagonSubtarget::enableSubRegLiveness() const {
- return EnableSubregLiveness;
+bool HexagonSubtarget::usePredicatedCalls() const {
+ return EnablePredicatedCalls;
}
-// This helper function is responsible for increasing the latency only.
void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
MachineInstr &DstInst, SDep &Dep) const {
+ if (Dep.isArtificial()) {
+ Dep.setLatency(1);
+ return;
+ }
+
if (!hasV60TOps())
return;
auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());
- if (EnableVecFrwdSched && QII.addLatencyToSchedule(SrcInst, DstInst)) {
- // Vec frwd scheduling.
- Dep.setLatency(Dep.getLatency() + 1);
- } else if (useBSBScheduling() &&
- QII.isLateInstrFeedsEarlyInstr(SrcInst, DstInst)) {
- // BSB scheduling.
- Dep.setLatency(Dep.getLatency() + 1);
- } else if (EnableTCLatencySched) {
- // TClass latency scheduling.
- // Check if SrcInst produces in 2C an operand of DstInst taken in stage 2B.
- if (QII.isTC1(SrcInst) || QII.isTC2(SrcInst))
- if (!QII.isTC1(DstInst) && !QII.isTC2(DstInst))
- Dep.setLatency(Dep.getLatency() + 1);
- }
+ // BSB scheduling.
+ if (QII.isHVXVec(SrcInst) || useBSBScheduling())
+ Dep.setLatency((Dep.getLatency() + 1) >> 1);
}
-/// If the SUnit has a zero latency edge, return the other SUnit.
-static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
- for (auto &I : Deps)
- if (I.isAssignedRegDep() && I.getLatency() == 0 &&
- !I.getSUnit()->getInstr()->isPseudo())
- return I.getSUnit();
- return nullptr;
+void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
+ MachineInstr *SrcI = Src->getInstr();
+ for (auto &I : Src->Succs) {
+ if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
+ continue;
+ unsigned DepR = I.getReg();
+ int DefIdx = -1;
+ for (unsigned OpNum = 0; OpNum < SrcI->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = SrcI->getOperand(OpNum);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == DepR)
+ DefIdx = OpNum;
+ }
+ assert(DefIdx >= 0 && "Def Reg not found in Src MI");
+ MachineInstr *DstI = Dst->getInstr();
+ for (unsigned OpNum = 0; OpNum < DstI->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = DstI->getOperand(OpNum);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == DepR) {
+ int Latency = (InstrInfo.getOperandLatency(&InstrItins, *SrcI,
+ DefIdx, *DstI, OpNum));
+
+        // For some instructions (e.g., COPY), we might end up with a latency
+        // of zero or less, as they don't have any itinerary class associated
+        // with them.
+ if (Latency <= 0)
+ Latency = 1;
+
+ I.setLatency(Latency);
+ updateLatency(*SrcI, *DstI, I);
+ }
+ }
+
+ // Update the latency of opposite edge too.
+ for (auto &J : Dst->Preds) {
+ if (J.getSUnit() != Src)
+ continue;
+ J.setLatency(I.getLatency());
+ }
+ }
}
/// Change the latency between the two SUnits.
-void HexagonSubtarget::changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps,
- SUnit *Dst, unsigned Lat) const {
- MachineInstr &SrcI = *Src->getInstr();
- for (auto &I : Deps) {
+void HexagonSubtarget::changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat)
+ const {
+ for (auto &I : Src->Succs) {
if (I.getSUnit() != Dst)
continue;
+ SDep T = I;
I.setLatency(Lat);
- SUnit *UpdateDst = I.getSUnit();
- updateLatency(SrcI, *UpdateDst->getInstr(), I);
+
// Update the latency of opposite edge too.
- for (auto &PI : UpdateDst->Preds) {
- if (PI.getSUnit() != Src || !PI.isAssignedRegDep())
- continue;
- PI.setLatency(Lat);
- updateLatency(SrcI, *UpdateDst->getInstr(), PI);
- }
+ T.setSUnit(Src);
+ auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
+ assert(F != Dst->Preds.end());
+ F->setLatency(I.getLatency());
}
}
+/// If the SUnit has a zero latency edge, return the other SUnit.
+static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
+ for (auto &I : Deps)
+ if (I.isAssignedRegDep() && I.getLatency() == 0 &&
+ !I.getSUnit()->getInstr()->isPseudo())
+ return I.getSUnit();
+ return nullptr;
+}
+
// Return true if these are the best two instructions to schedule
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
-// ther others, if needed.
+// the others, if needed.
bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
- const HexagonInstrInfo *TII) const {
+ const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
+ SmallSet<SUnit*, 4> &ExclDst) const {
MachineInstr &SrcInst = *Src->getInstr();
MachineInstr &DstInst = *Dst->getInstr();
@@ -277,6 +364,16 @@ bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
if (SrcInst.isPHI() || DstInst.isPHI())
return false;
+ if (!TII->isToBeScheduledASAP(SrcInst, DstInst) &&
+ !TII->canExecuteInBundle(SrcInst, DstInst))
+ return false;
+
+ // The architecture doesn't allow three dependent instructions in the same
+ // packet. So, if the destination has a zero latency successor, then it's
+ // not a candidate for a zero latency predecessor.
+ if (getZeroLatency(Dst, Dst->Succs) != nullptr)
+ return false;
+
// Check if the Dst instruction is the best candidate first.
SUnit *Best = nullptr;
SUnit *DstBest = nullptr;
@@ -290,98 +387,53 @@ bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
if (Best != Dst)
return false;
- // The caller frequents adds the same dependence twice. If so, then
+ // The caller frequently adds the same dependence twice. If so, then
// return true for this case too.
- if (Src == SrcBest && Dst == DstBest)
+ if ((Src == SrcBest && Dst == DstBest ) ||
+ (SrcBest == nullptr && Dst == DstBest) ||
+ (Src == SrcBest && Dst == nullptr))
return true;
// Reassign the latency for the previous bests, which requires setting
// the dependence edge in both directions.
- if (SrcBest != nullptr)
- changeLatency(SrcBest, SrcBest->Succs, Dst, 1);
- if (DstBest != nullptr)
- changeLatency(Src, Src->Succs, DstBest, 1);
- // If there is an edge from SrcBest to DstBst, then try to change that
- // to 0 now.
- if (SrcBest && DstBest)
- changeLatency(SrcBest, SrcBest->Succs, DstBest, 0);
-
- return true;
-}
-
-// Update the latency of a Phi when the Phi bridges two instructions that
-// require a multi-cycle latency.
-void HexagonSubtarget::changePhiLatency(MachineInstr &SrcInst, SUnit *Dst,
- SDep &Dep) const {
- if (!SrcInst.isPHI() || Dst->NumPreds == 0 || Dep.getLatency() != 0)
- return;
-
- for (const SDep &PI : Dst->Preds) {
- if (PI.getLatency() != 0)
- continue;
- Dep.setLatency(2);
- break;
- }
-}
-
-/// \brief Perform target specific adjustments to the latency of a schedule
-/// dependency.
-void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
- SDep &Dep) const {
- MachineInstr *SrcInst = Src->getInstr();
- MachineInstr *DstInst = Dst->getInstr();
- if (!Src->isInstr() || !Dst->isInstr())
- return;
-
- const HexagonInstrInfo *QII = static_cast<const HexagonInstrInfo *>(getInstrInfo());
-
- // Instructions with .new operands have zero latency.
- if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
- isBestZeroLatency(Src, Dst, QII)) {
- Dep.setLatency(0);
- return;
+ if (SrcBest != nullptr) {
+ if (!hasV60TOps())
+ changeLatency(SrcBest, Dst, 1);
+ else
+ restoreLatency(SrcBest, Dst);
}
-
- if (!hasV60TOps())
- return;
-
- // Don't adjust the latency of post-increment part of the instruction.
- if (QII->isPostIncrement(*SrcInst) && Dep.isAssignedRegDep()) {
- if (SrcInst->mayStore())
- return;
- if (Dep.getReg() != SrcInst->getOperand(0).getReg())
- return;
- } else if (QII->isPostIncrement(*DstInst) && Dep.getKind() == SDep::Anti) {
- if (DstInst->mayStore())
- return;
- if (Dep.getReg() != DstInst->getOperand(0).getReg())
- return;
- } else if (QII->isPostIncrement(*DstInst) && DstInst->mayStore() &&
- Dep.isAssignedRegDep()) {
- MachineOperand &Op = DstInst->getOperand(DstInst->getNumOperands() - 1);
- if (Op.isReg() && Dep.getReg() != Op.getReg())
- return;
- }
-
- // Check if we need to change any the latency values when Phis are added.
- if (useBSBScheduling() && SrcInst->isPHI()) {
- changePhiLatency(*SrcInst, Dst, Dep);
- return;
+ if (DstBest != nullptr) {
+ if (!hasV60TOps())
+ changeLatency(Src, DstBest, 1);
+ else
+ restoreLatency(Src, DstBest);
}
- // If it's a REG_SEQUENCE, use its destination instruction to determine
- // the correct latency.
- if (DstInst->isRegSequence() && Dst->NumSuccs == 1)
- DstInst = Dst->Succs[0].getSUnit()->getInstr();
-
- // Try to schedule uses near definitions to generate .cur.
- if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
- isBestZeroLatency(Src, Dst, QII)) {
- Dep.setLatency(0);
- return;
+  // Attempt to find another opportunity for zero latency in a different
+ // dependence.
+ if (SrcBest && DstBest)
+    // If there is an edge from SrcBest to DstBest, then try to change that
+ // to 0 now.
+ changeLatency(SrcBest, DstBest, 0);
+ else if (DstBest) {
+ // Check if the previous best destination instruction has a new zero
+ // latency dependence opportunity.
+ ExclSrc.insert(Src);
+ for (auto &I : DstBest->Preds)
+ if (ExclSrc.count(I.getSUnit()) == 0 &&
+ isBestZeroLatency(I.getSUnit(), DstBest, TII, ExclSrc, ExclDst))
+ changeLatency(I.getSUnit(), DstBest, 0);
+ } else if (SrcBest) {
+ // Check if previous best source instruction has a new zero latency
+ // dependence opportunity.
+ ExclDst.insert(Dst);
+ for (auto &I : SrcBest->Succs)
+ if (ExclDst.count(I.getSUnit()) == 0 &&
+ isBestZeroLatency(SrcBest, I.getSUnit(), TII, ExclSrc, ExclDst))
+ changeLatency(SrcBest, I.getSUnit(), 0);
}
- updateLatency(*SrcInst, *DstInst, Dep);
+ return true;
}
unsigned HexagonSubtarget::getL1CacheLineSize() const {
@@ -392,3 +444,7 @@ unsigned HexagonSubtarget::getL1PrefetchDistance() const {
return 32;
}
+bool HexagonSubtarget::enableSubRegLiveness() const {
+ return EnableSubregLiveness;
+}
+
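As a standalone illustration (not from the patch), the sketch below models the single new constraint that isBestZeroLatency() enforces: a candidate pair is rejected when the destination already feeds a zero-latency successor, because three dependent instructions cannot be packetized together. Node, Edge, hasZeroLatencySucc and mayBeZeroLatencyPair are made-up stand-ins for SUnit/SDep and the real candidate selection; the rest of the best-pair logic is omitted.

#include <iostream>
#include <vector>

struct Edge { int Dst; unsigned Latency; };
struct Node { std::vector<Edge> Succs; };

// True if N already starts a zero-latency (.new) edge.
bool hasZeroLatencySucc(const Node &N) {
  for (const Edge &E : N.Succs)
    if (E.Latency == 0)
      return true;
  return false;
}

// Candidate Src -> Dst may become zero latency only if Dst does not already
// begin a zero-latency chain of its own.
bool mayBeZeroLatencyPair(const Node & /*Src*/, const Node &Dst) {
  return !hasZeroLatencySucc(Dst);
}

int main() {
  // Chain 0 -> 1 -> 2 where 1 -> 2 is already a zero-latency edge:
  // 0 -> 1 must then keep a non-zero latency.
  std::vector<Node> G(3);
  G[0].Succs = {{1, 2}};
  G[1].Succs = {{2, 0}};
  std::cout << std::boolalpha
            << mayBeZeroLatencyPair(G[0], G[1]) << '\n'; // prints "false"
  return 0;
}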
diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h
index 6a3e7f13be4c..4379efa79c9c 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/lib/Target/Hexagon/HexagonSubtarget.h
@@ -104,6 +104,7 @@ public:
bool useHVXDblOps() const { return UseHVXOps && UseHVXDblOps; }
bool useHVXSglOps() const { return UseHVXOps && !UseHVXDblOps; }
bool useLongCalls() const { return UseLongCalls; }
+ bool usePredicatedCalls() const;
bool useBSBScheduling() const { return UseBSBScheduling; }
bool enableMachineScheduler() const override;
@@ -146,11 +147,10 @@ private:
// Helper function responsible for increasing the latency only.
void updateLatency(MachineInstr &SrcInst, MachineInstr &DstInst, SDep &Dep)
const;
- void changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps, SUnit *Dst,
- unsigned Lat) const;
- bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII)
- const;
- void changePhiLatency(MachineInstr &SrcInst, SUnit *Dst, SDep &Dep) const;
+ void restoreLatency(SUnit *Src, SUnit *Dst) const;
+ void changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat) const;
+ bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
+ SmallSet<SUnit*, 4> &ExclSrc, SmallSet<SUnit*, 4> &ExclDst) const;
};
} // end namespace llvm
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index bf1dce67bd0a..c21b6e2515d3 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -334,7 +334,7 @@ bool HexagonPacketizerList::isNewifiable(const MachineInstr &MI,
// Vector stores can be predicated, and can be new-value stores, but
// they cannot be predicated on a .new predicate value.
if (NewRC == &Hexagon::PredRegsRegClass)
- if (HII->isV60VectorInstruction(MI) && MI.mayStore())
+ if (HII->isHVXVec(MI) && MI.mayStore())
return false;
return HII->isCondInst(MI) || HII->isJumpR(MI) || MI.isReturn() ||
HII->mayBeNewStore(MI);
@@ -377,9 +377,9 @@ void HexagonPacketizerList::cleanUpDotCur() {
bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr &MI,
const SUnit *PacketSU, unsigned DepReg, MachineBasicBlock::iterator &MII,
const TargetRegisterClass *RC) {
- if (!HII->isV60VectorInstruction(MI))
+ if (!HII->isHVXVec(MI))
return false;
- if (!HII->isV60VectorInstruction(*MII))
+ if (!HII->isHVXVec(*MII))
return false;
// Already a dot new instruction.
@@ -1365,7 +1365,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// Data dpendence ok if we have load.cur.
if (DepType == SDep::Data && HII->isDotCurInst(J)) {
- if (HII->isV60VectorInstruction(I))
+ if (HII->isHVXVec(I))
continue;
}
@@ -1374,6 +1374,8 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
if (canPromoteToDotNew(I, SUJ, DepReg, II, RC)) {
if (promoteToDotNew(I, DepType, II, RC)) {
PromotedToDotNew = true;
+ if (cannotCoexist(I, J))
+ FoundSequentialDependence = true;
continue;
}
}
@@ -1418,26 +1420,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
DepType != SDep::Output)
continue;
- // Ignore output dependences due to superregs. We can write to two
- // different subregisters of R1:0 for instance in the same cycle.
-
- // If neither I nor J defines DepReg, then this is a superfluous output
- // dependence. The dependence must be of the form:
- // R0 = ...
- // R1 = ...
- // and there is an output dependence between the two instructions with
- // DepReg = D0.
- // We want to ignore these dependences. Ideally, the dependence
- // constructor should annotate such dependences. We can then avoid this
- // relatively expensive check.
- //
if (DepType == SDep::Output) {
- // DepReg is the register that's responsible for the dependence.
- unsigned DepReg = SUJ->Succs[i].getReg();
-
- // Check if I and J really defines DepReg.
- if (!I.definesRegister(DepReg) && !J.definesRegister(DepReg))
- continue;
FoundSequentialDependence = true;
break;
}
@@ -1553,10 +1536,9 @@ bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
MachineInstr &I = *SUI->getInstr();
MachineInstr &J = *SUJ->getInstr();
- if (cannotCoexist(I, J))
- return false;
+ bool Coexist = !cannotCoexist(I, J);
- if (!Dependence)
+ if (Coexist && !Dependence)
return true;
// Check if the instruction was promoted to a dot-new. If so, demote it
@@ -1659,21 +1641,6 @@ bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
}
-// Return true when ConsMI uses a register defined by ProdMI.
-static bool isDependent(const MachineInstr &ProdMI,
- const MachineInstr &ConsMI) {
- if (!ProdMI.getOperand(0).isReg())
- return false;
- unsigned DstReg = ProdMI.getOperand(0).getReg();
-
- for (auto &Op : ConsMI.operands())
- if (Op.isReg() && Op.isUse() && Op.getReg() == DstReg)
- // The MIs depend on each other.
- return true;
-
- return false;
-}
-
// V60 forward scheduling.
bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
// If the packet already stalls, then ignore the stall from a subsequent
@@ -1695,40 +1662,48 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
return false;
}
- // Check for stall between two vector instructions.
- if (HII->isV60VectorInstruction(I)) {
- for (auto J : OldPacketMIs) {
- if (!HII->isV60VectorInstruction(*J))
- continue;
- if (isDependent(*J, I) && !HII->isVecUsableNextPacket(*J, I))
- return true;
- }
+ SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
- return false;
- }
+ // Check if the latency is 0 between this instruction and any instruction
+ // in the current packet. If so, we disregard any potential stalls due to
+ // the instructions in the previous packet. Most of the instruction pairs
+ // that can go together in the same packet have 0 latency between them.
+  // The only exceptions are new-value jumps, since they're generated much
+  // later and their latencies can't be changed at that point. Another is a
+  // .cur instruction whose consumer has a 0 latency successor (such as .new).
+ // In this case, the latency between .cur and the consumer stays non-zero
+ // even though we can have both .cur and .new in the same packet. Changing
+ // the latency to 0 is not an option as it causes software pipeliner to
+ // not pipeline in some cases.
+
+ // For Example:
+ // {
+ // I1: v6.cur = vmem(r0++#1)
+ // I2: v7 = valign(v6,v4,r2)
+ // I3: vmem(r5++#1) = v7.new
+ // }
+  // Here I2 and I3 have 0 cycle latency, but I1 and I2 have 2.
- // Check for stall between two scalar instructions. First, check that
- // there is no definition of a use in the current packet, because it
- // may be a candidate for .new.
- for (auto J : CurrentPacketMIs)
- if (!HII->isV60VectorInstruction(*J) && isDependent(*J, I))
- return false;
+ for (auto J : CurrentPacketMIs) {
+ SUnit *SUJ = MIToSUnit[J];
+ for (auto &Pred : SUI->Preds)
+ if (Pred.getSUnit() == SUJ &&
+ (Pred.getLatency() == 0 || HII->isNewValueJump(I) ||
+ HII->isToBeScheduledASAP(*J, I)))
+ return false;
+ }
- // Check for stall between I and instructions in the previous packet.
- if (MF.getSubtarget<HexagonSubtarget>().useBSBScheduling()) {
- for (auto J : OldPacketMIs) {
- if (HII->isV60VectorInstruction(*J))
- continue;
- if (!HII->isLateInstrFeedsEarlyInstr(*J, I))
- continue;
- if (isDependent(*J, I) && !HII->canExecuteInBundle(*J, I))
+ // Check if the latency is greater than one between this instruction and any
+ // instruction in the previous packet.
+ for (auto J : OldPacketMIs) {
+ SUnit *SUJ = MIToSUnit[J];
+ for (auto &Pred : SUI->Preds)
+ if (Pred.getSUnit() == SUJ && Pred.getLatency() > 1)
return true;
- }
}
// Check if the latency is greater than one between this instruction and any
// instruction in the previous packet.
- SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
for (auto J : OldPacketMIs) {
SUnit *SUJ = MIToSUnit[J];
for (auto &Pred : SUI->Preds)
@@ -1739,7 +1714,6 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
return false;
}
-
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
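A simplified standalone model (not the packetizer code) of how the reworked producesStall() now reasons purely about edge latencies instead of re-deriving register dependences: a zero-latency edge from the current packet suppresses any stall, otherwise a latency greater than one from the previous packet reports one. PredEdge and producesStall below are illustrative names, and the new-value-jump and isToBeScheduledASAP special cases from the patch are omitted.

#include <iostream>
#include <vector>

struct PredEdge {
  int Producer;      // index of the producing instruction
  unsigned Latency;  // scheduler latency of the edge into the candidate
};

// No stall if some current-packet member reaches the candidate with latency 0
// (the pair can issue together); otherwise a stall if a previous-packet member
// reaches it with latency greater than one.
bool producesStall(const std::vector<PredEdge> &Preds,
                   const std::vector<int> &CurrentPacket,
                   const std::vector<int> &OldPacket) {
  for (int J : CurrentPacket)
    for (const PredEdge &P : Preds)
      if (P.Producer == J && P.Latency == 0)
        return false;
  for (int J : OldPacket)
    for (const PredEdge &P : Preds)
      if (P.Producer == J && P.Latency > 1)
        return true;
  return false;
}

int main() {
  // The candidate depends on instruction 1 (previous packet) with latency 2,
  // and on instruction 3 (current packet) with latency 0: the zero-latency
  // pairing wins and no stall is reported.
  std::vector<PredEdge> Preds = {{1, 2}, {3, 0}};
  std::cout << std::boolalpha
            << producesStall(Preds, /*CurrentPacket=*/{3}, /*OldPacket=*/{1})
            << '\n'; // prints "false"
  return 0;
}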
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index adb546dc2140..d8009c5da08e 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -29,7 +29,7 @@ namespace llvm {
///
namespace HexagonII {
unsigned const TypeCVI_FIRST = TypeCVI_HIST;
- unsigned const TypeCVI_LAST = TypeCVI_VX_DV;
+ unsigned const TypeCVI_LAST = TypeCVI_VX_LATE;
enum SubTarget {
HasV4SubT = 0x3f,
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index dfb5f4cc8260..70410ff03a64 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -788,14 +788,6 @@ HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
if (HexagonMCInstrInfo::isSubInstruction(MI) ||
llvm::HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCJ)
return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
- switch(MI.getOpcode()){
- case Hexagon::A2_tfrrcr:
- case Hexagon::A2_tfrcrr:
- if(Reg == Hexagon::M0)
- Reg = Hexagon::C6;
- if(Reg == Hexagon::M1)
- Reg = Hexagon::C7;
- }
return MCT.getRegisterInfo()->getEncodingValue(Reg);
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index a5afa1daeb9e..564d43b45cb8 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -102,12 +102,13 @@ void HexagonCVIResource::SetupTUL(TypeUnitsAndLanes *TUL, StringRef CPU) {
UnitsAndLanes(CVI_XLANE | CVI_SHIFT | CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VA_DV] = UnitsAndLanes(CVI_XLANE | CVI_MPY0, 2);
(*TUL)[HexagonII::TypeCVI_VX] = UnitsAndLanes(CVI_MPY0 | CVI_MPY1, 1);
+ (*TUL)[HexagonII::TypeCVI_VX_LATE] = UnitsAndLanes(CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VX_DV] = UnitsAndLanes(CVI_MPY0, 2);
(*TUL)[HexagonII::TypeCVI_VP] = UnitsAndLanes(CVI_XLANE, 1);
(*TUL)[HexagonII::TypeCVI_VP_VS] = UnitsAndLanes(CVI_XLANE, 2);
(*TUL)[HexagonII::TypeCVI_VS] = UnitsAndLanes(CVI_SHIFT, 1);
(*TUL)[HexagonII::TypeCVI_VINLANESAT] =
- (CPU == "hexagonv60" || CPU == "hexagonv61" || CPU == "hexagonv61v1")
+ (CPU == "hexagonv60")
? UnitsAndLanes(CVI_SHIFT, 1)
: UnitsAndLanes(CVI_XLANE | CVI_SHIFT | CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VM_LD] =
@@ -291,10 +292,8 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeNCJ:
++memory; // NV insns are memory-like.
- if (HexagonMCInstrInfo::getDesc(MCII, ID).isBranch()) {
- ++jumps, ++jump1;
- foundBranches.push_back(ISJ);
- }
+ ++jumps, ++jump1;
+ foundBranches.push_back(ISJ);
break;
case HexagonII::TypeV2LDST:
if (HexagonMCInstrInfo::getDesc(MCII, ID).mayLoad()) {
diff --git a/lib/Target/Hexagon/RDFLiveness.cpp b/lib/Target/Hexagon/RDFLiveness.cpp
index 726b7af73b0a..9d8a3881797b 100644
--- a/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/lib/Target/Hexagon/RDFLiveness.cpp
@@ -497,26 +497,33 @@ void Liveness::computePhiInfo() {
// = R1:0 u6 Not reached by d1 (covered collectively
// by d3 and d5), but following reached
// defs and uses from d1 will lead here.
- auto InPhiDefs = [&PhiDefs] (NodeAddr<DefNode*> DA) -> bool {
- return PhiDefs.count(DA.Id);
- };
for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
// For each reached register UI->first, there is a set UI->second, of
// uses of it. For each such use, check if it is reached by this phi,
// i.e. check if the set of its reaching uses intersects the set of
// this phi's defs.
- NodeRefSet &Uses = UI->second;
- for (auto I = Uses.begin(), E = Uses.end(); I != E; ) {
- auto UA = DFG.addr<UseNode*>(I->first);
+ NodeRefSet Uses = UI->second;
+ UI->second.clear();
+ for (std::pair<NodeId,LaneBitmask> I : Uses) {
+ auto UA = DFG.addr<UseNode*>(I.first);
// Undef flag is checked above.
assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
- RegisterRef R(UI->first, I->second);
- NodeList RDs = getAllReachingDefs(R, UA);
- // If none of the reaching defs of R are from this phi, remove this
- // use of R.
- I = any_of(RDs, InPhiDefs) ? std::next(I) : Uses.erase(I);
+ RegisterRef R(UI->first, I.second);
+ // Calculate the exposed part of the reached use.
+ RegisterAggr Covered(PRI);
+ for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
+ if (PhiDefs.count(DA.Id))
+ break;
+ Covered.insert(DA.Addr->getRegRef(DFG));
+ }
+ if (RegisterRef RC = Covered.clearIn(R)) {
+ // We are updating the map for register UI->first, so we need
+ // to map RC to be expressed in terms of that register.
+ RegisterRef S = PRI.mapTo(RC, UI->first);
+ UI->second.insert({I.first, S.Mask});
+ }
}
- UI = Uses.empty() ? RealUses.erase(UI) : std::next(UI);
+ UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
}
// If this phi reaches some "real" uses, add it to the queue for upward
@@ -626,7 +633,7 @@ void Liveness::computePhiInfo() {
const RegisterAggr &DRs = PhiDRs.at(P.first);
if (!DRs.hasAliasOf(R))
continue;
- R = DRs.intersectWith(R);
+ R = PRI.mapTo(DRs.intersectWith(R), T.first);
for (std::pair<NodeId,LaneBitmask> V : T.second) {
LaneBitmask M = R.Mask & V.second;
if (M.none())
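A minimal standalone sketch (not RDF code) of the lane-mask idea behind the computePhiInfo() change above: rather than dropping a reached use outright when a non-phi def covers it, accumulate the lanes covered by the reaching defs encountered before the phi and keep only the still-exposed lanes. LaneMask, ReachingDef and exposedLanes are simplified stand-ins for RegisterAggr, the def nodes returned by getAllReachingDefs(), and the Covered/clearIn computation.

#include <cstdint>
#include <iostream>
#include <vector>

using LaneMask = uint32_t;

struct ReachingDef {
  bool FromPhi;    // def belongs to the phi under consideration
  LaneMask Lanes;  // lanes written by this def
};

// Walk defs from nearest to farthest; stop at the first def of the phi.
// The exposed part of the use is whatever the intervening defs did not cover.
LaneMask exposedLanes(LaneMask UseLanes, const std::vector<ReachingDef> &Defs) {
  LaneMask Covered = 0;
  for (const ReachingDef &D : Defs) {
    if (D.FromPhi)
      break;
    Covered |= D.Lanes;
  }
  return UseLanes & ~Covered;
}

int main() {
  // A use of both halves of a register pair; the low half is re-defined
  // between the phi and the use, so only the high half remains exposed.
  LaneMask Use = 0b11;
  std::vector<ReachingDef> Defs = {{false, 0b01}, {true, 0b11}};
  std::cout << std::hex << exposedLanes(Use, Defs) << '\n'; // prints "2"
  return 0;
}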
diff --git a/lib/Target/Hexagon/RDFRegisters.cpp b/lib/Target/Hexagon/RDFRegisters.cpp
index 4224ded3418b..2aabf4ee1a38 100644
--- a/lib/Target/Hexagon/RDFRegisters.cpp
+++ b/lib/Target/Hexagon/RDFRegisters.cpp
@@ -212,6 +212,21 @@ bool PhysicalRegisterInfo::aliasMM(RegisterRef RM, RegisterRef RN) const {
return false;
}
+RegisterRef PhysicalRegisterInfo::mapTo(RegisterRef RR, unsigned R) const {
+ if (RR.Reg == R)
+ return RR;
+ if (unsigned Idx = TRI.getSubRegIndex(R, RR.Reg))
+ return RegisterRef(R, TRI.composeSubRegIndexLaneMask(Idx, RR.Mask));
+ if (unsigned Idx = TRI.getSubRegIndex(RR.Reg, R)) {
+ const RegInfo &RI = RegInfos[R];
+ LaneBitmask RCM = RI.RegClass ? RI.RegClass->LaneMask
+ : LaneBitmask::getAll();
+ LaneBitmask M = TRI.reverseComposeSubRegIndexLaneMask(Idx, RR.Mask);
+ return RegisterRef(R, M & RCM);
+ }
+ llvm_unreachable("Invalid arguments: unrelated registers?");
+}
+
bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
if (PhysicalRegisterInfo::isRegMaskId(RR.Reg))
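A toy model (not the RDF implementation) of what the new PhysicalRegisterInfo::mapTo() accomplishes for the sub-register-to-super-register case, under the simplifying assumption that composing a lane mask through a sub-register index amounts to repositioning the sub-register's lanes inside the super-register's mask. Ref, S0/S1/D0, subRegLaneOffset and mapToSuper are all made-up names; the real code uses TRI.getSubRegIndex() and composeSubRegIndexLaneMask().

#include <cstdint>
#include <iostream>

using LaneMask = uint32_t;

struct Ref {
  unsigned Reg;   // register id
  LaneMask Mask;  // lanes of Reg touched by the reference
};

// Hypothetical layout: the pair D0 consists of S0 (lane offset 0) and
// S1 (lane offset 1), one lane per 32-bit half.
enum { S0 = 0, S1 = 1, D0 = 2 };

// Stand-in for composeSubRegIndexLaneMask(): position of Sub's lanes inside
// Super's lane mask, or ~0u for unrelated registers.
unsigned subRegLaneOffset(unsigned Super, unsigned Sub) {
  if (Super == D0 && Sub == S0) return 0;
  if (Super == D0 && Sub == S1) return 1;
  return ~0u;
}

// Re-express a reference to a sub-register in terms of its super-register,
// analogous to mapTo(RR, R) for the sub-to-super direction.
Ref mapToSuper(Ref RR, unsigned Super) {
  if (RR.Reg == Super)
    return RR;
  unsigned Off = subRegLaneOffset(Super, RR.Reg);
  if (Off == ~0u)
    return RR; // unrelated registers; the real code asserts here
  return Ref{Super, RR.Mask << Off};
}

int main() {
  Ref LowHalf{S0, 0b1};                 // a reference to all of S0
  Ref AsPair = mapToSuper(LowHalf, D0); // same lanes, expressed on D0
  std::cout << AsPair.Reg << ' ' << AsPair.Mask << '\n'; // prints "2 1"
  return 0;
}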
diff --git a/lib/Target/Hexagon/RDFRegisters.h b/lib/Target/Hexagon/RDFRegisters.h
index 314d8b5666d7..09b733ce616b 100644
--- a/lib/Target/Hexagon/RDFRegisters.h
+++ b/lib/Target/Hexagon/RDFRegisters.h
@@ -112,6 +112,7 @@ namespace rdf {
const BitVector &getMaskUnits(RegisterId MaskId) const {
return MaskInfos[TargetRegisterInfo::stackSlot2Index(MaskId)].Units;
}
+ RegisterRef mapTo(RegisterRef RR, unsigned R) const;
const TargetRegisterInfo &getTRI() const { return TRI; }
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 134f7ac3aea3..9cdbf510737f 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -81,7 +81,7 @@ bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
AsmPrinter::runOnMachineFunction(MF);
- EmitXRayTable();
+ emitXRayTable();
return true;
}
@@ -1148,39 +1148,6 @@ void MipsAsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind) {
recordSled(CurSled, MI, Kind);
}
-void MipsAsmPrinter::EmitXRayTable() {
- if (Sleds.empty())
- return;
- if (Subtarget->isTargetELF()) {
- auto PrevSection = OutStreamer->getCurrentSectionOnly();
- auto Fn = MF->getFunction();
- MCSection *Section;
-
- if (Fn->hasComdat())
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
- Fn->getComdat()->getName());
- else
- Section =
- OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC, 0, CurrentFnSym->getName());
-
- OutStreamer->SwitchSection(Section);
- for (const auto &Sled : Sleds) {
- OutStreamer->EmitSymbolValue(Sled.Sled, Subtarget->isGP64bit() ? 8 : 4);
- OutStreamer->EmitSymbolValue(CurrentFnSym, Subtarget->isGP64bit() ? 8 : 4);
- auto Kind = static_cast<uint8_t>(Sled.Kind);
- OutStreamer->EmitBytes(
- StringRef(reinterpret_cast<const char *>(&Kind), 1));
- OutStreamer->EmitBytes(
- StringRef(reinterpret_cast<const char *>(&Sled.AlwaysInstrument), 1));
- OutStreamer->EmitZeros(Subtarget->isGP64bit() ? 14 : 6);
- }
- OutStreamer->SwitchSection(PrevSection);
- }
- Sleds.clear();
-}
-
void MipsAsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI) {
EmitSled(MI, SledKind::FUNCTION_ENTER);
}
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 4d06912054a2..61fdda8aa109 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1,4661 +1,4662 @@
-//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that NVPTX uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/NVPTXBaseInfo.h"
-#include "NVPTX.h"
-#include "NVPTXISelLowering.h"
-#include "NVPTXSection.h"
-#include "NVPTXSubtarget.h"
-#include "NVPTXTargetMachine.h"
-#include "NVPTXTargetObjectFile.h"
-#include "NVPTXUtilities.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/CodeGen/Analysis.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/MachineValueType.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/IR/Argument.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CodeGen.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetCallingConv.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "nvptx-lower"
-
-using namespace llvm;
-
-static unsigned int uniqueCallSite = 0;
-
-static cl::opt<bool> sched4reg(
- "nvptx-sched4reg",
- cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
-
-static cl::opt<unsigned>
-FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
- " 1: do it 2: do it aggressively"),
- cl::init(2));
-
-static cl::opt<int> UsePrecDivF32(
- "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
- " IEEE Compliant F32 div.rnd if available."),
- cl::init(2));
-
-static cl::opt<bool> UsePrecSqrtF32(
- "nvptx-prec-sqrtf32", cl::Hidden,
- cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
- cl::init(true));
-
-static cl::opt<bool> FtzEnabled(
- "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
- cl::init(false));
-
-int NVPTXTargetLowering::getDivF32Level() const {
- if (UsePrecDivF32.getNumOccurrences() > 0) {
- // If nvptx-prec-div32=N is used on the command-line, always honor it
- return UsePrecDivF32;
- } else {
- // Otherwise, use div.approx if fast math is enabled
- if (getTargetMachine().Options.UnsafeFPMath)
- return 0;
- else
- return 2;
- }
-}
-
-bool NVPTXTargetLowering::usePrecSqrtF32() const {
- if (UsePrecSqrtF32.getNumOccurrences() > 0) {
- // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
- return UsePrecSqrtF32;
- } else {
- // Otherwise, use sqrt.approx if fast math is enabled
- return !getTargetMachine().Options.UnsafeFPMath;
- }
-}
-
-bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
- // TODO: Get rid of this flag; there can be only one way to do this.
- if (FtzEnabled.getNumOccurrences() > 0) {
- // If nvptx-f32ftz is used on the command-line, always honor it
- return FtzEnabled;
- } else {
- const Function *F = MF.getFunction();
- // Otherwise, check for an nvptx-f32ftz attribute on the function
- if (F->hasFnAttribute("nvptx-f32ftz"))
- return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
- else
- return false;
- }
-}
-
-static bool IsPTXVectorType(MVT VT) {
- switch (VT.SimpleTy) {
- default:
- return false;
- case MVT::v2i1:
- case MVT::v4i1:
- case MVT::v2i8:
- case MVT::v4i8:
- case MVT::v2i16:
- case MVT::v4i16:
- case MVT::v2i32:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v4f16:
- case MVT::v8f16: // <4 x f16x2>
- case MVT::v2f32:
- case MVT::v4f32:
- case MVT::v2f64:
- return true;
- }
-}
-
-/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
-/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
-/// into their primitive components.
-/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
-/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
-/// LowerCall, and LowerReturn.
-static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
- Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets = nullptr,
- uint64_t StartingOffset = 0) {
- SmallVector<EVT, 16> TempVTs;
- SmallVector<uint64_t, 16> TempOffsets;
-
- ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
- for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
- EVT VT = TempVTs[i];
- uint64_t Off = TempOffsets[i];
- // Split vectors into individual elements, except for v2f16, which
- // we will pass as a single scalar.
- if (VT.isVector()) {
- unsigned NumElts = VT.getVectorNumElements();
- EVT EltVT = VT.getVectorElementType();
- // Vectors with an even number of f16 elements will be passed to
- // us as an array of v2f16 elements. We must match this so we
- // stay in sync with Ins/Outs.
- if (EltVT == MVT::f16 && NumElts % 2 == 0) {
- EltVT = MVT::v2f16;
- NumElts /= 2;
- }
- for (unsigned j = 0; j != NumElts; ++j) {
- ValueVTs.push_back(EltVT);
- if (Offsets)
- Offsets->push_back(Off + j * EltVT.getStoreSize());
- }
- } else {
- ValueVTs.push_back(VT);
- if (Offsets)
- Offsets->push_back(Off);
- }
- }
-}
-
-// Check whether we can merge loads/stores of some of the pieces of a
-// flattened function parameter or return value into a single vector
-// load/store.
-//
-// The flattened parameter is represented as a list of EVTs and
-// offsets, and the whole structure is aligned to ParamAlignment. This
-// function determines whether we can load/store pieces of the
-// parameter starting at index Idx using a single vectorized op of
-// size AccessSize. If so, it returns the number of param pieces
-// covered by the vector op. Otherwise, it returns 1.
-static unsigned CanMergeParamLoadStoresStartingAt(
- unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
- const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
- assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
-
- // Can't vectorize if param alignment is not sufficient.
- if (AccessSize > ParamAlignment)
- return 1;
- // Can't vectorize if offset is not aligned.
- if (Offsets[Idx] & (AccessSize - 1))
- return 1;
-
- EVT EltVT = ValueVTs[Idx];
- unsigned EltSize = EltVT.getStoreSize();
-
- // Element is too large to vectorize.
- if (EltSize >= AccessSize)
- return 1;
-
- unsigned NumElts = AccessSize / EltSize;
- // Can't vectorize if AccessBytes if not a multiple of EltSize.
- if (AccessSize != EltSize * NumElts)
- return 1;
-
- // We don't have enough elements to vectorize.
- if (Idx + NumElts > ValueVTs.size())
- return 1;
-
- // PTX ISA can only deal with 2- and 4-element vector ops.
- if (NumElts != 4 && NumElts != 2)
- return 1;
-
- for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
- // Types do not match.
- if (ValueVTs[j] != EltVT)
- return 1;
-
- // Elements are not contiguous.
- if (Offsets[j] - Offsets[j - 1] != EltSize)
- return 1;
- }
- // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).
- return NumElts;
-}
-
-// Flags for tracking per-element vectorization state of loads/stores
-// of a flattened function parameter or return value.
-enum ParamVectorizationFlags {
- PVF_INNER = 0x0, // Middle elements of a vector.
- PVF_FIRST = 0x1, // First element of the vector.
- PVF_LAST = 0x2, // Last element of the vector.
- // Scalar is effectively a 1-element vector.
- PVF_SCALAR = PVF_FIRST | PVF_LAST
-};
-
-// Computes whether and how we can vectorize the loads/stores of a
-// flattened function parameter or return value.
-//
-// The flattened parameter is represented as the list of ValueVTs and
-// Offsets, and is aligned to ParamAlignment bytes. We return a vector
-// of the same size as ValueVTs indicating how each piece should be
-// loaded/stored (i.e. as a scalar, or as part of a vector
-// load/store).
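-//
-// Illustrative example (not from the original source): five f32 pieces at
-// offsets 0, 4, 8, 12 and 16 with 16-byte alignment come back as
-// { PVF_FIRST, PVF_INNER, PVF_INNER, PVF_LAST, PVF_SCALAR }: the first four
-// pieces form one 128-bit access and the trailing piece stays scalar.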
-static SmallVector<ParamVectorizationFlags, 16>
-VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
- const SmallVectorImpl<uint64_t> &Offsets,
- unsigned ParamAlignment) {
- // Set vector size to match ValueVTs and mark all elements as
- // scalars by default.
- SmallVector<ParamVectorizationFlags, 16> VectorInfo;
- VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
-
- // Check what we can vectorize using 128/64/32-bit accesses.
- for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
- // Skip elements we've already processed.
- assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
- for (unsigned AccessSize : {16, 8, 4, 2}) {
- unsigned NumElts = CanMergeParamLoadStoresStartingAt(
- I, AccessSize, ValueVTs, Offsets, ParamAlignment);
- // Mark vectorized elements.
- switch (NumElts) {
- default:
- llvm_unreachable("Unexpected return value");
- case 1:
- // Can't vectorize using this size, try next smaller size.
- continue;
- case 2:
- assert(I + 1 < E && "Not enough elements.");
- VectorInfo[I] = PVF_FIRST;
- VectorInfo[I + 1] = PVF_LAST;
- I += 1;
- break;
- case 4:
- assert(I + 3 < E && "Not enough elements.");
- VectorInfo[I] = PVF_FIRST;
- VectorInfo[I + 1] = PVF_INNER;
- VectorInfo[I + 2] = PVF_INNER;
- VectorInfo[I + 3] = PVF_LAST;
- I += 3;
- break;
- }
- // Break out of the inner loop because we've already succeeded
- // using the largest possible AccessSize.
- break;
- }
- }
- return VectorInfo;
-}
-
-// NVPTXTargetLowering Constructor.
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
- const NVPTXSubtarget &STI)
- : TargetLowering(TM), nvTM(&TM), STI(STI) {
- // Always lower memset, memcpy, and memmove intrinsics to load/store
- // instructions, rather than generating calls to memset, memcpy, or memmove.
- MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
- MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
- MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
-
- setBooleanContents(ZeroOrNegativeOneBooleanContent);
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
-
- // Jump is Expensive. Don't create extra control flow for 'and', 'or'
- // condition branches.
- setJumpIsExpensive(true);
-
- // Wide divides are _very_ slow. Try to reduce the width of the divide if
- // possible.
- addBypassSlowDiv(64, 32);
-
- // By default, use the Source scheduling
- if (sched4reg)
- setSchedulingPreference(Sched::RegPressure);
- else
- setSchedulingPreference(Sched::Source);
-
- auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
- LegalizeAction NoF16Action) {
- setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
- };
-
- addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
- addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
- addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
- addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
- addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
- addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
- addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
- addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
-
- // Conversion to/from FP16/FP16x2 is always legal.
- setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
- setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
-
- setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
- setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
-
- // Operations not directly supported by NVPTX.
- setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- setOperationAction(ISD::BR_CC, MVT::f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- setOperationAction(ISD::BR_CC, MVT::f64, Expand);
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::BR_CC, MVT::i8, Expand);
- setOperationAction(ISD::BR_CC, MVT::i16, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);
- // Some SIGN_EXTEND_INREG can be done using cvt instruction.
- // For others we will expand to a SHL/SRA pair.
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
-
- setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
-
- if (STI.hasROT64()) {
- setOperationAction(ISD::ROTL, MVT::i64, Legal);
- setOperationAction(ISD::ROTR, MVT::i64, Legal);
- } else {
- setOperationAction(ISD::ROTL, MVT::i64, Expand);
- setOperationAction(ISD::ROTR, MVT::i64, Expand);
- }
- if (STI.hasROT32()) {
- setOperationAction(ISD::ROTL, MVT::i32, Legal);
- setOperationAction(ISD::ROTR, MVT::i32, Legal);
- } else {
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTR, MVT::i32, Expand);
- }
-
- setOperationAction(ISD::ROTL, MVT::i16, Expand);
- setOperationAction(ISD::ROTR, MVT::i16, Expand);
- setOperationAction(ISD::ROTL, MVT::i8, Expand);
- setOperationAction(ISD::ROTR, MVT::i8, Expand);
- setOperationAction(ISD::BSWAP, MVT::i16, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- setOperationAction(ISD::BSWAP, MVT::i64, Expand);
-
- // Indirect branch is not supported.
- // This also disables Jump Table creation.
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
-
- // We want to legalize constant-related memmove and memcpy intrinsics.
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
-
- // Turn FP extload into load/fpextend
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
- // Turn FP truncstore into trunc + store.
- // FIXME: vector types should also be expanded
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- // PTX does not support load / store predicate registers
- setOperationAction(ISD::LOAD, MVT::i1, Custom);
- setOperationAction(ISD::STORE, MVT::i1, Custom);
-
- for (MVT VT : MVT::integer_valuetypes()) {
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
- setTruncStoreAction(VT, MVT::i1, Expand);
- }
-
- // This is legal in NVPTX
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
-
- // TRAP can be lowered to PTX trap
- setOperationAction(ISD::TRAP, MVT::Other, Legal);
-
- setOperationAction(ISD::ADDC, MVT::i64, Expand);
- setOperationAction(ISD::ADDE, MVT::i64, Expand);
-
- // Register custom handling for vector loads/stores
- for (MVT VT : MVT::vector_valuetypes()) {
- if (IsPTXVectorType(VT)) {
- setOperationAction(ISD::LOAD, VT, Custom);
- setOperationAction(ISD::STORE, VT, Custom);
- setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
- }
- }
-
- // Custom handling for i8 intrinsics
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
-
- for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
- setOperationAction(ISD::SMIN, Ty, Legal);
- setOperationAction(ISD::SMAX, Ty, Legal);
- setOperationAction(ISD::UMIN, Ty, Legal);
- setOperationAction(ISD::UMAX, Ty, Legal);
-
- setOperationAction(ISD::CTPOP, Ty, Legal);
- setOperationAction(ISD::CTLZ, Ty, Legal);
- }
-
- setOperationAction(ISD::CTTZ, MVT::i16, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i64, Expand);
-
- // PTX does not directly support SELP of i1, so promote to i32 first
- setOperationAction(ISD::SELECT, MVT::i1, Custom);
-
- // PTX cannot multiply two i64s in a single instruction.
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-
- // We have some custom DAG combine patterns for these nodes
- setTargetDAGCombine(ISD::ADD);
- setTargetDAGCombine(ISD::AND);
- setTargetDAGCombine(ISD::FADD);
- setTargetDAGCombine(ISD::MUL);
- setTargetDAGCombine(ISD::SHL);
- setTargetDAGCombine(ISD::SREM);
- setTargetDAGCombine(ISD::UREM);
-
- // setcc for f16x2 needs special handling to prevent the legalizer's
- // attempt to scalarize it due to v2i1 not being legal.
- if (STI.allowFP16Math())
- setTargetDAGCombine(ISD::SETCC);
-
- // Promote fp16 arithmetic if fp16 hardware isn't available or the
- // user passed --nvptx-no-fp16-math. The flag is useful because,
- // although sm_53+ GPUs have some sort of FP16 support in
- // hardware, only sm_53 and sm_60 have a full implementation. Others
- // have only a token amount of hardware and are likely to run faster
- // by using fp32 units instead.
- for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
- setFP16OperationAction(Op, MVT::f16, Legal, Promote);
- setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
- }
-
- // There's no neg.f16 instruction. Expand to (0-x).
- setOperationAction(ISD::FNEG, MVT::f16, Expand);
- setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
-
- // (would be) Library functions.
-
- // These map to conversion instructions for scalar FP types.
- for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
- ISD::FROUND, ISD::FTRUNC}) {
- setOperationAction(Op, MVT::f16, Legal);
- setOperationAction(Op, MVT::f32, Legal);
- setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
- }
-
- // 'Expand' implements FCOPYSIGN without calling an external library.
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-
- // These map to corresponding instructions for f32/f64. f16 must be
- // promoted to f32. v2f16 is expanded to f16, which is then promoted
- // to f32.
- for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
- ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
- setOperationAction(Op, MVT::f16, Promote);
- setOperationAction(Op, MVT::f32, Legal);
- setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
- }
- setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
-
- // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
- // No FPOW or FREM in PTX.
-
- // Now deduce the information based on the above-mentioned actions.
- computeRegisterProperties(STI.getRegisterInfo());
-}
-
-const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch ((NVPTXISD::NodeType)Opcode) {
- case NVPTXISD::FIRST_NUMBER:
- break;
- case NVPTXISD::CALL:
- return "NVPTXISD::CALL";
- case NVPTXISD::RET_FLAG:
- return "NVPTXISD::RET_FLAG";
- case NVPTXISD::LOAD_PARAM:
- return "NVPTXISD::LOAD_PARAM";
- case NVPTXISD::Wrapper:
- return "NVPTXISD::Wrapper";
- case NVPTXISD::DeclareParam:
- return "NVPTXISD::DeclareParam";
- case NVPTXISD::DeclareScalarParam:
- return "NVPTXISD::DeclareScalarParam";
- case NVPTXISD::DeclareRet:
- return "NVPTXISD::DeclareRet";
- case NVPTXISD::DeclareScalarRet:
- return "NVPTXISD::DeclareScalarRet";
- case NVPTXISD::DeclareRetParam:
- return "NVPTXISD::DeclareRetParam";
- case NVPTXISD::PrintCall:
- return "NVPTXISD::PrintCall";
- case NVPTXISD::PrintConvergentCall:
- return "NVPTXISD::PrintConvergentCall";
- case NVPTXISD::PrintCallUni:
- return "NVPTXISD::PrintCallUni";
- case NVPTXISD::PrintConvergentCallUni:
- return "NVPTXISD::PrintConvergentCallUni";
- case NVPTXISD::LoadParam:
- return "NVPTXISD::LoadParam";
- case NVPTXISD::LoadParamV2:
- return "NVPTXISD::LoadParamV2";
- case NVPTXISD::LoadParamV4:
- return "NVPTXISD::LoadParamV4";
- case NVPTXISD::StoreParam:
- return "NVPTXISD::StoreParam";
- case NVPTXISD::StoreParamV2:
- return "NVPTXISD::StoreParamV2";
- case NVPTXISD::StoreParamV4:
- return "NVPTXISD::StoreParamV4";
- case NVPTXISD::StoreParamS32:
- return "NVPTXISD::StoreParamS32";
- case NVPTXISD::StoreParamU32:
- return "NVPTXISD::StoreParamU32";
- case NVPTXISD::CallArgBegin:
- return "NVPTXISD::CallArgBegin";
- case NVPTXISD::CallArg:
- return "NVPTXISD::CallArg";
- case NVPTXISD::LastCallArg:
- return "NVPTXISD::LastCallArg";
- case NVPTXISD::CallArgEnd:
- return "NVPTXISD::CallArgEnd";
- case NVPTXISD::CallVoid:
- return "NVPTXISD::CallVoid";
- case NVPTXISD::CallVal:
- return "NVPTXISD::CallVal";
- case NVPTXISD::CallSymbol:
- return "NVPTXISD::CallSymbol";
- case NVPTXISD::Prototype:
- return "NVPTXISD::Prototype";
- case NVPTXISD::MoveParam:
- return "NVPTXISD::MoveParam";
- case NVPTXISD::StoreRetval:
- return "NVPTXISD::StoreRetval";
- case NVPTXISD::StoreRetvalV2:
- return "NVPTXISD::StoreRetvalV2";
- case NVPTXISD::StoreRetvalV4:
- return "NVPTXISD::StoreRetvalV4";
- case NVPTXISD::PseudoUseParam:
- return "NVPTXISD::PseudoUseParam";
- case NVPTXISD::RETURN:
- return "NVPTXISD::RETURN";
- case NVPTXISD::CallSeqBegin:
- return "NVPTXISD::CallSeqBegin";
- case NVPTXISD::CallSeqEnd:
- return "NVPTXISD::CallSeqEnd";
- case NVPTXISD::CallPrototype:
- return "NVPTXISD::CallPrototype";
- case NVPTXISD::LoadV2:
- return "NVPTXISD::LoadV2";
- case NVPTXISD::LoadV4:
- return "NVPTXISD::LoadV4";
- case NVPTXISD::LDGV2:
- return "NVPTXISD::LDGV2";
- case NVPTXISD::LDGV4:
- return "NVPTXISD::LDGV4";
- case NVPTXISD::LDUV2:
- return "NVPTXISD::LDUV2";
- case NVPTXISD::LDUV4:
- return "NVPTXISD::LDUV4";
- case NVPTXISD::StoreV2:
- return "NVPTXISD::StoreV2";
- case NVPTXISD::StoreV4:
- return "NVPTXISD::StoreV4";
- case NVPTXISD::FUN_SHFL_CLAMP:
- return "NVPTXISD::FUN_SHFL_CLAMP";
- case NVPTXISD::FUN_SHFR_CLAMP:
- return "NVPTXISD::FUN_SHFR_CLAMP";
- case NVPTXISD::IMAD:
- return "NVPTXISD::IMAD";
- case NVPTXISD::SETP_F16X2:
- return "NVPTXISD::SETP_F16X2";
- case NVPTXISD::Dummy:
- return "NVPTXISD::Dummy";
- case NVPTXISD::MUL_WIDE_SIGNED:
- return "NVPTXISD::MUL_WIDE_SIGNED";
- case NVPTXISD::MUL_WIDE_UNSIGNED:
- return "NVPTXISD::MUL_WIDE_UNSIGNED";
- case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
- case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
- case NVPTXISD::Tex1DFloatFloatLevel:
- return "NVPTXISD::Tex1DFloatFloatLevel";
- case NVPTXISD::Tex1DFloatFloatGrad:
- return "NVPTXISD::Tex1DFloatFloatGrad";
- case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
- case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
- case NVPTXISD::Tex1DS32FloatLevel:
- return "NVPTXISD::Tex1DS32FloatLevel";
- case NVPTXISD::Tex1DS32FloatGrad:
- return "NVPTXISD::Tex1DS32FloatGrad";
- case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
- case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
- case NVPTXISD::Tex1DU32FloatLevel:
- return "NVPTXISD::Tex1DU32FloatLevel";
- case NVPTXISD::Tex1DU32FloatGrad:
- return "NVPTXISD::Tex1DU32FloatGrad";
- case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
- case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
- case NVPTXISD::Tex1DArrayFloatFloatLevel:
- return "NVPTXISD::Tex1DArrayFloatFloatLevel";
- case NVPTXISD::Tex1DArrayFloatFloatGrad:
- return "NVPTXISD::Tex1DArrayFloatFloatGrad";
- case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
- case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
- case NVPTXISD::Tex1DArrayS32FloatLevel:
- return "NVPTXISD::Tex1DArrayS32FloatLevel";
- case NVPTXISD::Tex1DArrayS32FloatGrad:
- return "NVPTXISD::Tex1DArrayS32FloatGrad";
- case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
- case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
- case NVPTXISD::Tex1DArrayU32FloatLevel:
- return "NVPTXISD::Tex1DArrayU32FloatLevel";
- case NVPTXISD::Tex1DArrayU32FloatGrad:
- return "NVPTXISD::Tex1DArrayU32FloatGrad";
- case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
- case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
- case NVPTXISD::Tex2DFloatFloatLevel:
- return "NVPTXISD::Tex2DFloatFloatLevel";
- case NVPTXISD::Tex2DFloatFloatGrad:
- return "NVPTXISD::Tex2DFloatFloatGrad";
- case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
- case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
- case NVPTXISD::Tex2DS32FloatLevel:
- return "NVPTXISD::Tex2DS32FloatLevel";
- case NVPTXISD::Tex2DS32FloatGrad:
- return "NVPTXISD::Tex2DS32FloatGrad";
- case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
- case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
- case NVPTXISD::Tex2DU32FloatLevel:
- return "NVPTXISD::Tex2DU32FloatLevel";
- case NVPTXISD::Tex2DU32FloatGrad:
- return "NVPTXISD::Tex2DU32FloatGrad";
- case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
- case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
- case NVPTXISD::Tex2DArrayFloatFloatLevel:
- return "NVPTXISD::Tex2DArrayFloatFloatLevel";
- case NVPTXISD::Tex2DArrayFloatFloatGrad:
- return "NVPTXISD::Tex2DArrayFloatFloatGrad";
- case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
- case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
- case NVPTXISD::Tex2DArrayS32FloatLevel:
- return "NVPTXISD::Tex2DArrayS32FloatLevel";
- case NVPTXISD::Tex2DArrayS32FloatGrad:
- return "NVPTXISD::Tex2DArrayS32FloatGrad";
- case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
- case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
- case NVPTXISD::Tex2DArrayU32FloatLevel:
- return "NVPTXISD::Tex2DArrayU32FloatLevel";
- case NVPTXISD::Tex2DArrayU32FloatGrad:
- return "NVPTXISD::Tex2DArrayU32FloatGrad";
- case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
- case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
- case NVPTXISD::Tex3DFloatFloatLevel:
- return "NVPTXISD::Tex3DFloatFloatLevel";
- case NVPTXISD::Tex3DFloatFloatGrad:
- return "NVPTXISD::Tex3DFloatFloatGrad";
- case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
- case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
- case NVPTXISD::Tex3DS32FloatLevel:
- return "NVPTXISD::Tex3DS32FloatLevel";
- case NVPTXISD::Tex3DS32FloatGrad:
- return "NVPTXISD::Tex3DS32FloatGrad";
- case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
- case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
- case NVPTXISD::Tex3DU32FloatLevel:
- return "NVPTXISD::Tex3DU32FloatLevel";
- case NVPTXISD::Tex3DU32FloatGrad:
- return "NVPTXISD::Tex3DU32FloatGrad";
- case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
- case NVPTXISD::TexCubeFloatFloatLevel:
- return "NVPTXISD::TexCubeFloatFloatLevel";
- case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
- case NVPTXISD::TexCubeS32FloatLevel:
- return "NVPTXISD::TexCubeS32FloatLevel";
- case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
- case NVPTXISD::TexCubeU32FloatLevel:
- return "NVPTXISD::TexCubeU32FloatLevel";
- case NVPTXISD::TexCubeArrayFloatFloat:
- return "NVPTXISD::TexCubeArrayFloatFloat";
- case NVPTXISD::TexCubeArrayFloatFloatLevel:
- return "NVPTXISD::TexCubeArrayFloatFloatLevel";
- case NVPTXISD::TexCubeArrayS32Float:
- return "NVPTXISD::TexCubeArrayS32Float";
- case NVPTXISD::TexCubeArrayS32FloatLevel:
- return "NVPTXISD::TexCubeArrayS32FloatLevel";
- case NVPTXISD::TexCubeArrayU32Float:
- return "NVPTXISD::TexCubeArrayU32Float";
- case NVPTXISD::TexCubeArrayU32FloatLevel:
- return "NVPTXISD::TexCubeArrayU32FloatLevel";
- case NVPTXISD::Tld4R2DFloatFloat:
- return "NVPTXISD::Tld4R2DFloatFloat";
- case NVPTXISD::Tld4G2DFloatFloat:
- return "NVPTXISD::Tld4G2DFloatFloat";
- case NVPTXISD::Tld4B2DFloatFloat:
- return "NVPTXISD::Tld4B2DFloatFloat";
- case NVPTXISD::Tld4A2DFloatFloat:
- return "NVPTXISD::Tld4A2DFloatFloat";
- case NVPTXISD::Tld4R2DS64Float:
- return "NVPTXISD::Tld4R2DS64Float";
- case NVPTXISD::Tld4G2DS64Float:
- return "NVPTXISD::Tld4G2DS64Float";
- case NVPTXISD::Tld4B2DS64Float:
- return "NVPTXISD::Tld4B2DS64Float";
- case NVPTXISD::Tld4A2DS64Float:
- return "NVPTXISD::Tld4A2DS64Float";
- case NVPTXISD::Tld4R2DU64Float:
- return "NVPTXISD::Tld4R2DU64Float";
- case NVPTXISD::Tld4G2DU64Float:
- return "NVPTXISD::Tld4G2DU64Float";
- case NVPTXISD::Tld4B2DU64Float:
- return "NVPTXISD::Tld4B2DU64Float";
- case NVPTXISD::Tld4A2DU64Float:
- return "NVPTXISD::Tld4A2DU64Float";
-
- case NVPTXISD::TexUnified1DFloatS32:
- return "NVPTXISD::TexUnified1DFloatS32";
- case NVPTXISD::TexUnified1DFloatFloat:
- return "NVPTXISD::TexUnified1DFloatFloat";
- case NVPTXISD::TexUnified1DFloatFloatLevel:
- return "NVPTXISD::TexUnified1DFloatFloatLevel";
- case NVPTXISD::TexUnified1DFloatFloatGrad:
- return "NVPTXISD::TexUnified1DFloatFloatGrad";
- case NVPTXISD::TexUnified1DS32S32:
- return "NVPTXISD::TexUnified1DS32S32";
- case NVPTXISD::TexUnified1DS32Float:
- return "NVPTXISD::TexUnified1DS32Float";
- case NVPTXISD::TexUnified1DS32FloatLevel:
- return "NVPTXISD::TexUnified1DS32FloatLevel";
- case NVPTXISD::TexUnified1DS32FloatGrad:
- return "NVPTXISD::TexUnified1DS32FloatGrad";
- case NVPTXISD::TexUnified1DU32S32:
- return "NVPTXISD::TexUnified1DU32S32";
- case NVPTXISD::TexUnified1DU32Float:
- return "NVPTXISD::TexUnified1DU32Float";
- case NVPTXISD::TexUnified1DU32FloatLevel:
- return "NVPTXISD::TexUnified1DU32FloatLevel";
- case NVPTXISD::TexUnified1DU32FloatGrad:
- return "NVPTXISD::TexUnified1DU32FloatGrad";
- case NVPTXISD::TexUnified1DArrayFloatS32:
- return "NVPTXISD::TexUnified1DArrayFloatS32";
- case NVPTXISD::TexUnified1DArrayFloatFloat:
- return "NVPTXISD::TexUnified1DArrayFloatFloat";
- case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
- return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
- case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
- return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
- case NVPTXISD::TexUnified1DArrayS32S32:
- return "NVPTXISD::TexUnified1DArrayS32S32";
- case NVPTXISD::TexUnified1DArrayS32Float:
- return "NVPTXISD::TexUnified1DArrayS32Float";
- case NVPTXISD::TexUnified1DArrayS32FloatLevel:
- return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
- case NVPTXISD::TexUnified1DArrayS32FloatGrad:
- return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
- case NVPTXISD::TexUnified1DArrayU32S32:
- return "NVPTXISD::TexUnified1DArrayU32S32";
- case NVPTXISD::TexUnified1DArrayU32Float:
- return "NVPTXISD::TexUnified1DArrayU32Float";
- case NVPTXISD::TexUnified1DArrayU32FloatLevel:
- return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
- case NVPTXISD::TexUnified1DArrayU32FloatGrad:
- return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
- case NVPTXISD::TexUnified2DFloatS32:
- return "NVPTXISD::TexUnified2DFloatS32";
- case NVPTXISD::TexUnified2DFloatFloat:
- return "NVPTXISD::TexUnified2DFloatFloat";
- case NVPTXISD::TexUnified2DFloatFloatLevel:
- return "NVPTXISD::TexUnified2DFloatFloatLevel";
- case NVPTXISD::TexUnified2DFloatFloatGrad:
- return "NVPTXISD::TexUnified2DFloatFloatGrad";
- case NVPTXISD::TexUnified2DS32S32:
- return "NVPTXISD::TexUnified2DS32S32";
- case NVPTXISD::TexUnified2DS32Float:
- return "NVPTXISD::TexUnified2DS32Float";
- case NVPTXISD::TexUnified2DS32FloatLevel:
- return "NVPTXISD::TexUnified2DS32FloatLevel";
- case NVPTXISD::TexUnified2DS32FloatGrad:
- return "NVPTXISD::TexUnified2DS32FloatGrad";
- case NVPTXISD::TexUnified2DU32S32:
- return "NVPTXISD::TexUnified2DU32S32";
- case NVPTXISD::TexUnified2DU32Float:
- return "NVPTXISD::TexUnified2DU32Float";
- case NVPTXISD::TexUnified2DU32FloatLevel:
- return "NVPTXISD::TexUnified2DU32FloatLevel";
- case NVPTXISD::TexUnified2DU32FloatGrad:
- return "NVPTXISD::TexUnified2DU32FloatGrad";
- case NVPTXISD::TexUnified2DArrayFloatS32:
- return "NVPTXISD::TexUnified2DArrayFloatS32";
- case NVPTXISD::TexUnified2DArrayFloatFloat:
- return "NVPTXISD::TexUnified2DArrayFloatFloat";
- case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
- return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
- case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
- return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
- case NVPTXISD::TexUnified2DArrayS32S32:
- return "NVPTXISD::TexUnified2DArrayS32S32";
- case NVPTXISD::TexUnified2DArrayS32Float:
- return "NVPTXISD::TexUnified2DArrayS32Float";
- case NVPTXISD::TexUnified2DArrayS32FloatLevel:
- return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
- case NVPTXISD::TexUnified2DArrayS32FloatGrad:
- return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
- case NVPTXISD::TexUnified2DArrayU32S32:
- return "NVPTXISD::TexUnified2DArrayU32S32";
- case NVPTXISD::TexUnified2DArrayU32Float:
- return "NVPTXISD::TexUnified2DArrayU32Float";
- case NVPTXISD::TexUnified2DArrayU32FloatLevel:
- return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
- case NVPTXISD::TexUnified2DArrayU32FloatGrad:
- return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
- case NVPTXISD::TexUnified3DFloatS32:
- return "NVPTXISD::TexUnified3DFloatS32";
- case NVPTXISD::TexUnified3DFloatFloat:
- return "NVPTXISD::TexUnified3DFloatFloat";
- case NVPTXISD::TexUnified3DFloatFloatLevel:
- return "NVPTXISD::TexUnified3DFloatFloatLevel";
- case NVPTXISD::TexUnified3DFloatFloatGrad:
- return "NVPTXISD::TexUnified3DFloatFloatGrad";
- case NVPTXISD::TexUnified3DS32S32:
- return "NVPTXISD::TexUnified3DS32S32";
- case NVPTXISD::TexUnified3DS32Float:
- return "NVPTXISD::TexUnified3DS32Float";
- case NVPTXISD::TexUnified3DS32FloatLevel:
- return "NVPTXISD::TexUnified3DS32FloatLevel";
- case NVPTXISD::TexUnified3DS32FloatGrad:
- return "NVPTXISD::TexUnified3DS32FloatGrad";
- case NVPTXISD::TexUnified3DU32S32:
- return "NVPTXISD::TexUnified3DU32S32";
- case NVPTXISD::TexUnified3DU32Float:
- return "NVPTXISD::TexUnified3DU32Float";
- case NVPTXISD::TexUnified3DU32FloatLevel:
- return "NVPTXISD::TexUnified3DU32FloatLevel";
- case NVPTXISD::TexUnified3DU32FloatGrad:
- return "NVPTXISD::TexUnified3DU32FloatGrad";
- case NVPTXISD::TexUnifiedCubeFloatFloat:
- return "NVPTXISD::TexUnifiedCubeFloatFloat";
- case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
- return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
- case NVPTXISD::TexUnifiedCubeS32Float:
- return "NVPTXISD::TexUnifiedCubeS32Float";
- case NVPTXISD::TexUnifiedCubeS32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
- case NVPTXISD::TexUnifiedCubeU32Float:
- return "NVPTXISD::TexUnifiedCubeU32Float";
- case NVPTXISD::TexUnifiedCubeU32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
- case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayS32Float:
- return "NVPTXISD::TexUnifiedCubeArrayS32Float";
- case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayU32Float:
- return "NVPTXISD::TexUnifiedCubeArrayU32Float";
- case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
- case NVPTXISD::Tld4UnifiedR2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
- case NVPTXISD::Tld4UnifiedG2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
- case NVPTXISD::Tld4UnifiedB2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
- case NVPTXISD::Tld4UnifiedA2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
- case NVPTXISD::Tld4UnifiedR2DS64Float:
- return "NVPTXISD::Tld4UnifiedR2DS64Float";
- case NVPTXISD::Tld4UnifiedG2DS64Float:
- return "NVPTXISD::Tld4UnifiedG2DS64Float";
- case NVPTXISD::Tld4UnifiedB2DS64Float:
- return "NVPTXISD::Tld4UnifiedB2DS64Float";
- case NVPTXISD::Tld4UnifiedA2DS64Float:
- return "NVPTXISD::Tld4UnifiedA2DS64Float";
- case NVPTXISD::Tld4UnifiedR2DU64Float:
- return "NVPTXISD::Tld4UnifiedR2DU64Float";
- case NVPTXISD::Tld4UnifiedG2DU64Float:
- return "NVPTXISD::Tld4UnifiedG2DU64Float";
- case NVPTXISD::Tld4UnifiedB2DU64Float:
- return "NVPTXISD::Tld4UnifiedB2DU64Float";
- case NVPTXISD::Tld4UnifiedA2DU64Float:
- return "NVPTXISD::Tld4UnifiedA2DU64Float";
-
- case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
- case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
- case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
- case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
- case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
- case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
- case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
- case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
- case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
- case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
- case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
-
- case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
- case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
- case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
- case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
- case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
- case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
- case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
- case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
- case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
- case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
- case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
-
- case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
- case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
- case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
- case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
- case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
- case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
- case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
- case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
- case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
- case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
- case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
-
- case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
- case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
- case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
- case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
- case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
- case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
- case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
- case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
- case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
- case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
- case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
-
- case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
- case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
- case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
- case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
- case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
- case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
- case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
- case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
- case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
- case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
- case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
-
- case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
- case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
- case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
- case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
- case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
- case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
- case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
- case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
- case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
- case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
- case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
-
- case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
- case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
- case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
- case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
- case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
- case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
- case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
- case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
- case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
- case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
- case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
-
- case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
- case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
- case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
- case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
- case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
- case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
- case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
- case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
- case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
- case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
- case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
-
- case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
- case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
- case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
- case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
- case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
- case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
- case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
- case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
- case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
- case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
- case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
-
- case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
- case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
- case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
- case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
- case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
- case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
- case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
- case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
- case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
- case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
- case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
-
- case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
- case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
- case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
- case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
- case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
- case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
- case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
- case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
- case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
- case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
- case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
-
- case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
- case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
- case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
- case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
- case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
- case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
- case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
- case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
- case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
- case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
- case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
-
- case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
- case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
- case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
- case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
- case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
- case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
- case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
- case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
- case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
- case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
- case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
-
- case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
- case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
- case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
- case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
- case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
- case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
- case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
- case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
- case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
- case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
- case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
-
- case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
- case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
- case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
- case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
- case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
- case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
- case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
- case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
- case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
- case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
- case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
- }
- return nullptr;
-}
-
-TargetLoweringBase::LegalizeTypeAction
-NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
- if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
- return TypeSplitVector;
- if (VT == MVT::v2f16)
- return TypeLegal;
- return TargetLoweringBase::getPreferredVectorAction(VT);
-}
-
-SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
- int Enabled, int &ExtraSteps,
- bool &UseOneConst,
- bool Reciprocal) const {
- if (!(Enabled == ReciprocalEstimate::Enabled ||
- (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
- return SDValue();
-
- if (ExtraSteps == ReciprocalEstimate::Unspecified)
- ExtraSteps = 0;
-
- SDLoc DL(Operand);
- EVT VT = Operand.getValueType();
- bool Ftz = useF32FTZ(DAG.getMachineFunction());
-
- auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(IID, DL, MVT::i32), Operand);
- };
-
- // The sqrt and rsqrt refinement processes assume we always start out with an
- // approximation of the rsqrt. Therefore, if we're going to do any refinement
- // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
- // any refinement, we must return a regular sqrt.
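- // Rough sketch of the refinement math (illustrative only; the refinement
- // steps themselves are generated by the generic DAG combiner): starting
- // from r0 ~= rsqrt(x), one Newton-Raphson step computes
- //   r1 = r0 * (1.5 - 0.5 * x * r0 * r0)
- // and sqrt(x) can then be recovered as x * r1.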
- if (Reciprocal || ExtraSteps > 0) {
- if (VT == MVT::f32)
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
- : Intrinsic::nvvm_rsqrt_approx_f);
- else if (VT == MVT::f64)
- return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
- else
- return SDValue();
- } else {
- if (VT == MVT::f32)
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
- : Intrinsic::nvvm_sqrt_approx_f);
- else {
- // There's no sqrt.approx.f64 instruction, so we emit
- // reciprocal(rsqrt(x)). This is faster than
- // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
- // x * rsqrt(x).)
- return DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
- MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
- }
- }
-}
-
-SDValue
-NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- Op = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
- return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
-}
-
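-// Illustrative example (not from the original source): for a call that
-// returns i16 and takes a single float argument, the string built below
-// would look roughly like
-//   prototype_1 : .callprototype (.param .b32 _) _ (.param .b32 _);
-// since scalar return values and parameters are widened to at least 32 bits.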
-std::string NVPTXTargetLowering::getPrototype(
- const DataLayout &DL, Type *retTy, const ArgListTy &Args,
- const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
- const ImmutableCallSite *CS) const {
- auto PtrVT = getPointerTy(DL);
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return "";
-
- std::stringstream O;
- O << "prototype_" << uniqueCallSite << " : .callprototype ";
-
- if (retTy->getTypeID() == Type::VoidTyID) {
- O << "()";
- } else {
- O << "(";
- if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
- unsigned size = 0;
- if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
- size = ITy->getBitWidth();
- } else {
- assert(retTy->isFloatingPointTy() &&
- "Floating point type expected here");
- size = retTy->getPrimitiveSizeInBits();
- }
- // PTX ABI requires all scalar return values to be at least 32
- // bits in size. fp16 normally uses .b16 as its storage type in
- // PTX, so its size must be adjusted here, too.
- if (size < 32)
- size = 32;
-
- O << ".param .b" << size << " _";
- } else if (isa<PointerType>(retTy)) {
- O << ".param .b" << PtrVT.getSizeInBits() << " _";
- } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
- auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
- O << ".param .align " << retAlignment << " .b8 _["
- << DL.getTypeAllocSize(retTy) << "]";
- } else {
- llvm_unreachable("Unknown return type");
- }
- O << ") ";
- }
- O << "_ (";
-
- bool first = true;
-
- unsigned OIdx = 0;
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
- Type *Ty = Args[i].Ty;
- if (!first) {
- O << ", ";
- }
- first = false;
-
- if (!Outs[OIdx].Flags.isByVal()) {
- if (Ty->isAggregateType() || Ty->isVectorTy()) {
- unsigned align = 0;
- const CallInst *CallI = cast<CallInst>(CS->getInstruction());
- // +1 because index 0 is reserved for return type alignment
- if (!getAlign(*CallI, i + 1, align))
- align = DL.getABITypeAlignment(Ty);
- unsigned sz = DL.getTypeAllocSize(Ty);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- // update the index for Outs
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, DL, Ty, vtparts);
- if (unsigned len = vtparts.size())
- OIdx += len - 1;
- continue;
- }
- // i8 types in IR will be i16 types in SDAG
- assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
- (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
- "type mismatch between callee prototype and arguments");
- // scalar type
- unsigned sz = 0;
- if (isa<IntegerType>(Ty)) {
- sz = cast<IntegerType>(Ty)->getBitWidth();
- if (sz < 32)
- sz = 32;
- } else if (isa<PointerType>(Ty)) {
- sz = PtrVT.getSizeInBits();
- } else if (Ty->isHalfTy())
- // PTX ABI requires all scalar parameters to be at least 32
- // bits in size. fp16 normally uses .b16 as its storage type
- // in PTX, so its size must be adjusted here, too.
- sz = 32;
- else
- sz = Ty->getPrimitiveSizeInBits();
- O << ".param .b" << sz << " ";
- O << "_";
- continue;
- }
- auto *PTy = dyn_cast<PointerType>(Ty);
- assert(PTy && "Param with byval attribute should be a pointer type");
- Type *ETy = PTy->getElementType();
-
- unsigned align = Outs[OIdx].Flags.getByValAlign();
- unsigned sz = DL.getTypeAllocSize(ETy);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- }
- O << ");";
- return O.str();
-}
-
-unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
- const ImmutableCallSite *CS,
- Type *Ty, unsigned Idx,
- const DataLayout &DL) const {
- if (!CS) {
- // CallSite is zero; fall back to ABI type alignment.
- return DL.getABITypeAlignment(Ty);
- }
-
- unsigned Align = 0;
- const Value *DirectCallee = CS->getCalledFunction();
-
- if (!DirectCallee) {
- // We don't have a direct function symbol, but that may be because of
- // constant cast instructions in the call.
- const Instruction *CalleeI = CS->getInstruction();
- assert(CalleeI && "Call target is not a function or derived value?");
-
- // With bitcast'd call targets, the instruction will be the call
- if (isa<CallInst>(CalleeI)) {
- // Check if we have call alignment metadata
- if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))
- return Align;
-
- const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
- // Ignore any bitcast instructions
- while (isa<ConstantExpr>(CalleeV)) {
- const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
- if (!CE->isCast())
- break;
- // Look through the bitcast
- CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
- }
-
- // We have now looked past all of the bitcasts. Do we finally have a
- // Function?
- if (isa<Function>(CalleeV))
- DirectCallee = CalleeV;
- }
- }
-
- // Check for function alignment information if we found that the
- // ultimate target is a Function
- if (DirectCallee)
- if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
- return Align;
-
- // Call is indirect or alignment information is not available, fall back to
- // the ABI type alignment
- return DL.getABITypeAlignment(Ty);
-}
-
-SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- SDLoc dl = CLI.DL;
- SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
- SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
- SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &isTailCall = CLI.IsTailCall;
- ArgListTy &Args = CLI.getArgs();
- Type *RetTy = CLI.RetTy;
- ImmutableCallSite *CS = CLI.CS;
- const DataLayout &DL = DAG.getDataLayout();
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- SDValue tempChain = Chain;
- Chain = DAG.getCALLSEQ_START(
- Chain, DAG.getIntPtrConstant(uniqueCallSite, dl, true), dl);
- SDValue InFlag = Chain.getValue(1);
-
- unsigned paramCount = 0;
- // Args.size() and Outs.size() need not match.
- // Outs.size() will be larger
- // * if there is an aggregate argument with multiple fields (each field
- // showing up separately in Outs)
- // * if there is a vector argument with more than typical vector-length
- // elements (generally if more than 4) where each vector element is
- // individually present in Outs.
- // So a different index should be used for indexing into Outs/OutVals.
- // See similar issue in LowerFormalArguments.
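- // For instance (hypothetical example), an argument of type { i32, float }
- // is a single Args entry but contributes two entries to Outs/OutVals,
- // which is why OIdx is advanced separately from i below.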
- unsigned OIdx = 0;
- // Declare the .params or .regs needed to pass values
- // to the function.
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
- EVT VT = Outs[OIdx].VT;
- Type *Ty = Args[i].Ty;
-
- if (!Outs[OIdx].Flags.isByVal()) {
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
- unsigned ArgAlign =
- getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
- unsigned AllocSize = DL.getTypeAllocSize(Ty);
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- bool NeedAlign; // Does argument declaration specify alignment?
- if (Ty->isAggregateType() || Ty->isVectorTy()) {
- // declare .param .align <align> .b8 .param<n>[<size>];
- SDValue DeclareParamOps[] = {
- Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps);
- NeedAlign = true;
- } else {
- // declare .param .b<size> .param<n>;
- if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
- // PTX ABI requires integral types to be at least 32 bits in
- // size. FP16 is loaded/stored using i16, so it's handled
- // here as well.
- AllocSize = 4;
- }
- SDValue DeclareScalarParamOps[] = {
- Chain, DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(AllocSize * 8, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
- DeclareScalarParamOps);
- NeedAlign = false;
- }
- InFlag = Chain.getValue(1);
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
- // than 32-bits are sign extended or zero extended, depending on
- // whether they are signed or unsigned types. This case applies
- // only to scalar parameters and not to aggregate values.
- bool ExtendIntegerParam =
- Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
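- // For example (illustrative): an i8 argument marked 'signext' is stored
- // into its .param slot below as a sign-extended 32-bit value.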
-
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
- SmallVector<SDValue, 6> StoreOperands;
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
- // New store.
- if (VectorInfo[j] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Unfinished preceding store.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
- StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
- }
-
- EVT EltVT = VTs[j];
- SDValue StVal = OutVals[OIdx];
- if (ExtendIntegerParam) {
- assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
- // zext/sext to i32
- StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND,
- dl, MVT::i32, StVal);
- } else if (EltVT.getSizeInBits() < 16) {
- // Use 16-bit registers for small stores as it's the
- // smallest general purpose register size supported by NVPTX.
- StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
- }
-
- // Record the value to store.
- StoreOperands.push_back(StVal);
-
- if (VectorInfo[j] & PVF_LAST) {
- unsigned NumElts = StoreOperands.size() - 3;
- NVPTXISD::NodeType Op;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::StoreParam;
- break;
- case 2:
- Op = NVPTXISD::StoreParamV2;
- break;
- case 4:
- Op = NVPTXISD::StoreParamV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- StoreOperands.push_back(InFlag);
-
- // Adjust type of the store op if we've extended the scalar
- // value being stored.
- EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
- unsigned EltAlign =
- NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
-
- Chain = DAG.getMemIntrinsicNode(
- Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
- TheStoreType, MachinePointerInfo(), EltAlign);
- InFlag = Chain.getValue(1);
-
- // Cleanup.
- StoreOperands.clear();
- }
- ++OIdx;
- }
- assert(StoreOperands.empty() && "Unfinished parameter store.");
- if (VTs.size() > 0)
- --OIdx;
- ++paramCount;
- continue;
- }
-
- // ByVal arguments
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
- assert(PTy && "Type of a byval parameter should be pointer");
- ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
-
- // declare .param .align <align> .b8 .param<n>[<size>];
- unsigned sz = Outs[OIdx].Flags.getByValSize();
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
- // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
- // so we don't need to worry about natural alignment or not.
- // See TargetLowering::LowerCallTo().
-
- // Enforce a minimum alignment of 4 to work around a ptxas miscompile
- // for sm_50+. See corresponding alignment adjustment in
- // emitFunctionParamList() for details.
- if (ArgAlign < 4)
- ArgAlign = 4;
- SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(sz, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps);
- InFlag = Chain.getValue(1);
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
- EVT elemtype = VTs[j];
- int curOffset = Offsets[j];
- unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
- auto PtrVT = getPointerTy(DL);
- SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
- DAG.getConstant(curOffset, dl, PtrVT));
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), PartAlign);
- if (elemtype.getSizeInBits() < 16) {
- theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
- }
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(curOffset, dl, MVT::i32),
- theVal, InFlag };
- Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
- CopyParamOps, elemtype,
- MachinePointerInfo());
-
- InFlag = Chain.getValue(1);
- }
- ++paramCount;
- }
-
- GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
- unsigned retAlignment = 0;
-
- // Handle Result
- if (Ins.size() > 0) {
- SmallVector<EVT, 16> resvtparts;
- ComputeValueVTs(*this, DL, RetTy, resvtparts);
-
- // Declare
- // .param .align 16 .b8 retval0[<size-in-bytes>], or
- // .param .b<size-in-bits> retval0
- unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
- // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
- // these three types to match the logic in
- // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
- // Plus, this behavior is consistent with nvcc's.
- if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
- RetTy->isPointerTy()) {
-      // Scalar needs to be at least 32 bits wide
- if (resultsz < 32)
- resultsz = 32;
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(resultsz, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
- DeclareRetOps);
- InFlag = Chain.getValue(1);
- } else {
- retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain,
- DAG.getConstant(retAlignment, dl, MVT::i32),
- DAG.getConstant(resultsz / 8, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
- DeclareRetOps);
- InFlag = Chain.getValue(1);
- }
- }
-
- if (!Func) {
-    // This is the indirect function call case: PTX requires a prototype of
-    // the form
-    // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
-    // to be emitted, and the label has to be used as the last arg of the
-    // call instruction.
-    // The prototype is embedded in a string and used as the operand for a
-    // CallPrototype SDNode, which prints out as the value of the string.
- SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
- const char *ProtoStr =
- nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
- SDValue ProtoOps[] = {
- Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
- };
- Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
- InFlag = Chain.getValue(1);
- }
- // Op to just print "call"
- SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrintCallOps[] = {
- Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
- };
- // We model convergent calls as separate opcodes.
- unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
- if (CLI.IsConvergent)
- Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
- : NVPTXISD::PrintConvergentCall;
- Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
- InFlag = Chain.getValue(1);
-
- // Ops to print out the function name
- SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallVoidOps[] = { Chain, Callee, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
- InFlag = Chain.getValue(1);
-
- // Ops to print out the param list
- SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgBeginOps[] = { Chain, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
- CallArgBeginOps);
- InFlag = Chain.getValue(1);
-
- for (unsigned i = 0, e = paramCount; i != e; ++i) {
- unsigned opcode;
- if (i == (e - 1))
- opcode = NVPTXISD::LastCallArg;
- else
- opcode = NVPTXISD::CallArg;
- SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(i, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
- InFlag = Chain.getValue(1);
- }
- SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgEndOps[] = { Chain,
- DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
- InFlag = Chain.getValue(1);
-
- if (!Func) {
- SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrototypeOps[] = { Chain,
- DAG.getConstant(uniqueCallSite, dl, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
- InFlag = Chain.getValue(1);
- }
-
- // Generate loads from param memory/moves from registers for result
- if (Ins.size() > 0) {
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
- assert(VTs.size() == Ins.size() && "Bad value decomposition");
-
- unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
-
- SmallVector<EVT, 6> LoadVTs;
- int VecIdx = -1; // Index of the first element of the vector.
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
- // 32-bits are sign extended or zero extended, depending on whether
- // they are signed or unsigned types.
- bool ExtendIntegerRetVal =
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
-
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- bool needTruncate = false;
- EVT TheLoadType = VTs[i];
- EVT EltType = Ins[i].VT;
- unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
- if (ExtendIntegerRetVal) {
- TheLoadType = MVT::i32;
- EltType = MVT::i32;
- needTruncate = true;
- } else if (TheLoadType.getSizeInBits() < 16) {
- if (VTs[i].isInteger())
- needTruncate = true;
- EltType = MVT::i16;
- }
-
- // Record index of the very first element of the vector.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
- VecIdx = i;
- }
-
- LoadVTs.push_back(EltType);
-
- if (VectorInfo[i] & PVF_LAST) {
- unsigned NumElts = LoadVTs.size();
- LoadVTs.push_back(MVT::Other);
- LoadVTs.push_back(MVT::Glue);
- NVPTXISD::NodeType Op;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::LoadParam;
- break;
- case 2:
- Op = NVPTXISD::LoadParamV2;
- break;
- case 4:
- Op = NVPTXISD::LoadParamV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- SDValue LoadOperands[] = {
- Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
- SDValue RetVal = DAG.getMemIntrinsicNode(
- Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
- MachinePointerInfo(), EltAlign);
-
- for (unsigned j = 0; j < NumElts; ++j) {
- SDValue Ret = RetVal.getValue(j);
- if (needTruncate)
- Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
- InVals.push_back(Ret);
- }
- Chain = RetVal.getValue(NumElts);
- InFlag = RetVal.getValue(NumElts + 1);
-
- // Cleanup
- VecIdx = -1;
- LoadVTs.clear();
- }
- }
- }
-
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(uniqueCallSite, dl, true),
- DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
- true),
- InFlag, dl);
- uniqueCallSite++;
-
- // set isTailCall to false for now, until we figure out how to express
- // tail call optimization in PTX
- isTailCall = false;
- return Chain;
-}
-
-// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
-// (see LegalizeDAG.cpp). This is slow and uses local memory.
-// We use extract/insert/build vector just as LegalizeOp() does in llvm 2.5.
-SDValue
-NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- SmallVector<SDValue, 8> Ops;
- unsigned NumOperands = Node->getNumOperands();
- for (unsigned i = 0; i < NumOperands; ++i) {
- SDValue SubOp = Node->getOperand(i);
- EVT VVT = SubOp.getNode()->getValueType(0);
- EVT EltVT = VVT.getVectorElementType();
- unsigned NumSubElem = VVT.getVectorNumElements();
- for (unsigned j = 0; j < NumSubElem; ++j) {
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
- DAG.getIntPtrConstant(j, dl)));
- }
- }
- return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
-}
-
-// We can initialize a constant f16x2 with a single .b32 move. Normally it
-// would get lowered as two constant loads and a vector-packing move.
-// mov.b16 %h1, 0x4000;
-// mov.b16 %h2, 0x3C00;
-// mov.b32 %hh2, {%h2, %h1};
-// Instead we want just a constant move:
-// mov.b32 %hh2, 0x40003C00
-//
-// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
-// generates good SASS in both cases.
-SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- //return Op;
- if (!(Op->getValueType(0) == MVT::v2f16 &&
- isa<ConstantFPSDNode>(Op->getOperand(0)) &&
- isa<ConstantFPSDNode>(Op->getOperand(1))))
- return Op;
-
- APInt E0 =
- cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
- APInt E1 =
- cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
- SDValue Const =
- DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
- return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
-}
-
-SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue Index = Op->getOperand(1);
- // Constant index will be matched by tablegen.
- if (isa<ConstantSDNode>(Index.getNode()))
- return Op;
-
- // Extract individual elements and select one of them.
- SDValue Vector = Op->getOperand(0);
- EVT VectorVT = Vector.getValueType();
- assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
- EVT EltVT = VectorVT.getVectorElementType();
-
- SDLoc dl(Op.getNode());
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
- DAG.getIntPtrConstant(0, dl));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
- DAG.getIntPtrConstant(1, dl));
- return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
- ISD::CondCode::SETEQ);
-}
-
-/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
-/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
-/// amount, or
-/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
-/// amount.
-SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
-
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- SDLoc dl(Op);
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
- unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
-
- if (VTBits == 32 && STI.getSmVersion() >= 35) {
- // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
- // {dHi, dLo} = {aHi, aLo} >> Amt
- // dHi = aHi >> Amt
- // dLo = shf.r.clamp aLo, aHi, Amt
-
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
- ShAmt);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
- else {
- // {dHi, dLo} = {aHi, aLo} >> Amt
- // - if (Amt>=size) then
- // dLo = aHi >> (Amt-size)
- // dHi = aHi >> Amt (this is either all 0 or all 1)
- // else
- // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
- // dHi = aHi >> Amt
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
-
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ISD::SETGE);
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
-}
-
-/// LowerShiftLeftParts - Lower SHL_PARTS, which
-/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
-/// amount, or
-/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
-/// amount.
-SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- assert(Op.getOpcode() == ISD::SHL_PARTS);
-
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- SDLoc dl(Op);
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
-
- if (VTBits == 32 && STI.getSmVersion() >= 35) {
- // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
- // {dHi, dLo} = {aHi, aLo} << Amt
- // dHi = shf.l.clamp aLo, aHi, Amt
- // dLo = aLo << Amt
-
- SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
- ShAmt);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
- else {
- // {dHi, dLo} = {aHi, aLo} << Amt
- // - if (Amt>=size) then
- // dLo = aLo << Amt (all 0)
-    //      dHi = aLo << (Amt-size)
- // else
- // dLo = aLo << Amt
- // dHi = (aHi << Amt) | (aLo >> (size-Amt))
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
-
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ISD::SETGE);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
- SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
-}
-
-SDValue
-NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- case ISD::RETURNADDR:
- return SDValue();
- case ISD::FRAMEADDR:
- return SDValue();
- case ISD::GlobalAddress:
- return LowerGlobalAddress(Op, DAG);
- case ISD::INTRINSIC_W_CHAIN:
- return Op;
- case ISD::BUILD_VECTOR:
- return LowerBUILD_VECTOR(Op, DAG);
- case ISD::EXTRACT_SUBVECTOR:
- return Op;
- case ISD::EXTRACT_VECTOR_ELT:
- return LowerEXTRACT_VECTOR_ELT(Op, DAG);
- case ISD::CONCAT_VECTORS:
- return LowerCONCAT_VECTORS(Op, DAG);
- case ISD::STORE:
- return LowerSTORE(Op, DAG);
- case ISD::LOAD:
- return LowerLOAD(Op, DAG);
- case ISD::SHL_PARTS:
- return LowerShiftLeftParts(Op, DAG);
- case ISD::SRA_PARTS:
- case ISD::SRL_PARTS:
- return LowerShiftRightParts(Op, DAG);
- case ISD::SELECT:
- return LowerSelect(Op, DAG);
- default:
- llvm_unreachable("Custom lowering not defined for operation");
- }
-}
-
-SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
- SDValue Op0 = Op->getOperand(0);
- SDValue Op1 = Op->getOperand(1);
- SDValue Op2 = Op->getOperand(2);
- SDLoc DL(Op.getNode());
-
- assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
-
- Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
- Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
- SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
-
- return Trunc;
-}
-
-SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
- if (Op.getValueType() == MVT::i1)
- return LowerLOADi1(Op, DAG);
-
-  // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
-  // loads and have to handle them here.
- if (Op.getValueType() == MVT::v2f16) {
- LoadSDNode *Load = cast<LoadSDNode>(Op);
- EVT MemVT = Load->getMemoryVT();
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- Load->getAddressSpace(), Load->getAlignment())) {
- SDValue Ops[2];
- std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
- return DAG.getMergeValues(Ops, SDLoc(Op));
- }
- }
-
- return SDValue();
-}
-
-// v = ld i1* addr
-// =>
-// v1 = ld i8* addr (-> i16)
-// v = trunc i16 to i1
-SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- LoadSDNode *LD = cast<LoadSDNode>(Node);
- SDLoc dl(Node);
- assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
- assert(Node->getValueType(0) == MVT::i1 &&
- "Custom lowering for i1 load only");
- SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
- LD->getPointerInfo(), LD->getAlignment(),
- LD->getMemOperand()->getFlags());
- SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
- // The legalizer (the caller) is expecting two values from the legalized
- // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
- // in LegalizeDAG.cpp which also uses MergeValues.
- SDValue Ops[] = { result, LD->getChain() };
- return DAG.getMergeValues(Ops, dl);
-}
-
-SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *Store = cast<StoreSDNode>(Op);
- EVT VT = Store->getMemoryVT();
-
- if (VT == MVT::i1)
- return LowerSTOREi1(Op, DAG);
-
-  // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
-  // stores and have to handle them here.
- if (VT == MVT::v2f16 &&
- !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- Store->getAddressSpace(), Store->getAlignment()))
- return expandUnalignedStore(Store, DAG);
-
- if (VT.isVector())
- return LowerSTOREVector(Op, DAG);
-
- return SDValue();
-}
-
-SDValue
-NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
- SDNode *N = Op.getNode();
- SDValue Val = N->getOperand(1);
- SDLoc DL(N);
- EVT ValVT = Val.getValueType();
-
- if (ValVT.isVector()) {
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not
- // legal. We can (and should) split that into 2 stores of <2 x double> here
- // but I'm leaving that as a TODO for now.
- if (!ValVT.isSimple())
- return SDValue();
- switch (ValVT.getSimpleVT().SimpleTy) {
- default:
- return SDValue();
- case MVT::v2i8:
- case MVT::v2i16:
- case MVT::v2i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v2f32:
- case MVT::v2f64:
- case MVT::v4i8:
- case MVT::v4i16:
- case MVT::v4i32:
- case MVT::v4f16:
- case MVT::v4f32:
- case MVT::v8f16: // <4 x f16x2>
- // This is a "native" vector type
- break;
- }
-
- MemSDNode *MemSD = cast<MemSDNode>(N);
- const DataLayout &TD = DAG.getDataLayout();
-
- unsigned Align = MemSD->getAlignment();
- unsigned PrefAlign =
- TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
- if (Align < PrefAlign) {
- // This store is not sufficiently aligned, so bail out and let this vector
- // store be scalarized. Note that we may still be able to emit smaller
- // vector stores. For example, if we are storing a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return SDValue();
- }
-
- unsigned Opcode = 0;
- EVT EltVT = ValVT.getVectorElementType();
- unsigned NumElts = ValVT.getVectorNumElements();
-
- // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // stored type to i16 and propagate the "real" type as the memory type.
- bool NeedExt = false;
- if (EltVT.getSizeInBits() < 16)
- NeedExt = true;
-
- bool StoreF16x2 = false;
- switch (NumElts) {
- default:
- return SDValue();
- case 2:
- Opcode = NVPTXISD::StoreV2;
- break;
- case 4:
- Opcode = NVPTXISD::StoreV4;
- break;
- case 8:
-      // v8f16 is a special case. PTX doesn't have an st.v8.f16
-      // instruction. Instead, we split the vector into v2f16 chunks and
- // store them with st.v4.b32.
- assert(EltVT == MVT::f16 && "Wrong type for the vector.");
- Opcode = NVPTXISD::StoreV4;
- StoreF16x2 = true;
- break;
- }
-
- SmallVector<SDValue, 8> Ops;
-
- // First is the chain
- Ops.push_back(N->getOperand(0));
-
- if (StoreF16x2) {
- // Combine f16,f16 -> v2f16
- NumElts /= 2;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
- DAG.getIntPtrConstant(i * 2, DL));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
- DAG.getIntPtrConstant(i * 2 + 1, DL));
- SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
- Ops.push_back(V2);
- }
- } else {
- // Then the split values
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
- DAG.getIntPtrConstant(i, DL));
- if (NeedExt)
- ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
- Ops.push_back(ExtVal);
- }
- }
-
- // Then any remaining arguments
- Ops.append(N->op_begin() + 2, N->op_end());
-
- SDValue NewSt =
- DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
- MemSD->getMemoryVT(), MemSD->getMemOperand());
-
- // return DCI.CombineTo(N, NewSt, true);
- return NewSt;
- }
-
- return SDValue();
-}
-
-// st i1 v, addr
-// =>
-// v1 = zxt v to i16
-// st.u8 i16, addr
-SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- StoreSDNode *ST = cast<StoreSDNode>(Node);
- SDValue Tmp1 = ST->getChain();
- SDValue Tmp2 = ST->getBasePtr();
- SDValue Tmp3 = ST->getValue();
- assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
- Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
- SDValue Result =
- DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
- ST->getAlignment(), ST->getMemOperand()->getFlags());
- return Result;
-}
-
-SDValue
-NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
- std::string ParamSym;
- raw_string_ostream ParamStr(ParamSym);
-
- ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
- ParamStr.flush();
-
- std::string *SavedStr =
- nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
- return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
-}
-
-// Check to see if the kernel argument is image*_t or sampler_t
-
-static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
- static const char *const specialTypes[] = { "struct._image2d_t",
- "struct._image3d_t",
- "struct._sampler_t" };
-
- Type *Ty = arg->getType();
- auto *PTy = dyn_cast<PointerType>(Ty);
-
- if (!PTy)
- return false;
-
- if (!context)
- return false;
-
- auto *STy = dyn_cast<StructType>(PTy->getElementType());
- if (!STy || STy->isLiteral())
- return false;
-
- return std::find(std::begin(specialTypes), std::end(specialTypes),
- STy->getName()) != std::end(specialTypes);
-}
-
-SDValue NVPTXTargetLowering::LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- const DataLayout &DL = DAG.getDataLayout();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
-
- const Function *F = MF.getFunction();
- const AttributeList &PAL = F->getAttributes();
- const TargetLowering *TLI = STI.getTargetLowering();
-
- SDValue Root = DAG.getRoot();
- std::vector<SDValue> OutChains;
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- std::vector<Type *> argTypes;
- std::vector<const Argument *> theArgs;
- for (const Argument &I : F->args()) {
- theArgs.push_back(&I);
- argTypes.push_back(I.getType());
- }
- // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
- // Ins.size() will be larger
- // * if there is an aggregate argument with multiple fields (each field
- // showing up separately in Ins)
-  //   * if there is a vector argument with more elements than the typical
-  //     vector length (generally more than 4), where each vector element
-  //     is individually present in Ins.
- // So a different index should be used for indexing into Ins.
- // See similar issue in LowerCall.
- unsigned InsIdx = 0;
-
- int idx = 0;
- for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
- Type *Ty = argTypes[i];
-
-    // If the kernel argument is image*_t or sampler_t, convert it to
-    // an i32 constant holding the parameter position. This can later be
-    // matched in the AsmPrinter to output the correct mangled name.
- if (isImageOrSamplerVal(
- theArgs[i],
- (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
- : nullptr))) {
- assert(isKernelFunction(*F) &&
- "Only kernels can have image/sampler params");
- InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
- continue;
- }
-
- if (theArgs[i]->use_empty()) {
- // argument is dead
- if (Ty->isAggregateType()) {
- SmallVector<EVT, 16> vtparts;
-
- ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
- assert(vtparts.size() > 0 && "empty aggregate type not expected");
- for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
- ++parti) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- ++InsIdx;
- }
- if (vtparts.size() > 0)
- --InsIdx;
- continue;
- }
- if (Ty->isVectorTy()) {
- EVT ObjectVT = getValueType(DL, Ty);
- unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
- for (unsigned parti = 0; parti < NumRegs; ++parti) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- ++InsIdx;
- }
- if (NumRegs > 0)
- --InsIdx;
- continue;
- }
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- continue;
- }
-
-    // In the following cases, assign a node order of "idx+1"
-    // to newly created nodes. The SDNodes for params have to
-    // appear in the same order as they appear in the original
-    // function. "idx+1" holds that order.
- if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
- bool aggregateIsPacked = false;
- if (StructType *STy = dyn_cast<StructType>(Ty))
- aggregateIsPacked = STy->isPacked();
-
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
- assert(VTs.size() > 0 && "Unexpected empty type.");
- auto VectorInfo =
- VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
-
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
- int VecIdx = -1; // Index of the first element of the current vector.
- for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
- if (VectorInfo[parti] & PVF_FIRST) {
- assert(VecIdx == -1 && "Orphaned vector.");
- VecIdx = parti;
- }
-
-        // That's the last element of this load op.
- if (VectorInfo[parti] & PVF_LAST) {
- unsigned NumElts = parti - VecIdx + 1;
- EVT EltVT = VTs[parti];
- // i1 is loaded/stored as i8.
- EVT LoadVT = EltVT;
- if (EltVT == MVT::i1)
- LoadVT = MVT::i8;
- else if (EltVT == MVT::v2f16)
- // getLoad needs a vector type, but it can't handle
- // vectors which contain v2f16 elements. So we must load
- // using i32 here and then bitcast back.
- LoadVT = MVT::i32;
-
- EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
- SDValue VecAddr =
- DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
- DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
- Value *srcValue = Constant::getNullValue(PointerType::get(
- EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
- SDValue P =
- DAG.getLoad(VecVT, dl, Root, VecAddr,
- MachinePointerInfo(srcValue), aggregateIsPacked,
- MachineMemOperand::MODereferenceable |
- MachineMemOperand::MOInvariant);
- if (P.getNode())
- P.getNode()->setIROrder(idx + 1);
- for (unsigned j = 0; j < NumElts; ++j) {
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
- DAG.getIntPtrConstant(j, dl));
- // We've loaded i1 as an i8 and now must truncate it back to i1
- if (EltVT == MVT::i1)
- Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
- // v2f16 was loaded as an i32. Now we must bitcast it back.
- else if (EltVT == MVT::v2f16)
- Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
-            // Extend the element if necessary (e.g. an i8 is loaded
- // into an i16 register)
- if (Ins[InsIdx].VT.isInteger() &&
- Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
- unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND;
- Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
- }
- InVals.push_back(Elt);
- }
-
- // Reset vector tracking state.
- VecIdx = -1;
- }
- ++InsIdx;
- }
- if (VTs.size() > 0)
- --InsIdx;
- continue;
- }
-
-    // Param has ByVal attribute.
-    // Return MoveParam(param symbol).
-    // Ideally, the param symbol could be returned directly,
-    // but when the SDNode builder decides to use it in a CopyToReg(),
-    // the machine instruction fails because TargetExternalSymbol
-    // (not lowered) is target dependent, and CopyToReg assumes
-    // the source is lowered.
- EVT ObjectVT = getValueType(DL, Ty);
- assert(ObjectVT == Ins[InsIdx].VT &&
- "Ins type did not match function type");
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
- SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
- if (p.getNode())
- p.getNode()->setIROrder(idx + 1);
- InVals.push_back(p);
- }
-
-  // Clang will check for explicit VarArgs and issue an error if any are
-  // present. However, Clang will let code with implicit var args, like
-  // f(), pass. See bug 617733.
-  // We treat this case as if the arg list is empty.
- // if (F.isVarArg()) {
- // assert(0 && "VarArg not supported yet!");
- //}
-
- if (!OutChains.empty())
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
-
- return Chain;
-}
-
-SDValue
-NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SDLoc &dl, SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- Type *RetTy = MF.getFunction()->getReturnType();
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- const DataLayout DL = DAG.getDataLayout();
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
- assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
-
- auto VectorInfo = VectorizePTXValueVTs(
- VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
- // 32-bits are sign extended or zero extended, depending on whether
- // they are signed or unsigned types.
- bool ExtendIntegerRetVal =
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
-
- SmallVector<SDValue, 6> StoreOperands;
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- // New load/store. Record chain and offset operands.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Orphaned operand list.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
- }
-
- SDValue RetVal = OutVals[i];
- if (ExtendIntegerRetVal) {
- RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND,
- dl, MVT::i32, RetVal);
- } else if (RetVal.getValueSizeInBits() < 16) {
- // Use 16-bit registers for small load-stores as it's the
- // smallest general purpose register size supported by NVPTX.
- RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
- }
-
- // Record the value to return.
- StoreOperands.push_back(RetVal);
-
- // That's the last element of this store op.
- if (VectorInfo[i] & PVF_LAST) {
- NVPTXISD::NodeType Op;
- unsigned NumElts = StoreOperands.size() - 2;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::StoreRetval;
- break;
- case 2:
- Op = NVPTXISD::StoreRetvalV2;
- break;
- case 4:
- Op = NVPTXISD::StoreRetvalV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- // Adjust type of load/store op if we've extended the scalar
- // return value.
- EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
- Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
- StoreOperands, TheStoreType,
- MachinePointerInfo(), 1);
- // Cleanup vector state.
- StoreOperands.clear();
- }
- }
-
- return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
-}
-
-void NVPTXTargetLowering::LowerAsmOperandForConstraint(
- SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const {
- if (Constraint.length() > 1)
- return;
- else
- TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
-}
-
-static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
- switch (Intrinsic) {
- default:
- return 0;
-
- case Intrinsic::nvvm_tex_1d_v4f32_s32:
- return NVPTXISD::Tex1DFloatS32;
- case Intrinsic::nvvm_tex_1d_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloat;
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloatGrad;
- case Intrinsic::nvvm_tex_1d_v4s32_s32:
- return NVPTXISD::Tex1DS32S32;
- case Intrinsic::nvvm_tex_1d_v4s32_f32:
- return NVPTXISD::Tex1DS32Float;
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
- return NVPTXISD::Tex1DS32FloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
- return NVPTXISD::Tex1DS32FloatGrad;
- case Intrinsic::nvvm_tex_1d_v4u32_s32:
- return NVPTXISD::Tex1DU32S32;
- case Intrinsic::nvvm_tex_1d_v4u32_f32:
- return NVPTXISD::Tex1DU32Float;
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
- return NVPTXISD::Tex1DU32FloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
- return NVPTXISD::Tex1DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
- return NVPTXISD::Tex1DArrayFloatS32;
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloat;
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
- return NVPTXISD::Tex1DArrayS32S32;
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32Float;
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
- return NVPTXISD::Tex1DArrayU32S32;
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32Float;
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_2d_v4f32_s32:
- return NVPTXISD::Tex2DFloatS32;
- case Intrinsic::nvvm_tex_2d_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloat;
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloatGrad;
- case Intrinsic::nvvm_tex_2d_v4s32_s32:
- return NVPTXISD::Tex2DS32S32;
- case Intrinsic::nvvm_tex_2d_v4s32_f32:
- return NVPTXISD::Tex2DS32Float;
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
- return NVPTXISD::Tex2DS32FloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
- return NVPTXISD::Tex2DS32FloatGrad;
- case Intrinsic::nvvm_tex_2d_v4u32_s32:
- return NVPTXISD::Tex2DU32S32;
- case Intrinsic::nvvm_tex_2d_v4u32_f32:
- return NVPTXISD::Tex2DU32Float;
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
- return NVPTXISD::Tex2DU32FloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
- return NVPTXISD::Tex2DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
- return NVPTXISD::Tex2DArrayFloatS32;
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloat;
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
- return NVPTXISD::Tex2DArrayS32S32;
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32Float;
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
- return NVPTXISD::Tex2DArrayU32S32;
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32Float;
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_3d_v4f32_s32:
- return NVPTXISD::Tex3DFloatS32;
- case Intrinsic::nvvm_tex_3d_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloat;
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloatGrad;
- case Intrinsic::nvvm_tex_3d_v4s32_s32:
- return NVPTXISD::Tex3DS32S32;
- case Intrinsic::nvvm_tex_3d_v4s32_f32:
- return NVPTXISD::Tex3DS32Float;
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
- return NVPTXISD::Tex3DS32FloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
- return NVPTXISD::Tex3DS32FloatGrad;
- case Intrinsic::nvvm_tex_3d_v4u32_s32:
- return NVPTXISD::Tex3DU32S32;
- case Intrinsic::nvvm_tex_3d_v4u32_f32:
- return NVPTXISD::Tex3DU32Float;
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
- return NVPTXISD::Tex3DU32FloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
- return NVPTXISD::Tex3DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_cube_v4f32_f32:
- return NVPTXISD::TexCubeFloatFloat;
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
- return NVPTXISD::TexCubeFloatFloatLevel;
- case Intrinsic::nvvm_tex_cube_v4s32_f32:
- return NVPTXISD::TexCubeS32Float;
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
- return NVPTXISD::TexCubeS32FloatLevel;
- case Intrinsic::nvvm_tex_cube_v4u32_f32:
- return NVPTXISD::TexCubeU32Float;
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
- return NVPTXISD::TexCubeU32FloatLevel;
-
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
- return NVPTXISD::TexCubeArrayFloatFloat;
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
- return NVPTXISD::TexCubeArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
- return NVPTXISD::TexCubeArrayS32Float;
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
- return NVPTXISD::TexCubeArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
- return NVPTXISD::TexCubeArrayU32Float;
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
- return NVPTXISD::TexCubeArrayU32FloatLevel;
-
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
- return NVPTXISD::Tld4R2DFloatFloat;
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
- return NVPTXISD::Tld4G2DFloatFloat;
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
- return NVPTXISD::Tld4B2DFloatFloat;
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
- return NVPTXISD::Tld4A2DFloatFloat;
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
- return NVPTXISD::Tld4R2DS64Float;
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
- return NVPTXISD::Tld4G2DS64Float;
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
- return NVPTXISD::Tld4B2DS64Float;
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
- return NVPTXISD::Tld4A2DS64Float;
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
- return NVPTXISD::Tld4R2DU64Float;
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
- return NVPTXISD::Tld4G2DU64Float;
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
- return NVPTXISD::Tld4B2DU64Float;
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
- return NVPTXISD::Tld4A2DU64Float;
-
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
- return NVPTXISD::TexUnified1DFloatS32;
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloat;
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
- return NVPTXISD::TexUnified1DS32S32;
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
- return NVPTXISD::TexUnified1DS32Float;
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
- return NVPTXISD::TexUnified1DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
- return NVPTXISD::TexUnified1DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
- return NVPTXISD::TexUnified1DU32S32;
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
- return NVPTXISD::TexUnified1DU32Float;
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
- return NVPTXISD::TexUnified1DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
- return NVPTXISD::TexUnified1DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
- return NVPTXISD::TexUnified1DArrayFloatS32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
- return NVPTXISD::TexUnified1DArrayS32S32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32Float;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
- return NVPTXISD::TexUnified1DArrayU32S32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32Float;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
- return NVPTXISD::TexUnified2DFloatS32;
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloat;
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
- return NVPTXISD::TexUnified2DS32S32;
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
- return NVPTXISD::TexUnified2DS32Float;
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
- return NVPTXISD::TexUnified2DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
- return NVPTXISD::TexUnified2DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
- return NVPTXISD::TexUnified2DU32S32;
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
- return NVPTXISD::TexUnified2DU32Float;
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
- return NVPTXISD::TexUnified2DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
- return NVPTXISD::TexUnified2DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
- return NVPTXISD::TexUnified2DArrayFloatS32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
- return NVPTXISD::TexUnified2DArrayS32S32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32Float;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
- return NVPTXISD::TexUnified2DArrayU32S32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32Float;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
- return NVPTXISD::TexUnified3DFloatS32;
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloat;
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
- return NVPTXISD::TexUnified3DS32S32;
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
- return NVPTXISD::TexUnified3DS32Float;
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
- return NVPTXISD::TexUnified3DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
- return NVPTXISD::TexUnified3DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
- return NVPTXISD::TexUnified3DU32S32;
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
- return NVPTXISD::TexUnified3DU32Float;
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
- return NVPTXISD::TexUnified3DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
- return NVPTXISD::TexUnified3DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeFloatFloat;
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeS32Float;
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeU32Float;
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeU32FloatLevel;
-
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeArrayS32Float;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeArrayU32Float;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
-
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedR2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedG2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedB2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedA2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedR2DS64Float;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedG2DS64Float;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedB2DS64Float;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedA2DS64Float;
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedR2DU64Float;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedG2DU64Float;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedB2DU64Float;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedA2DU64Float;
- }
-}
-
-static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
- switch (Intrinsic) {
- default:
- return 0;
- case Intrinsic::nvvm_suld_1d_i8_clamp:
- return NVPTXISD::Suld1DI8Clamp;
- case Intrinsic::nvvm_suld_1d_i16_clamp:
- return NVPTXISD::Suld1DI16Clamp;
- case Intrinsic::nvvm_suld_1d_i32_clamp:
- return NVPTXISD::Suld1DI32Clamp;
- case Intrinsic::nvvm_suld_1d_i64_clamp:
- return NVPTXISD::Suld1DI64Clamp;
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:
- return NVPTXISD::Suld1DV2I8Clamp;
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:
- return NVPTXISD::Suld1DV2I16Clamp;
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:
- return NVPTXISD::Suld1DV2I32Clamp;
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:
- return NVPTXISD::Suld1DV2I64Clamp;
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:
- return NVPTXISD::Suld1DV4I8Clamp;
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:
- return NVPTXISD::Suld1DV4I16Clamp;
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:
- return NVPTXISD::Suld1DV4I32Clamp;
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:
- return NVPTXISD::Suld1DArrayI8Clamp;
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:
- return NVPTXISD::Suld1DArrayI16Clamp;
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:
- return NVPTXISD::Suld1DArrayI32Clamp;
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:
- return NVPTXISD::Suld1DArrayI64Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
- return NVPTXISD::Suld1DArrayV2I8Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
- return NVPTXISD::Suld1DArrayV2I16Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
- return NVPTXISD::Suld1DArrayV2I32Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
- return NVPTXISD::Suld1DArrayV2I64Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
- return NVPTXISD::Suld1DArrayV4I8Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
- return NVPTXISD::Suld1DArrayV4I16Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
- return NVPTXISD::Suld1DArrayV4I32Clamp;
- case Intrinsic::nvvm_suld_2d_i8_clamp:
- return NVPTXISD::Suld2DI8Clamp;
- case Intrinsic::nvvm_suld_2d_i16_clamp:
- return NVPTXISD::Suld2DI16Clamp;
- case Intrinsic::nvvm_suld_2d_i32_clamp:
- return NVPTXISD::Suld2DI32Clamp;
- case Intrinsic::nvvm_suld_2d_i64_clamp:
- return NVPTXISD::Suld2DI64Clamp;
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:
- return NVPTXISD::Suld2DV2I8Clamp;
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:
- return NVPTXISD::Suld2DV2I16Clamp;
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:
- return NVPTXISD::Suld2DV2I32Clamp;
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:
- return NVPTXISD::Suld2DV2I64Clamp;
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:
- return NVPTXISD::Suld2DV4I8Clamp;
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:
- return NVPTXISD::Suld2DV4I16Clamp;
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:
- return NVPTXISD::Suld2DV4I32Clamp;
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:
- return NVPTXISD::Suld2DArrayI8Clamp;
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:
- return NVPTXISD::Suld2DArrayI16Clamp;
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:
- return NVPTXISD::Suld2DArrayI32Clamp;
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:
- return NVPTXISD::Suld2DArrayI64Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
- return NVPTXISD::Suld2DArrayV2I8Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
- return NVPTXISD::Suld2DArrayV2I16Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
- return NVPTXISD::Suld2DArrayV2I32Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
- return NVPTXISD::Suld2DArrayV2I64Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
- return NVPTXISD::Suld2DArrayV4I8Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
- return NVPTXISD::Suld2DArrayV4I16Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
- return NVPTXISD::Suld2DArrayV4I32Clamp;
- case Intrinsic::nvvm_suld_3d_i8_clamp:
- return NVPTXISD::Suld3DI8Clamp;
- case Intrinsic::nvvm_suld_3d_i16_clamp:
- return NVPTXISD::Suld3DI16Clamp;
- case Intrinsic::nvvm_suld_3d_i32_clamp:
- return NVPTXISD::Suld3DI32Clamp;
- case Intrinsic::nvvm_suld_3d_i64_clamp:
- return NVPTXISD::Suld3DI64Clamp;
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:
- return NVPTXISD::Suld3DV2I8Clamp;
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:
- return NVPTXISD::Suld3DV2I16Clamp;
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:
- return NVPTXISD::Suld3DV2I32Clamp;
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:
- return NVPTXISD::Suld3DV2I64Clamp;
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:
- return NVPTXISD::Suld3DV4I8Clamp;
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:
- return NVPTXISD::Suld3DV4I16Clamp;
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:
- return NVPTXISD::Suld3DV4I32Clamp;
- case Intrinsic::nvvm_suld_1d_i8_trap:
- return NVPTXISD::Suld1DI8Trap;
- case Intrinsic::nvvm_suld_1d_i16_trap:
- return NVPTXISD::Suld1DI16Trap;
- case Intrinsic::nvvm_suld_1d_i32_trap:
- return NVPTXISD::Suld1DI32Trap;
- case Intrinsic::nvvm_suld_1d_i64_trap:
- return NVPTXISD::Suld1DI64Trap;
- case Intrinsic::nvvm_suld_1d_v2i8_trap:
- return NVPTXISD::Suld1DV2I8Trap;
- case Intrinsic::nvvm_suld_1d_v2i16_trap:
- return NVPTXISD::Suld1DV2I16Trap;
- case Intrinsic::nvvm_suld_1d_v2i32_trap:
- return NVPTXISD::Suld1DV2I32Trap;
- case Intrinsic::nvvm_suld_1d_v2i64_trap:
- return NVPTXISD::Suld1DV2I64Trap;
- case Intrinsic::nvvm_suld_1d_v4i8_trap:
- return NVPTXISD::Suld1DV4I8Trap;
- case Intrinsic::nvvm_suld_1d_v4i16_trap:
- return NVPTXISD::Suld1DV4I16Trap;
- case Intrinsic::nvvm_suld_1d_v4i32_trap:
- return NVPTXISD::Suld1DV4I32Trap;
- case Intrinsic::nvvm_suld_1d_array_i8_trap:
- return NVPTXISD::Suld1DArrayI8Trap;
- case Intrinsic::nvvm_suld_1d_array_i16_trap:
- return NVPTXISD::Suld1DArrayI16Trap;
- case Intrinsic::nvvm_suld_1d_array_i32_trap:
- return NVPTXISD::Suld1DArrayI32Trap;
- case Intrinsic::nvvm_suld_1d_array_i64_trap:
- return NVPTXISD::Suld1DArrayI64Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
- return NVPTXISD::Suld1DArrayV2I8Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
- return NVPTXISD::Suld1DArrayV2I16Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
- return NVPTXISD::Suld1DArrayV2I32Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
- return NVPTXISD::Suld1DArrayV2I64Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
- return NVPTXISD::Suld1DArrayV4I8Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
- return NVPTXISD::Suld1DArrayV4I16Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
- return NVPTXISD::Suld1DArrayV4I32Trap;
- case Intrinsic::nvvm_suld_2d_i8_trap:
- return NVPTXISD::Suld2DI8Trap;
- case Intrinsic::nvvm_suld_2d_i16_trap:
- return NVPTXISD::Suld2DI16Trap;
- case Intrinsic::nvvm_suld_2d_i32_trap:
- return NVPTXISD::Suld2DI32Trap;
- case Intrinsic::nvvm_suld_2d_i64_trap:
- return NVPTXISD::Suld2DI64Trap;
- case Intrinsic::nvvm_suld_2d_v2i8_trap:
- return NVPTXISD::Suld2DV2I8Trap;
- case Intrinsic::nvvm_suld_2d_v2i16_trap:
- return NVPTXISD::Suld2DV2I16Trap;
- case Intrinsic::nvvm_suld_2d_v2i32_trap:
- return NVPTXISD::Suld2DV2I32Trap;
- case Intrinsic::nvvm_suld_2d_v2i64_trap:
- return NVPTXISD::Suld2DV2I64Trap;
- case Intrinsic::nvvm_suld_2d_v4i8_trap:
- return NVPTXISD::Suld2DV4I8Trap;
- case Intrinsic::nvvm_suld_2d_v4i16_trap:
- return NVPTXISD::Suld2DV4I16Trap;
- case Intrinsic::nvvm_suld_2d_v4i32_trap:
- return NVPTXISD::Suld2DV4I32Trap;
- case Intrinsic::nvvm_suld_2d_array_i8_trap:
- return NVPTXISD::Suld2DArrayI8Trap;
- case Intrinsic::nvvm_suld_2d_array_i16_trap:
- return NVPTXISD::Suld2DArrayI16Trap;
- case Intrinsic::nvvm_suld_2d_array_i32_trap:
- return NVPTXISD::Suld2DArrayI32Trap;
- case Intrinsic::nvvm_suld_2d_array_i64_trap:
- return NVPTXISD::Suld2DArrayI64Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
- return NVPTXISD::Suld2DArrayV2I8Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
- return NVPTXISD::Suld2DArrayV2I16Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
- return NVPTXISD::Suld2DArrayV2I32Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
- return NVPTXISD::Suld2DArrayV2I64Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
- return NVPTXISD::Suld2DArrayV4I8Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
- return NVPTXISD::Suld2DArrayV4I16Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
- return NVPTXISD::Suld2DArrayV4I32Trap;
- case Intrinsic::nvvm_suld_3d_i8_trap:
- return NVPTXISD::Suld3DI8Trap;
- case Intrinsic::nvvm_suld_3d_i16_trap:
- return NVPTXISD::Suld3DI16Trap;
- case Intrinsic::nvvm_suld_3d_i32_trap:
- return NVPTXISD::Suld3DI32Trap;
- case Intrinsic::nvvm_suld_3d_i64_trap:
- return NVPTXISD::Suld3DI64Trap;
- case Intrinsic::nvvm_suld_3d_v2i8_trap:
- return NVPTXISD::Suld3DV2I8Trap;
- case Intrinsic::nvvm_suld_3d_v2i16_trap:
- return NVPTXISD::Suld3DV2I16Trap;
- case Intrinsic::nvvm_suld_3d_v2i32_trap:
- return NVPTXISD::Suld3DV2I32Trap;
- case Intrinsic::nvvm_suld_3d_v2i64_trap:
- return NVPTXISD::Suld3DV2I64Trap;
- case Intrinsic::nvvm_suld_3d_v4i8_trap:
- return NVPTXISD::Suld3DV4I8Trap;
- case Intrinsic::nvvm_suld_3d_v4i16_trap:
- return NVPTXISD::Suld3DV4I16Trap;
- case Intrinsic::nvvm_suld_3d_v4i32_trap:
- return NVPTXISD::Suld3DV4I32Trap;
- case Intrinsic::nvvm_suld_1d_i8_zero:
- return NVPTXISD::Suld1DI8Zero;
- case Intrinsic::nvvm_suld_1d_i16_zero:
- return NVPTXISD::Suld1DI16Zero;
- case Intrinsic::nvvm_suld_1d_i32_zero:
- return NVPTXISD::Suld1DI32Zero;
- case Intrinsic::nvvm_suld_1d_i64_zero:
- return NVPTXISD::Suld1DI64Zero;
- case Intrinsic::nvvm_suld_1d_v2i8_zero:
- return NVPTXISD::Suld1DV2I8Zero;
- case Intrinsic::nvvm_suld_1d_v2i16_zero:
- return NVPTXISD::Suld1DV2I16Zero;
- case Intrinsic::nvvm_suld_1d_v2i32_zero:
- return NVPTXISD::Suld1DV2I32Zero;
- case Intrinsic::nvvm_suld_1d_v2i64_zero:
- return NVPTXISD::Suld1DV2I64Zero;
- case Intrinsic::nvvm_suld_1d_v4i8_zero:
- return NVPTXISD::Suld1DV4I8Zero;
- case Intrinsic::nvvm_suld_1d_v4i16_zero:
- return NVPTXISD::Suld1DV4I16Zero;
- case Intrinsic::nvvm_suld_1d_v4i32_zero:
- return NVPTXISD::Suld1DV4I32Zero;
- case Intrinsic::nvvm_suld_1d_array_i8_zero:
- return NVPTXISD::Suld1DArrayI8Zero;
- case Intrinsic::nvvm_suld_1d_array_i16_zero:
- return NVPTXISD::Suld1DArrayI16Zero;
- case Intrinsic::nvvm_suld_1d_array_i32_zero:
- return NVPTXISD::Suld1DArrayI32Zero;
- case Intrinsic::nvvm_suld_1d_array_i64_zero:
- return NVPTXISD::Suld1DArrayI64Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
- return NVPTXISD::Suld1DArrayV2I8Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
- return NVPTXISD::Suld1DArrayV2I16Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
- return NVPTXISD::Suld1DArrayV2I32Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
- return NVPTXISD::Suld1DArrayV2I64Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
- return NVPTXISD::Suld1DArrayV4I8Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
- return NVPTXISD::Suld1DArrayV4I16Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
- return NVPTXISD::Suld1DArrayV4I32Zero;
- case Intrinsic::nvvm_suld_2d_i8_zero:
- return NVPTXISD::Suld2DI8Zero;
- case Intrinsic::nvvm_suld_2d_i16_zero:
- return NVPTXISD::Suld2DI16Zero;
- case Intrinsic::nvvm_suld_2d_i32_zero:
- return NVPTXISD::Suld2DI32Zero;
- case Intrinsic::nvvm_suld_2d_i64_zero:
- return NVPTXISD::Suld2DI64Zero;
- case Intrinsic::nvvm_suld_2d_v2i8_zero:
- return NVPTXISD::Suld2DV2I8Zero;
- case Intrinsic::nvvm_suld_2d_v2i16_zero:
- return NVPTXISD::Suld2DV2I16Zero;
- case Intrinsic::nvvm_suld_2d_v2i32_zero:
- return NVPTXISD::Suld2DV2I32Zero;
- case Intrinsic::nvvm_suld_2d_v2i64_zero:
- return NVPTXISD::Suld2DV2I64Zero;
- case Intrinsic::nvvm_suld_2d_v4i8_zero:
- return NVPTXISD::Suld2DV4I8Zero;
- case Intrinsic::nvvm_suld_2d_v4i16_zero:
- return NVPTXISD::Suld2DV4I16Zero;
- case Intrinsic::nvvm_suld_2d_v4i32_zero:
- return NVPTXISD::Suld2DV4I32Zero;
- case Intrinsic::nvvm_suld_2d_array_i8_zero:
- return NVPTXISD::Suld2DArrayI8Zero;
- case Intrinsic::nvvm_suld_2d_array_i16_zero:
- return NVPTXISD::Suld2DArrayI16Zero;
- case Intrinsic::nvvm_suld_2d_array_i32_zero:
- return NVPTXISD::Suld2DArrayI32Zero;
- case Intrinsic::nvvm_suld_2d_array_i64_zero:
- return NVPTXISD::Suld2DArrayI64Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
- return NVPTXISD::Suld2DArrayV2I8Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
- return NVPTXISD::Suld2DArrayV2I16Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
- return NVPTXISD::Suld2DArrayV2I32Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
- return NVPTXISD::Suld2DArrayV2I64Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
- return NVPTXISD::Suld2DArrayV4I8Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
- return NVPTXISD::Suld2DArrayV4I16Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
- return NVPTXISD::Suld2DArrayV4I32Zero;
- case Intrinsic::nvvm_suld_3d_i8_zero:
- return NVPTXISD::Suld3DI8Zero;
- case Intrinsic::nvvm_suld_3d_i16_zero:
- return NVPTXISD::Suld3DI16Zero;
- case Intrinsic::nvvm_suld_3d_i32_zero:
- return NVPTXISD::Suld3DI32Zero;
- case Intrinsic::nvvm_suld_3d_i64_zero:
- return NVPTXISD::Suld3DI64Zero;
- case Intrinsic::nvvm_suld_3d_v2i8_zero:
- return NVPTXISD::Suld3DV2I8Zero;
- case Intrinsic::nvvm_suld_3d_v2i16_zero:
- return NVPTXISD::Suld3DV2I16Zero;
- case Intrinsic::nvvm_suld_3d_v2i32_zero:
- return NVPTXISD::Suld3DV2I32Zero;
- case Intrinsic::nvvm_suld_3d_v2i64_zero:
- return NVPTXISD::Suld3DV2I64Zero;
- case Intrinsic::nvvm_suld_3d_v4i8_zero:
- return NVPTXISD::Suld3DV4I8Zero;
- case Intrinsic::nvvm_suld_3d_v4i16_zero:
- return NVPTXISD::Suld3DV4I16Zero;
- case Intrinsic::nvvm_suld_3d_v4i32_zero:
- return NVPTXISD::Suld3DV4I32Zero;
- }
-}
-
-// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
-// TgtMemIntrinsic because we need the information that is only available in
-// the "Value" type of the destination pointer. In particular, the address
-// space information.
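-// For example, for the atomic intrinsics handled below, the returned
-// IntrinsicInfo records the pointer operand and marks the operation as both
-// reading and writing memory.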
-bool NVPTXTargetLowering::getTgtMemIntrinsic(
- IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
- switch (Intrinsic) {
- default:
- return false;
-
- case Intrinsic::nvvm_atomic_load_add_f32:
- case Intrinsic::nvvm_atomic_load_inc_32:
- case Intrinsic::nvvm_atomic_load_dec_32:
-
- case Intrinsic::nvvm_atomic_add_gen_f_cta:
- case Intrinsic::nvvm_atomic_add_gen_f_sys:
- case Intrinsic::nvvm_atomic_add_gen_i_cta:
- case Intrinsic::nvvm_atomic_add_gen_i_sys:
- case Intrinsic::nvvm_atomic_and_gen_i_cta:
- case Intrinsic::nvvm_atomic_and_gen_i_sys:
- case Intrinsic::nvvm_atomic_cas_gen_i_cta:
- case Intrinsic::nvvm_atomic_cas_gen_i_sys:
- case Intrinsic::nvvm_atomic_dec_gen_i_cta:
- case Intrinsic::nvvm_atomic_dec_gen_i_sys:
- case Intrinsic::nvvm_atomic_inc_gen_i_cta:
- case Intrinsic::nvvm_atomic_inc_gen_i_sys:
- case Intrinsic::nvvm_atomic_max_gen_i_cta:
- case Intrinsic::nvvm_atomic_max_gen_i_sys:
- case Intrinsic::nvvm_atomic_min_gen_i_cta:
- case Intrinsic::nvvm_atomic_min_gen_i_sys:
- case Intrinsic::nvvm_atomic_or_gen_i_cta:
- case Intrinsic::nvvm_atomic_or_gen_i_sys:
- case Intrinsic::nvvm_atomic_exch_gen_i_cta:
- case Intrinsic::nvvm_atomic_exch_gen_i_sys:
- case Intrinsic::nvvm_atomic_xor_gen_i_cta:
- case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
- auto &DL = I.getModule()->getDataLayout();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = true;
- Info.align = 0;
- return true;
- }
-
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p: {
- auto &DL = I.getModule()->getDataLayout();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
- Info.memVT = getValueType(DL, I.getType());
- else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
- Info.memVT = getPointerTy(DL);
- else
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
-
- return true;
- }
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p: {
- auto &DL = I.getModule()->getDataLayout();
-
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
- Info.memVT = getValueType(DL, I.getType());
- else if(Intrinsic == Intrinsic::nvvm_ldg_global_p)
- Info.memVT = getPointerTy(DL);
- else
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
-
- return true;
- }
-
- case Intrinsic::nvvm_tex_1d_v4f32_s32:
- case Intrinsic::nvvm_tex_1d_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_v4f32_s32:
- case Intrinsic::nvvm_tex_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_v4f32_s32:
- case Intrinsic::nvvm_tex_3d_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
- Info.opc = getOpcForTextureInstr(Intrinsic);
- Info.memVT = MVT::v4f32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_tex_1d_v4s32_s32:
- case Intrinsic::nvvm_tex_1d_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_v4s32_s32:
- case Intrinsic::nvvm_tex_2d_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_v4s32_s32:
- case Intrinsic::nvvm_tex_3d_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_v4u32_s32:
- case Intrinsic::nvvm_tex_1d_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_v4u32_s32:
- case Intrinsic::nvvm_tex_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_v4u32_s32:
- case Intrinsic::nvvm_tex_3d_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
- Info.opc = getOpcForTextureInstr(Intrinsic);
- Info.memVT = MVT::v4i32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i8_clamp:
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
- case Intrinsic::nvvm_suld_2d_i8_clamp:
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
- case Intrinsic::nvvm_suld_3d_i8_clamp:
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:
- case Intrinsic::nvvm_suld_1d_i8_trap:
- case Intrinsic::nvvm_suld_1d_v2i8_trap:
- case Intrinsic::nvvm_suld_1d_v4i8_trap:
- case Intrinsic::nvvm_suld_1d_array_i8_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
- case Intrinsic::nvvm_suld_2d_i8_trap:
- case Intrinsic::nvvm_suld_2d_v2i8_trap:
- case Intrinsic::nvvm_suld_2d_v4i8_trap:
- case Intrinsic::nvvm_suld_2d_array_i8_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
- case Intrinsic::nvvm_suld_3d_i8_trap:
- case Intrinsic::nvvm_suld_3d_v2i8_trap:
- case Intrinsic::nvvm_suld_3d_v4i8_trap:
- case Intrinsic::nvvm_suld_1d_i8_zero:
- case Intrinsic::nvvm_suld_1d_v2i8_zero:
- case Intrinsic::nvvm_suld_1d_v4i8_zero:
- case Intrinsic::nvvm_suld_1d_array_i8_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
- case Intrinsic::nvvm_suld_2d_i8_zero:
- case Intrinsic::nvvm_suld_2d_v2i8_zero:
- case Intrinsic::nvvm_suld_2d_v4i8_zero:
- case Intrinsic::nvvm_suld_2d_array_i8_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
- case Intrinsic::nvvm_suld_3d_i8_zero:
- case Intrinsic::nvvm_suld_3d_v2i8_zero:
- case Intrinsic::nvvm_suld_3d_v4i8_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i8;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i16_clamp:
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
- case Intrinsic::nvvm_suld_2d_i16_clamp:
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
- case Intrinsic::nvvm_suld_3d_i16_clamp:
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:
- case Intrinsic::nvvm_suld_1d_i16_trap:
- case Intrinsic::nvvm_suld_1d_v2i16_trap:
- case Intrinsic::nvvm_suld_1d_v4i16_trap:
- case Intrinsic::nvvm_suld_1d_array_i16_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
- case Intrinsic::nvvm_suld_2d_i16_trap:
- case Intrinsic::nvvm_suld_2d_v2i16_trap:
- case Intrinsic::nvvm_suld_2d_v4i16_trap:
- case Intrinsic::nvvm_suld_2d_array_i16_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
- case Intrinsic::nvvm_suld_3d_i16_trap:
- case Intrinsic::nvvm_suld_3d_v2i16_trap:
- case Intrinsic::nvvm_suld_3d_v4i16_trap:
- case Intrinsic::nvvm_suld_1d_i16_zero:
- case Intrinsic::nvvm_suld_1d_v2i16_zero:
- case Intrinsic::nvvm_suld_1d_v4i16_zero:
- case Intrinsic::nvvm_suld_1d_array_i16_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
- case Intrinsic::nvvm_suld_2d_i16_zero:
- case Intrinsic::nvvm_suld_2d_v2i16_zero:
- case Intrinsic::nvvm_suld_2d_v4i16_zero:
- case Intrinsic::nvvm_suld_2d_array_i16_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
- case Intrinsic::nvvm_suld_3d_i16_zero:
- case Intrinsic::nvvm_suld_3d_v2i16_zero:
- case Intrinsic::nvvm_suld_3d_v4i16_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i16;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i32_clamp:
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
- case Intrinsic::nvvm_suld_2d_i32_clamp:
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
- case Intrinsic::nvvm_suld_3d_i32_clamp:
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:
- case Intrinsic::nvvm_suld_1d_i32_trap:
- case Intrinsic::nvvm_suld_1d_v2i32_trap:
- case Intrinsic::nvvm_suld_1d_v4i32_trap:
- case Intrinsic::nvvm_suld_1d_array_i32_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
- case Intrinsic::nvvm_suld_2d_i32_trap:
- case Intrinsic::nvvm_suld_2d_v2i32_trap:
- case Intrinsic::nvvm_suld_2d_v4i32_trap:
- case Intrinsic::nvvm_suld_2d_array_i32_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
- case Intrinsic::nvvm_suld_3d_i32_trap:
- case Intrinsic::nvvm_suld_3d_v2i32_trap:
- case Intrinsic::nvvm_suld_3d_v4i32_trap:
- case Intrinsic::nvvm_suld_1d_i32_zero:
- case Intrinsic::nvvm_suld_1d_v2i32_zero:
- case Intrinsic::nvvm_suld_1d_v4i32_zero:
- case Intrinsic::nvvm_suld_1d_array_i32_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
- case Intrinsic::nvvm_suld_2d_i32_zero:
- case Intrinsic::nvvm_suld_2d_v2i32_zero:
- case Intrinsic::nvvm_suld_2d_v4i32_zero:
- case Intrinsic::nvvm_suld_2d_array_i32_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
- case Intrinsic::nvvm_suld_3d_i32_zero:
- case Intrinsic::nvvm_suld_3d_v2i32_zero:
- case Intrinsic::nvvm_suld_3d_v4i32_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i64_clamp:
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
- case Intrinsic::nvvm_suld_2d_i64_clamp:
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
- case Intrinsic::nvvm_suld_3d_i64_clamp:
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:
- case Intrinsic::nvvm_suld_1d_i64_trap:
- case Intrinsic::nvvm_suld_1d_v2i64_trap:
- case Intrinsic::nvvm_suld_1d_array_i64_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
- case Intrinsic::nvvm_suld_2d_i64_trap:
- case Intrinsic::nvvm_suld_2d_v2i64_trap:
- case Intrinsic::nvvm_suld_2d_array_i64_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
- case Intrinsic::nvvm_suld_3d_i64_trap:
- case Intrinsic::nvvm_suld_3d_v2i64_trap:
- case Intrinsic::nvvm_suld_1d_i64_zero:
- case Intrinsic::nvvm_suld_1d_v2i64_zero:
- case Intrinsic::nvvm_suld_1d_array_i64_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
- case Intrinsic::nvvm_suld_2d_i64_zero:
- case Intrinsic::nvvm_suld_2d_v2i64_zero:
- case Intrinsic::nvvm_suld_2d_array_i64_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
- case Intrinsic::nvvm_suld_3d_i64_zero:
- case Intrinsic::nvvm_suld_3d_v2i64_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i64;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
- }
- return false;
-}
-
-/// isLegalAddressingMode - Return true if the addressing mode represented
-/// by AM is legal for this target, for a load/store of the specified type.
-/// Used to guide target specific optimizations, like loop strength reduction
-/// (LoopStrengthReduce.cpp) and memory optimization for address mode
-/// (CodeGenPrepare.cpp)
-bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
- const AddrMode &AM, Type *Ty,
- unsigned AS) const {
- // AddrMode - This represents an addressing mode of:
- // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
- //
- // The legal address modes are
- // - [avar]
- // - [areg]
- // - [areg+immoff]
- // - [immAddr]
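- //
- // For example, "[buf]", "[%r1]", and "[%r1+16]" are representable,
- // while a scaled form such as "[%r1+4*%r2]" is not.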
-
- if (AM.BaseGV) {
- return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
- }
-
- switch (AM.Scale) {
- case 0: // "r", "r+i" or "i" is allowed
- break;
- case 1:
- if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
- return false;
- // Otherwise we have r+i.
- break;
- default:
- // No scale > 1 is allowed
- return false;
- }
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-NVPTXTargetLowering::ConstraintType
-NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default:
- break;
- case 'b':
- case 'r':
- case 'h':
- case 'c':
- case 'l':
- case 'f':
- case 'd':
- case '0':
- case 'N':
- return C_RegisterClass;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-std::pair<unsigned, const TargetRegisterClass *>
-NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
- StringRef Constraint,
- MVT VT) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'b':
- return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
- case 'c':
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
- case 'h':
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
- case 'r':
- return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
- case 'l':
- case 'N':
- return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
- case 'f':
- return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
- case 'd':
- return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
- }
- }
- return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX DAG Combining
-//===----------------------------------------------------------------------===//
-
-bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
- CodeGenOpt::Level OptLevel) const {
- // Always honor command-line argument
- if (FMAContractLevelOpt.getNumOccurrences() > 0)
- return FMAContractLevelOpt > 0;
-
- // Do not contract if we're not optimizing the code.
- if (OptLevel == 0)
- return false;
-
- // Honor TargetOptions flags that explicitly say fusion is okay.
- if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
- return true;
-
- return allowUnsafeFPMath(MF);
-}
-
-bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
- // Honor TargetOptions flags that explicitly say unsafe math is okay.
- if (MF.getTarget().Options.UnsafeFPMath)
- return true;
-
- // Allow unsafe math if unsafe-fp-math attribute explicitly says so.
- const Function *F = MF.getFunction();
- if (F->hasFnAttribute("unsafe-fp-math")) {
- Attribute Attr = F->getFnAttribute("unsafe-fp-math");
- StringRef Val = Attr.getValueAsString();
- if (Val == "true")
- return true;
- }
-
- return false;
-}
-
-/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
-/// operands N0 and N1. This is a helper for PerformADDCombine that is
-/// called with the default operands, and if that fails, with commuted
-/// operands.
-static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
- TargetLowering::DAGCombinerInfo &DCI,
- const NVPTXSubtarget &Subtarget,
- CodeGenOpt::Level OptLevel) {
- SelectionDAG &DAG = DCI.DAG;
- // Skip the vector case; only scalar values are handled here.
- EVT VT = N0.getValueType();
- if (VT.isVector())
- return SDValue();
-
- // fold (add (mul a, b), c) -> (mad a, b, c)
- //
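- // For example, (add (mul i32 %a, %b), %c) is folded into a single
- // NVPTXISD::IMAD node (integer multiply-add).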
- if (N0.getOpcode() == ISD::MUL) {
- assert (VT.isInteger());
- // For integer:
- // Since integer multiply-add costs the same as integer multiply
- // but is more costly than integer add, do the fusion only when
- // the mul is only used in the add.
- if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
- !N0.getNode()->hasOneUse())
- return SDValue();
-
- // Do the folding
- return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1), N1);
- } else if (N0.getOpcode() == ISD::FMUL) {
- if (VT == MVT::f32 || VT == MVT::f64) {
- const auto *TLI = static_cast<const NVPTXTargetLowering *>(
- &DAG.getTargetLoweringInfo());
- if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
- return SDValue();
-
- // For floating point:
- // Do the fusion only when the mul has fewer than 5 uses, all of
- // which are adds.
- // The heuristic is that if a use is not an add, then that use
- // cannot be fused into an fma, so the mul is still needed anyway.
- // If there are more than 4 uses, even if they are all adds, fusing
- // them will increase register pressure.
- //
- int numUses = 0;
- int nonAddCount = 0;
- for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
- UE = N0.getNode()->use_end();
- UI != UE; ++UI) {
- numUses++;
- SDNode *User = *UI;
- if (User->getOpcode() != ISD::FADD)
- ++nonAddCount;
- }
- if (numUses >= 5)
- return SDValue();
- if (nonAddCount) {
- int orderNo = N->getIROrder();
- int orderNo2 = N0.getNode()->getIROrder();
- // Simple heuristic for estimating potential register pressure:
- // the difference in IR order is used to measure the distance
- // between the def and its use; the longer the distance, the more
- // likely it is to cause register pressure.
- if (orderNo - orderNo2 < 500)
- return SDValue();
-
- // Now, check if at least one of the FMUL's operands is live beyond
- // node N, which guarantees that the FMA will not increase register
- // pressure at node N.
- bool opIsLive = false;
- const SDNode *left = N0.getOperand(0).getNode();
- const SDNode *right = N0.getOperand(1).getNode();
-
- if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
- opIsLive = true;
-
- if (!opIsLive)
- for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- int orderNo3 = User->getIROrder();
- if (orderNo3 > orderNo) {
- opIsLive = true;
- break;
- }
- }
-
- if (!opIsLive)
- for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- int orderNo3 = User->getIROrder();
- if (orderNo3 > orderNo) {
- opIsLive = true;
- break;
- }
- }
-
- if (!opIsLive)
- return SDValue();
- }
-
- return DAG.getNode(ISD::FMA, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1), N1);
- }
- }
-
- return SDValue();
-}
-
-/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
-///
-static SDValue PerformADDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const NVPTXSubtarget &Subtarget,
- CodeGenOpt::Level OptLevel) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
-
- // First try with the default operand order.
- if (SDValue Result =
- PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
- return Result;
-
- // If that didn't work, try again with the operands commuted.
- return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
-}
-
-static SDValue PerformANDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // The type legalizer turns a vector load of i8 values into a zextload to i16
- // registers, optionally ANY_EXTENDs it (if target type is integer),
- // and ANDs off the high 8 bits. Since we turn this load into a
- // target-specific DAG node, the DAG combiner fails to eliminate these AND
- // nodes. Do that here.
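- // Concretely, the pattern
- //   (and (any_extend (LoadV2/LoadV4 ..., zextload i8)), 0xff)
- // is replaced with the load result itself, since the zextload already
- // guarantees that the high bits are zero.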
- SDValue Val = N->getOperand(0);
- SDValue Mask = N->getOperand(1);
-
- if (isa<ConstantSDNode>(Val)) {
- std::swap(Val, Mask);
- }
-
- SDValue AExt;
- // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
- if (Val.getOpcode() == ISD::ANY_EXTEND) {
- AExt = Val;
- Val = Val->getOperand(0);
- }
-
- if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
- Val = Val->getOperand(0);
- }
-
- if (Val->getOpcode() == NVPTXISD::LoadV2 ||
- Val->getOpcode() == NVPTXISD::LoadV4) {
- ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
- if (!MaskCnst) {
- // Not an AND with a constant
- return SDValue();
- }
-
- uint64_t MaskVal = MaskCnst->getZExtValue();
- if (MaskVal != 0xff) {
- // Not an AND that chops off top 8 bits
- return SDValue();
- }
-
- MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
- if (!Mem) {
- // Not a MemSDNode?!?
- return SDValue();
- }
-
- EVT MemVT = Mem->getMemoryVT();
- if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
- // We only handle the i8 case
- return SDValue();
- }
-
- unsigned ExtType =
- cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
- getZExtValue();
- if (ExtType == ISD::SEXTLOAD) {
- // If for some reason the load is a sextload, the and is needed to zero
- // out the high 8 bits
- return SDValue();
- }
-
- bool AddTo = false;
- if (AExt.getNode() != nullptr) {
- // Re-insert the ext as a zext.
- Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
- AExt.getValueType(), Val);
- AddTo = true;
- }
-
- // If we get here, the AND is unnecessary. Just replace it with the load
- DCI.CombineTo(N, Val, AddTo);
- }
-
- return SDValue();
-}
-
-static SDValue PerformREMCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
-
- // Don't do anything at less than -O2.
- if (OptLevel < CodeGenOpt::Default)
- return SDValue();
-
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- bool IsSigned = N->getOpcode() == ISD::SREM;
- unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
-
- const SDValue &Num = N->getOperand(0);
- const SDValue &Den = N->getOperand(1);
-
- for (const SDNode *U : Num->uses()) {
- if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
- U->getOperand(1) == Den) {
- // Num % Den -> Num - (Num / Den) * Den
- return DAG.getNode(ISD::SUB, DL, VT, Num,
- DAG.getNode(ISD::MUL, DL, VT,
- DAG.getNode(DivOpc, DL, VT, Num, Den),
- Den));
- }
- }
- return SDValue();
-}
-
-enum OperandSignedness {
- Signed = 0,
- Unsigned,
- Unknown
-};
-
-/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
-/// that can be demoted to \p OptSize bits without loss of information. The
-/// signedness of the operand, if determinable, is placed in \p S.
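-/// For example, (sext i16 %x to i32) is demotable to 16 bits with \p S set
-/// to Signed, and (zext i16 %x to i32) is demotable with \p S set to
-/// Unsigned.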
-static bool IsMulWideOperandDemotable(SDValue Op,
- unsigned OptSize,
- OperandSignedness &S) {
- S = Unknown;
-
- if (Op.getOpcode() == ISD::SIGN_EXTEND ||
- Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
- EVT OrigVT = Op.getOperand(0).getValueType();
- if (OrigVT.getSizeInBits() <= OptSize) {
- S = Signed;
- return true;
- }
- } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
- EVT OrigVT = Op.getOperand(0).getValueType();
- if (OrigVT.getSizeInBits() <= OptSize) {
- S = Unsigned;
- return true;
- }
- }
-
- return false;
-}
-
-/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
-/// be demoted to \p OptSize bits without loss of information. If the operands
-/// contain a constant, it should appear as the RHS operand. The signedness of
-/// the operands is placed in \p IsSigned.
-static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
- unsigned OptSize,
- bool &IsSigned) {
- OperandSignedness LHSSign;
-
- // The LHS operand must be a demotable op
- if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
- return false;
-
- // We should have been able to determine the signedness from the LHS
- if (LHSSign == Unknown)
- return false;
-
- IsSigned = (LHSSign == Signed);
-
- // The RHS can be a demotable op or a constant
- if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
- const APInt &Val = CI->getAPIntValue();
- if (LHSSign == Unsigned) {
- return Val.isIntN(OptSize);
- } else {
- return Val.isSignedIntN(OptSize);
- }
- } else {
- OperandSignedness RHSSign;
- if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
- return false;
-
- return LHSSign == RHSSign;
- }
-}
-
-/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
-/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
-/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
-/// amount.
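-/// For example, (mul i32 (sext i16 %a), (sext i16 %b)) becomes
-/// (MUL_WIDE_SIGNED %a, %b) producing an i32 result, and
-/// (shl i32 (zext i16 %a), 12) is treated as a multiply by 4096 and
-/// widened the same way.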
-static SDValue TryMULWIDECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT MulType = N->getValueType(0);
- if (MulType != MVT::i32 && MulType != MVT::i64) {
- return SDValue();
- }
-
- SDLoc DL(N);
- unsigned OptSize = MulType.getSizeInBits() >> 1;
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
-
- // Canonicalize the multiply so the constant (if any) is on the right
- if (N->getOpcode() == ISD::MUL) {
- if (isa<ConstantSDNode>(LHS)) {
- std::swap(LHS, RHS);
- }
- }
-
- // If we have a SHL, determine the actual multiply amount
- if (N->getOpcode() == ISD::SHL) {
- ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
- if (!ShlRHS) {
- return SDValue();
- }
-
- APInt ShiftAmt = ShlRHS->getAPIntValue();
- unsigned BitWidth = MulType.getSizeInBits();
- if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
- APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
- RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
- } else {
- return SDValue();
- }
- }
-
- bool Signed;
- // Verify that our operands are demotable
- if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
- return SDValue();
- }
-
- EVT DemotedVT;
- if (MulType == MVT::i32) {
- DemotedVT = MVT::i16;
- } else {
- DemotedVT = MVT::i32;
- }
-
- // Truncate the operands to the correct size. Note that these are just for
- // type consistency and will (likely) be eliminated in later phases.
- SDValue TruncLHS =
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
- SDValue TruncRHS =
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
-
- unsigned Opc;
- if (Signed) {
- Opc = NVPTXISD::MUL_WIDE_SIGNED;
- } else {
- Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
- }
-
- return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
-}
-
-/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
-static SDValue PerformMULCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel > 0) {
- // Try mul.wide combining at OptLevel > 0
- if (SDValue Ret = TryMULWIDECombine(N, DCI))
- return Ret;
- }
-
- return SDValue();
-}
-
-/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
-static SDValue PerformSHLCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel > 0) {
- // Try mul.wide combining at OptLevel > 0
- if (SDValue Ret = TryMULWIDECombine(N, DCI))
- return Ret;
- }
-
- return SDValue();
-}
-
-static SDValue PerformSETCCCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT CCType = N->getValueType(0);
- SDValue A = N->getOperand(0);
- SDValue B = N->getOperand(1);
-
- if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
- return SDValue();
-
- SDLoc DL(N);
- // setp.f16x2 returns two scalar predicates, which we need to
- // convert back to v2i1. The returned result will be scalarized by
- // the legalizer, but the comparison will remain a single vector
- // instruction.
- SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
- DCI.DAG.getVTList(MVT::i1, MVT::i1),
- {A, B, N->getOperand(2)});
- return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
- CCNode.getValue(1));
-}
-
-SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
- switch (N->getOpcode()) {
- default: break;
- case ISD::ADD:
- case ISD::FADD:
- return PerformADDCombine(N, DCI, STI, OptLevel);
- case ISD::MUL:
- return PerformMULCombine(N, DCI, OptLevel);
- case ISD::SHL:
- return PerformSHLCombine(N, DCI, OptLevel);
- case ISD::AND:
- return PerformANDCombine(N, DCI);
- case ISD::UREM:
- case ISD::SREM:
- return PerformREMCombine(N, DCI, OptLevel);
- case ISD::SETCC:
- return PerformSETCCCombine(N, DCI);
- }
- return SDValue();
-}
-
-/// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
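-/// For example, a sufficiently aligned load of <4 x float> becomes a single
-/// NVPTXISD::LoadV4 node with four f32 results plus a chain, which are then
-/// reassembled into a vector with BUILD_VECTOR.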
-static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Results) {
- EVT ResVT = N->getValueType(0);
- SDLoc DL(N);
-
- assert(ResVT.isVector() && "Vector load must have vector type");
-
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not
- // legal. We can (and should) split that into 2 loads of <2 x double> here,
- // but that is left as a TODO for now.
- assert(ResVT.isSimple() && "Can only handle simple types");
- switch (ResVT.getSimpleVT().SimpleTy) {
- default:
- return;
- case MVT::v2i8:
- case MVT::v2i16:
- case MVT::v2i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v2f32:
- case MVT::v2f64:
- case MVT::v4i8:
- case MVT::v4i16:
- case MVT::v4i32:
- case MVT::v4f16:
- case MVT::v4f32:
- case MVT::v8f16: // <4 x f16x2>
- // This is a "native" vector type
- break;
- }
-
- LoadSDNode *LD = cast<LoadSDNode>(N);
-
- unsigned Align = LD->getAlignment();
- auto &TD = DAG.getDataLayout();
- unsigned PrefAlign =
- TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
- if (Align < PrefAlign) {
- // This load is not sufficiently aligned, so bail out and let this vector
- // load be scalarized. Note that we may still be able to emit smaller
- // vector loads. For example, if we are loading a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return;
- }
-
- EVT EltVT = ResVT.getVectorElementType();
- unsigned NumElts = ResVT.getVectorNumElements();
-
- // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propagate the "real" type as the memory type.
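- // For example, a <4 x i8> load is emitted as a LoadV4 with four i16
- // results, each of which is truncated back to i8 below.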
- bool NeedTrunc = false;
- if (EltVT.getSizeInBits() < 16) {
- EltVT = MVT::i16;
- NeedTrunc = true;
- }
-
- unsigned Opcode = 0;
- SDVTList LdResVTs;
- bool LoadF16x2 = false;
-
- switch (NumElts) {
- default:
- return;
- case 2:
- Opcode = NVPTXISD::LoadV2;
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
- break;
- case 4: {
- Opcode = NVPTXISD::LoadV4;
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- case 8: {
- // v8f16 is a special case. PTX doesn't have ld.v8.f16
- // instruction. Instead, we split the vector into v2f16 chunks and
- // load them with ld.v4.b32.
- assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
- LoadF16x2 = true;
- Opcode = NVPTXISD::LoadV4;
- EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
- MVT::Other};
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- }
-
- // Copy regular operands
- SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
-
- // The select routine does not have access to the LoadSDNode instance, so
- // pass along the extension information
- OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- LD->getMemoryVT(),
- LD->getMemOperand());
-
- SmallVector<SDValue, 8> ScalarRes;
- if (LoadF16x2) {
- // Split v2f16 subvectors back into individual elements.
- NumElts /= 2;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue SubVector = NewLD.getValue(i);
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
- DAG.getIntPtrConstant(0, DL));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
- DAG.getIntPtrConstant(1, DL));
- ScalarRes.push_back(E0);
- ScalarRes.push_back(E1);
- }
- } else {
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Res = NewLD.getValue(i);
- if (NeedTrunc)
- Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
- ScalarRes.push_back(Res);
- }
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
-
- Results.push_back(BuildVec);
- Results.push_back(LoadChain);
-}
-
-static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Results) {
- SDValue Chain = N->getOperand(0);
- SDValue Intrin = N->getOperand(1);
- SDLoc DL(N);
-
- // Get the intrinsic ID
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p: {
- EVT ResVT = N->getValueType(0);
-
- if (ResVT.isVector()) {
- // Vector LDG/LDU
-
- unsigned NumElts = ResVT.getVectorNumElements();
- EVT EltVT = ResVT.getVectorElementType();
-
- // Since LDU/LDG are target nodes, we cannot rely on DAG type
- // legalization. Therefore, we must ensure the type is legal. For i1 and
- // i8, we set the loaded type to i16 and propagate the "real" type as the
- // memory type.
- bool NeedTrunc = false;
- if (EltVT.getSizeInBits() < 16) {
- EltVT = MVT::i16;
- NeedTrunc = true;
- }
-
- unsigned Opcode = 0;
- SDVTList LdResVTs;
-
- switch (NumElts) {
- default:
- return;
- case 2:
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- Opcode = NVPTXISD::LDGV2;
- break;
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
- Opcode = NVPTXISD::LDUV2;
- break;
- }
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
- break;
- case 4: {
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- Opcode = NVPTXISD::LDGV4;
- break;
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
- Opcode = NVPTXISD::LDUV4;
- break;
- }
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- }
-
- SmallVector<SDValue, 8> OtherOps;
-
- // Copy regular operands
-
- OtherOps.push_back(Chain); // Chain
- // Skip operand 1 (intrinsic ID)
- // Others
- OtherOps.append(N->op_begin() + 2, N->op_end());
-
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- MemSD->getMemoryVT(),
- MemSD->getMemOperand());
-
- SmallVector<SDValue, 4> ScalarRes;
-
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Res = NewLD.getValue(i);
- if (NeedTrunc)
- Res =
- DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
- ScalarRes.push_back(Res);
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- SDValue BuildVec =
- DAG.getBuildVector(ResVT, DL, ScalarRes);
-
- Results.push_back(BuildVec);
- Results.push_back(LoadChain);
- } else {
- // i8 LDG/LDU
- assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
- "Custom handling of non-i8 ldu/ldg?");
-
- // Just copy all operands as-is
- SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
-
- // Force output to i16
- SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
-
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
-
- // We make sure the memory type is i8, which will be used during isel
- // to select the proper instruction.
- SDValue NewLD =
- DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
- MVT::i8, MemSD->getMemOperand());
-
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
- NewLD.getValue(0)));
- Results.push_back(NewLD.getValue(1));
- }
- }
- }
-}
-
-void NVPTXTargetLowering::ReplaceNodeResults(
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
- switch (N->getOpcode()) {
- default:
- report_fatal_error("Unhandled custom legalization");
- case ISD::LOAD:
- ReplaceLoadVector(N, DAG, Results);
- return;
- case ISD::INTRINSIC_W_CHAIN:
- ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
- return;
- }
-}
-
-// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
-void NVPTXSection::anchor() {}
-
-NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
- delete static_cast<NVPTXSection *>(TextSection);
- delete static_cast<NVPTXSection *>(DataSection);
- delete static_cast<NVPTXSection *>(BSSSection);
- delete static_cast<NVPTXSection *>(ReadOnlySection);
-
- delete static_cast<NVPTXSection *>(StaticCtorSection);
- delete static_cast<NVPTXSection *>(StaticDtorSection);
- delete static_cast<NVPTXSection *>(LSDASection);
- delete static_cast<NVPTXSection *>(EHFrameSection);
- delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
- delete static_cast<NVPTXSection *>(DwarfInfoSection);
- delete static_cast<NVPTXSection *>(DwarfLineSection);
- delete static_cast<NVPTXSection *>(DwarfFrameSection);
- delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
- delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
- delete static_cast<NVPTXSection *>(DwarfStrSection);
- delete static_cast<NVPTXSection *>(DwarfLocSection);
- delete static_cast<NVPTXSection *>(DwarfARangesSection);
- delete static_cast<NVPTXSection *>(DwarfRangesSection);
- delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
-}
-
-MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
- const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
- return getDataSection();
-}
+//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NVPTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "NVPTX.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXSection.h"
+#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXTargetObjectFile.h"
+#include "NVPTXUtilities.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetCallingConv.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "nvptx-lower"
+
+using namespace llvm;
+
+static unsigned int uniqueCallSite = 0;
+
+static cl::opt<bool> sched4reg(
+ "nvptx-sched4reg",
+ cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
+
+static cl::opt<unsigned>
+FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
+ " 1: do it 2: do it aggressively"),
+ cl::init(2));
+
+static cl::opt<int> UsePrecDivF32(
+ "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
+ " IEEE Compliant F32 div.rnd if available."),
+ cl::init(2));
+
+static cl::opt<bool> UsePrecSqrtF32(
+ "nvptx-prec-sqrtf32", cl::Hidden,
+ cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
+ cl::init(true));
+
+static cl::opt<bool> FtzEnabled(
+ "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
+ cl::init(false));
+
+int NVPTXTargetLowering::getDivF32Level() const {
+ if (UsePrecDivF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-divf32=N is used on the command-line, always honor it
+ return UsePrecDivF32;
+ } else {
+ // Otherwise, use div.approx if fast math is enabled
+ if (getTargetMachine().Options.UnsafeFPMath)
+ return 0;
+ else
+ return 2;
+ }
+}
+
+bool NVPTXTargetLowering::usePrecSqrtF32() const {
+ if (UsePrecSqrtF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
+ return UsePrecSqrtF32;
+ } else {
+ // Otherwise, use sqrt.approx if fast math is enabled
+ return !getTargetMachine().Options.UnsafeFPMath;
+ }
+}
+
+bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
+ // TODO: Get rid of this flag; there can be only one way to do this.
+ if (FtzEnabled.getNumOccurrences() > 0) {
+ // If nvptx-f32ftz is used on the command-line, always honor it
+ return FtzEnabled;
+ } else {
+ const Function *F = MF.getFunction();
+ // Otherwise, check for an nvptx-f32ftz attribute on the function
+ if (F->hasFnAttribute("nvptx-f32ftz"))
+ return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
+ else
+ return false;
+ }
+}
+
+static bool IsPTXVectorType(MVT VT) {
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::v2i1:
+ case MVT::v4i1:
+ case MVT::v2i8:
+ case MVT::v4i8:
+ case MVT::v2i16:
+ case MVT::v4i16:
+ case MVT::v2i32:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v4f16:
+ case MVT::v8f16: // <4 x f16x2>
+ case MVT::v2f32:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return true;
+ }
+}
+
+/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
+/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
+/// into their primitive components.
+/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
+/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
+/// LowerCall, and LowerReturn.
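+/// For example, a <4 x float> value flattens to four f32 pieces at offsets
+/// 0, 4, 8, and 12, while a <4 x half> value flattens to two v2f16 pieces at
+/// offsets 0 and 4.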
+static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
+ uint64_t StartingOffset = 0) {
+ SmallVector<EVT, 16> TempVTs;
+ SmallVector<uint64_t, 16> TempOffsets;
+
+ ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
+ for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
+ EVT VT = TempVTs[i];
+ uint64_t Off = TempOffsets[i];
+ // Split vectors into individual elements, except for v2f16, which
+ // we will pass as a single scalar.
+ if (VT.isVector()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ EVT EltVT = VT.getVectorElementType();
+ // Vectors with an even number of f16 elements will be passed to
+ // us as an array of v2f16 elements. We must match this so we
+ // stay in sync with Ins/Outs.
+ if (EltVT == MVT::f16 && NumElts % 2 == 0) {
+ EltVT = MVT::v2f16;
+ NumElts /= 2;
+ }
+ for (unsigned j = 0; j != NumElts; ++j) {
+ ValueVTs.push_back(EltVT);
+ if (Offsets)
+ Offsets->push_back(Off + j * EltVT.getStoreSize());
+ }
+ } else {
+ ValueVTs.push_back(VT);
+ if (Offsets)
+ Offsets->push_back(Off);
+ }
+ }
+}
+
+// Check whether we can merge loads/stores of some of the pieces of a
+// flattened function parameter or return value into a single vector
+// load/store.
+//
+// The flattened parameter is represented as a list of EVTs and
+// offsets, and the whole structure is aligned to ParamAlignment. This
+// function determines whether we can load/store pieces of the
+// parameter starting at index Idx using a single vectorized op of
+// size AccessSize. If so, it returns the number of param pieces
+// covered by the vector op. Otherwise, it returns 1.
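+//
+// For example, four contiguous f32 pieces at offsets 0, 4, 8, and 12 in a
+// parameter aligned to 16 bytes can be covered by one 16-byte access starting
+// at index 0, so the function returns 4.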
+static unsigned CanMergeParamLoadStoresStartingAt(
+ unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
+ assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
+
+ // Can't vectorize if param alignment is not sufficient.
+ if (AccessSize > ParamAlignment)
+ return 1;
+ // Can't vectorize if offset is not aligned.
+ if (Offsets[Idx] & (AccessSize - 1))
+ return 1;
+
+ EVT EltVT = ValueVTs[Idx];
+ unsigned EltSize = EltVT.getStoreSize();
+
+ // Element is too large to vectorize.
+ if (EltSize >= AccessSize)
+ return 1;
+
+ unsigned NumElts = AccessSize / EltSize;
+ // Can't vectorize if AccessSize is not a multiple of EltSize.
+ if (AccessSize != EltSize * NumElts)
+ return 1;
+
+ // We don't have enough elements to vectorize.
+ if (Idx + NumElts > ValueVTs.size())
+ return 1;
+
+ // PTX ISA can only deal with 2- and 4-element vector ops.
+ if (NumElts != 4 && NumElts != 2)
+ return 1;
+
+ for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
+ // Types do not match.
+ if (ValueVTs[j] != EltVT)
+ return 1;
+
+ // Elements are not contiguous.
+ if (Offsets[j] - Offsets[j - 1] != EltSize)
+ return 1;
+ }
+ // OK. We can vectorize ValueVTs[Idx..Idx+NumElts)
+ return NumElts;
+}
+
+// Flags for tracking per-element vectorization state of loads/stores
+// of a flattened function parameter or return value.
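+//
+// A run merged into a 4-element access is tagged
+// [PVF_FIRST, PVF_INNER, PVF_INNER, PVF_LAST]; a piece that cannot be merged
+// with its neighbors stays PVF_SCALAR.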
+enum ParamVectorizationFlags {
+ PVF_INNER = 0x0, // Middle elements of a vector.
+ PVF_FIRST = 0x1, // First element of the vector.
+ PVF_LAST = 0x2, // Last element of the vector.
+ // Scalar is effectively a 1-element vector.
+ PVF_SCALAR = PVF_FIRST | PVF_LAST
+};
+
+// Computes whether and how we can vectorize the loads/stores of a
+// flattened function parameter or return value.
+//
+// The flattened parameter is represented as the list of ValueVTs and
+// Offsets, and is aligned to ParamAlignment bytes. We return a vector
+// of the same size as ValueVTs indicating how each piece should be
+// loaded/stored (i.e. as a scalar, or as part of a vector
+// load/store).
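+//
+// For example, eight contiguous f32 pieces in a parameter aligned to only
+// 8 bytes cannot use 16-byte accesses, so they end up marked as four
+// 2-element (64-bit) groups.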
+static SmallVector<ParamVectorizationFlags, 16>
+VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets,
+ unsigned ParamAlignment) {
+ // Set vector size to match ValueVTs and mark all elements as
+ // scalars by default.
+ SmallVector<ParamVectorizationFlags, 16> VectorInfo;
+ VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
+
+ // Check what we can vectorize using 128/64/32/16-bit accesses.
+ for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
+ // Skip elements we've already processed.
+ assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
+ for (unsigned AccessSize : {16, 8, 4, 2}) {
+ unsigned NumElts = CanMergeParamLoadStoresStartingAt(
+ I, AccessSize, ValueVTs, Offsets, ParamAlignment);
+ // Mark vectorized elements.
+ switch (NumElts) {
+ default:
+ llvm_unreachable("Unexpected return value");
+ case 1:
+ // Can't vectorize using this size, try next smaller size.
+ continue;
+ case 2:
+ assert(I + 1 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_LAST;
+ I += 1;
+ break;
+ case 4:
+ assert(I + 3 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_INNER;
+ VectorInfo[I + 2] = PVF_INNER;
+ VectorInfo[I + 3] = PVF_LAST;
+ I += 3;
+ break;
+ }
+ // Break out of the inner loop because we've already succeeded
+ // using the largest possible AccessSize.
+ break;
+ }
+ }
+ return VectorInfo;
+}
+
+// NVPTXTargetLowering Constructor.
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI)
+ : TargetLowering(TM), nvTM(&TM), STI(STI) {
+ // Always lower memset, memcpy, and memmove intrinsics to load/store
+ // instructions, rather than generating calls to memset, memcpy, or memmove.
+ MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
+
+ setBooleanContents(ZeroOrNegativeOneBooleanContent);
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+
+ // Jump is Expensive. Don't create extra control flow for 'and', 'or'
+ // condition branches.
+ setJumpIsExpensive(true);
+
+ // Wide divides are _very_ slow. Try to reduce the width of the divide if
+ // possible.
+ addBypassSlowDiv(64, 32);
+
+ // By default, use the Source scheduling
+ if (sched4reg)
+ setSchedulingPreference(Sched::RegPressure);
+ else
+ setSchedulingPreference(Sched::Source);
+
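+ // Helper to set the action for an FP16/FP16x2 operation: use Action when the
+ // subtarget allows FP16 math, and NoF16Action (typically Promote or Expand,
+ // falling back to f32) otherwise.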
+ auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
+ LegalizeAction NoF16Action) {
+ setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
+ };
+
+ addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
+ addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
+ addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
+ addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
+ addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
+ addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
+ addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
+ addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
+
+ // Conversion to/from FP16/FP16x2 is always legal.
+ setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
+ setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
+ setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
+
+ // Operations not directly supported by NVPTX.
+ setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i8, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+ // Some SIGN_EXTEND_INREG can be done using cvt instruction.
+ // For others we will expand to a SHL/SRA pair.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
+
+ setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
+
+ if (STI.hasROT64()) {
+ setOperationAction(ISD::ROTL, MVT::i64, Legal);
+ setOperationAction(ISD::ROTR, MVT::i64, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ }
+ if (STI.hasROT32()) {
+ setOperationAction(ISD::ROTL, MVT::i32, Legal);
+ setOperationAction(ISD::ROTR, MVT::i32, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ }
+
+ setOperationAction(ISD::ROTL, MVT::i16, Expand);
+ setOperationAction(ISD::ROTR, MVT::i16, Expand);
+ setOperationAction(ISD::ROTL, MVT::i8, Expand);
+ setOperationAction(ISD::ROTR, MVT::i8, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i16, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Indirect branch is not supported.
+ // This also disables Jump Table creation.
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+
+ // We want to legalize constant-related memmove and memcpy
+ // intrinsics.
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+
+ // Turn FP extload into load/fpextend
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
+ // Turn FP truncstore into trunc + store.
+ // FIXME: vector types should also be expanded
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // PTX does not support load / store predicate registers
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
+
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setTruncStoreAction(VT, MVT::i1, Expand);
+ }
+
+ // This is legal in NVPTX
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
+
+ // TRAP can be lowered to PTX trap
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+
+ // Register custom handling for vector loads/stores
+ for (MVT VT : MVT::vector_valuetypes()) {
+ if (IsPTXVectorType(VT)) {
+ setOperationAction(ISD::LOAD, VT, Custom);
+ setOperationAction(ISD::STORE, VT, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
+ }
+ }
+
+ // Custom handling for i8 intrinsics
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+
+ for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
+ setOperationAction(ISD::ABS, Ty, Legal);
+ setOperationAction(ISD::SMIN, Ty, Legal);
+ setOperationAction(ISD::SMAX, Ty, Legal);
+ setOperationAction(ISD::UMIN, Ty, Legal);
+ setOperationAction(ISD::UMAX, Ty, Legal);
+
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ }
+
+ setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+
+ // PTX does not directly support SELP of i1, so promote to i32 first
+ setOperationAction(ISD::SELECT, MVT::i1, Custom);
+
+ // PTX cannot multiply two i64s in a single instruction.
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ // We have some custom DAG combine patterns for these nodes
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SREM);
+ setTargetDAGCombine(ISD::UREM);
+
+ // setcc for f16x2 needs special handling to prevent the legalizer's
+ // attempt to scalarize it due to v2i1 not being legal.
+ if (STI.allowFP16Math())
+ setTargetDAGCombine(ISD::SETCC);
+
+ // Promote fp16 arithmetic if fp16 hardware isn't available or the
+ // user passed --nvptx-no-fp16-math. The flag is useful because,
+ // although sm_53+ GPUs have some sort of FP16 support in
+ // hardware, only sm_53 and sm_60 have a full implementation. Others
+ // only have a token amount of hardware and are likely to run faster
+ // by using fp32 units instead.
+ for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
+ setFP16OperationAction(Op, MVT::f16, Legal, Promote);
+ setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
+ }
+
+ // There's no neg.f16 instruction. Expand to (0-x).
+ setOperationAction(ISD::FNEG, MVT::f16, Expand);
+ setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
+
+ // (would be) Library functions.
+
+ // These map to conversion instructions for scalar FP types.
+ for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
+ ISD::FROUND, ISD::FTRUNC}) {
+ setOperationAction(Op, MVT::f16, Legal);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+
+ // 'Expand' implements FCOPYSIGN without calling an external library.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+
+ // These map to corresponding instructions for f32/f64. f16 must be
+ // promoted to f32. v2f16 is expanded to f16, which is then promoted
+ // to f32.
+ for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
+ ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
+ setOperationAction(Op, MVT::f16, Promote);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+ setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
+
+ // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
+ // No FPOW or FREM in PTX.
+
+ // Now deduce the information based on the above-mentioned
+ // actions.
+ computeRegisterProperties(STI.getRegisterInfo());
+}
+
+const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch ((NVPTXISD::NodeType)Opcode) {
+ case NVPTXISD::FIRST_NUMBER:
+ break;
+ case NVPTXISD::CALL:
+ return "NVPTXISD::CALL";
+ case NVPTXISD::RET_FLAG:
+ return "NVPTXISD::RET_FLAG";
+ case NVPTXISD::LOAD_PARAM:
+ return "NVPTXISD::LOAD_PARAM";
+ case NVPTXISD::Wrapper:
+ return "NVPTXISD::Wrapper";
+ case NVPTXISD::DeclareParam:
+ return "NVPTXISD::DeclareParam";
+ case NVPTXISD::DeclareScalarParam:
+ return "NVPTXISD::DeclareScalarParam";
+ case NVPTXISD::DeclareRet:
+ return "NVPTXISD::DeclareRet";
+ case NVPTXISD::DeclareScalarRet:
+ return "NVPTXISD::DeclareScalarRet";
+ case NVPTXISD::DeclareRetParam:
+ return "NVPTXISD::DeclareRetParam";
+ case NVPTXISD::PrintCall:
+ return "NVPTXISD::PrintCall";
+ case NVPTXISD::PrintConvergentCall:
+ return "NVPTXISD::PrintConvergentCall";
+ case NVPTXISD::PrintCallUni:
+ return "NVPTXISD::PrintCallUni";
+ case NVPTXISD::PrintConvergentCallUni:
+ return "NVPTXISD::PrintConvergentCallUni";
+ case NVPTXISD::LoadParam:
+ return "NVPTXISD::LoadParam";
+ case NVPTXISD::LoadParamV2:
+ return "NVPTXISD::LoadParamV2";
+ case NVPTXISD::LoadParamV4:
+ return "NVPTXISD::LoadParamV4";
+ case NVPTXISD::StoreParam:
+ return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamV2:
+ return "NVPTXISD::StoreParamV2";
+ case NVPTXISD::StoreParamV4:
+ return "NVPTXISD::StoreParamV4";
+ case NVPTXISD::StoreParamS32:
+ return "NVPTXISD::StoreParamS32";
+ case NVPTXISD::StoreParamU32:
+ return "NVPTXISD::StoreParamU32";
+ case NVPTXISD::CallArgBegin:
+ return "NVPTXISD::CallArgBegin";
+ case NVPTXISD::CallArg:
+ return "NVPTXISD::CallArg";
+ case NVPTXISD::LastCallArg:
+ return "NVPTXISD::LastCallArg";
+ case NVPTXISD::CallArgEnd:
+ return "NVPTXISD::CallArgEnd";
+ case NVPTXISD::CallVoid:
+ return "NVPTXISD::CallVoid";
+ case NVPTXISD::CallVal:
+ return "NVPTXISD::CallVal";
+ case NVPTXISD::CallSymbol:
+ return "NVPTXISD::CallSymbol";
+ case NVPTXISD::Prototype:
+ return "NVPTXISD::Prototype";
+ case NVPTXISD::MoveParam:
+ return "NVPTXISD::MoveParam";
+ case NVPTXISD::StoreRetval:
+ return "NVPTXISD::StoreRetval";
+ case NVPTXISD::StoreRetvalV2:
+ return "NVPTXISD::StoreRetvalV2";
+ case NVPTXISD::StoreRetvalV4:
+ return "NVPTXISD::StoreRetvalV4";
+ case NVPTXISD::PseudoUseParam:
+ return "NVPTXISD::PseudoUseParam";
+ case NVPTXISD::RETURN:
+ return "NVPTXISD::RETURN";
+ case NVPTXISD::CallSeqBegin:
+ return "NVPTXISD::CallSeqBegin";
+ case NVPTXISD::CallSeqEnd:
+ return "NVPTXISD::CallSeqEnd";
+ case NVPTXISD::CallPrototype:
+ return "NVPTXISD::CallPrototype";
+ case NVPTXISD::LoadV2:
+ return "NVPTXISD::LoadV2";
+ case NVPTXISD::LoadV4:
+ return "NVPTXISD::LoadV4";
+ case NVPTXISD::LDGV2:
+ return "NVPTXISD::LDGV2";
+ case NVPTXISD::LDGV4:
+ return "NVPTXISD::LDGV4";
+ case NVPTXISD::LDUV2:
+ return "NVPTXISD::LDUV2";
+ case NVPTXISD::LDUV4:
+ return "NVPTXISD::LDUV4";
+ case NVPTXISD::StoreV2:
+ return "NVPTXISD::StoreV2";
+ case NVPTXISD::StoreV4:
+ return "NVPTXISD::StoreV4";
+ case NVPTXISD::FUN_SHFL_CLAMP:
+ return "NVPTXISD::FUN_SHFL_CLAMP";
+ case NVPTXISD::FUN_SHFR_CLAMP:
+ return "NVPTXISD::FUN_SHFR_CLAMP";
+ case NVPTXISD::IMAD:
+ return "NVPTXISD::IMAD";
+ case NVPTXISD::SETP_F16X2:
+ return "NVPTXISD::SETP_F16X2";
+ case NVPTXISD::Dummy:
+ return "NVPTXISD::Dummy";
+ case NVPTXISD::MUL_WIDE_SIGNED:
+ return "NVPTXISD::MUL_WIDE_SIGNED";
+ case NVPTXISD::MUL_WIDE_UNSIGNED:
+ return "NVPTXISD::MUL_WIDE_UNSIGNED";
+ case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
+ case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ return "NVPTXISD::Tex1DFloatFloatLevel";
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ return "NVPTXISD::Tex1DFloatFloatGrad";
+ case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
+ case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
+ case NVPTXISD::Tex1DS32FloatLevel:
+ return "NVPTXISD::Tex1DS32FloatLevel";
+ case NVPTXISD::Tex1DS32FloatGrad:
+ return "NVPTXISD::Tex1DS32FloatGrad";
+ case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
+ case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
+ case NVPTXISD::Tex1DU32FloatLevel:
+ return "NVPTXISD::Tex1DU32FloatLevel";
+ case NVPTXISD::Tex1DU32FloatGrad:
+ return "NVPTXISD::Tex1DU32FloatGrad";
+ case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
+ case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex1DArrayFloatFloatLevel";
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex1DArrayFloatFloatGrad";
+ case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
+ case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ return "NVPTXISD::Tex1DArrayS32FloatLevel";
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ return "NVPTXISD::Tex1DArrayS32FloatGrad";
+ case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
+ case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ return "NVPTXISD::Tex1DArrayU32FloatLevel";
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ return "NVPTXISD::Tex1DArrayU32FloatGrad";
+ case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
+ case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ return "NVPTXISD::Tex2DFloatFloatLevel";
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ return "NVPTXISD::Tex2DFloatFloatGrad";
+ case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
+ case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
+ case NVPTXISD::Tex2DS32FloatLevel:
+ return "NVPTXISD::Tex2DS32FloatLevel";
+ case NVPTXISD::Tex2DS32FloatGrad:
+ return "NVPTXISD::Tex2DS32FloatGrad";
+ case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
+ case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
+ case NVPTXISD::Tex2DU32FloatLevel:
+ return "NVPTXISD::Tex2DU32FloatLevel";
+ case NVPTXISD::Tex2DU32FloatGrad:
+ return "NVPTXISD::Tex2DU32FloatGrad";
+ case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
+ case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex2DArrayFloatFloatLevel";
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex2DArrayFloatFloatGrad";
+ case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
+ case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ return "NVPTXISD::Tex2DArrayS32FloatLevel";
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ return "NVPTXISD::Tex2DArrayS32FloatGrad";
+ case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
+ case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ return "NVPTXISD::Tex2DArrayU32FloatLevel";
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ return "NVPTXISD::Tex2DArrayU32FloatGrad";
+ case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
+ case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ return "NVPTXISD::Tex3DFloatFloatLevel";
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ return "NVPTXISD::Tex3DFloatFloatGrad";
+ case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
+ case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
+ case NVPTXISD::Tex3DS32FloatLevel:
+ return "NVPTXISD::Tex3DS32FloatLevel";
+ case NVPTXISD::Tex3DS32FloatGrad:
+ return "NVPTXISD::Tex3DS32FloatGrad";
+ case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
+ case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
+ case NVPTXISD::Tex3DU32FloatLevel:
+ return "NVPTXISD::Tex3DU32FloatLevel";
+ case NVPTXISD::Tex3DU32FloatGrad:
+ return "NVPTXISD::Tex3DU32FloatGrad";
+ case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ return "NVPTXISD::TexCubeFloatFloatLevel";
+ case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
+ case NVPTXISD::TexCubeS32FloatLevel:
+ return "NVPTXISD::TexCubeS32FloatLevel";
+ case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
+ case NVPTXISD::TexCubeU32FloatLevel:
+ return "NVPTXISD::TexCubeU32FloatLevel";
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ return "NVPTXISD::TexCubeArrayFloatFloat";
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexCubeArrayS32Float:
+ return "NVPTXISD::TexCubeArrayS32Float";
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexCubeArrayS32FloatLevel";
+ case NVPTXISD::TexCubeArrayU32Float:
+ return "NVPTXISD::TexCubeArrayU32Float";
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4R2DFloatFloat:
+ return "NVPTXISD::Tld4R2DFloatFloat";
+ case NVPTXISD::Tld4G2DFloatFloat:
+ return "NVPTXISD::Tld4G2DFloatFloat";
+ case NVPTXISD::Tld4B2DFloatFloat:
+ return "NVPTXISD::Tld4B2DFloatFloat";
+ case NVPTXISD::Tld4A2DFloatFloat:
+ return "NVPTXISD::Tld4A2DFloatFloat";
+ case NVPTXISD::Tld4R2DS64Float:
+ return "NVPTXISD::Tld4R2DS64Float";
+ case NVPTXISD::Tld4G2DS64Float:
+ return "NVPTXISD::Tld4G2DS64Float";
+ case NVPTXISD::Tld4B2DS64Float:
+ return "NVPTXISD::Tld4B2DS64Float";
+ case NVPTXISD::Tld4A2DS64Float:
+ return "NVPTXISD::Tld4A2DS64Float";
+ case NVPTXISD::Tld4R2DU64Float:
+ return "NVPTXISD::Tld4R2DU64Float";
+ case NVPTXISD::Tld4G2DU64Float:
+ return "NVPTXISD::Tld4G2DU64Float";
+ case NVPTXISD::Tld4B2DU64Float:
+ return "NVPTXISD::Tld4B2DU64Float";
+ case NVPTXISD::Tld4A2DU64Float:
+ return "NVPTXISD::Tld4A2DU64Float";
+
+ case NVPTXISD::TexUnified1DFloatS32:
+ return "NVPTXISD::TexUnified1DFloatS32";
+ case NVPTXISD::TexUnified1DFloatFloat:
+ return "NVPTXISD::TexUnified1DFloatFloat";
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DFloatFloatLevel";
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DFloatFloatGrad";
+ case NVPTXISD::TexUnified1DS32S32:
+ return "NVPTXISD::TexUnified1DS32S32";
+ case NVPTXISD::TexUnified1DS32Float:
+ return "NVPTXISD::TexUnified1DS32Float";
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ return "NVPTXISD::TexUnified1DS32FloatLevel";
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ return "NVPTXISD::TexUnified1DS32FloatGrad";
+ case NVPTXISD::TexUnified1DU32S32:
+ return "NVPTXISD::TexUnified1DU32S32";
+ case NVPTXISD::TexUnified1DU32Float:
+ return "NVPTXISD::TexUnified1DU32Float";
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ return "NVPTXISD::TexUnified1DU32FloatLevel";
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ return "NVPTXISD::TexUnified1DU32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ return "NVPTXISD::TexUnified1DArrayFloatS32";
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ return "NVPTXISD::TexUnified1DArrayFloatFloat";
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ return "NVPTXISD::TexUnified1DArrayS32S32";
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ return "NVPTXISD::TexUnified1DArrayS32Float";
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ return "NVPTXISD::TexUnified1DArrayU32S32";
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ return "NVPTXISD::TexUnified1DArrayU32Float";
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified2DFloatS32:
+ return "NVPTXISD::TexUnified2DFloatS32";
+ case NVPTXISD::TexUnified2DFloatFloat:
+ return "NVPTXISD::TexUnified2DFloatFloat";
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DFloatFloatLevel";
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DFloatFloatGrad";
+ case NVPTXISD::TexUnified2DS32S32:
+ return "NVPTXISD::TexUnified2DS32S32";
+ case NVPTXISD::TexUnified2DS32Float:
+ return "NVPTXISD::TexUnified2DS32Float";
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ return "NVPTXISD::TexUnified2DS32FloatLevel";
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ return "NVPTXISD::TexUnified2DS32FloatGrad";
+ case NVPTXISD::TexUnified2DU32S32:
+ return "NVPTXISD::TexUnified2DU32S32";
+ case NVPTXISD::TexUnified2DU32Float:
+ return "NVPTXISD::TexUnified2DU32Float";
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ return "NVPTXISD::TexUnified2DU32FloatLevel";
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ return "NVPTXISD::TexUnified2DU32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ return "NVPTXISD::TexUnified2DArrayFloatS32";
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ return "NVPTXISD::TexUnified2DArrayFloatFloat";
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ return "NVPTXISD::TexUnified2DArrayS32S32";
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ return "NVPTXISD::TexUnified2DArrayS32Float";
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ return "NVPTXISD::TexUnified2DArrayU32S32";
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ return "NVPTXISD::TexUnified2DArrayU32Float";
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified3DFloatS32:
+ return "NVPTXISD::TexUnified3DFloatS32";
+ case NVPTXISD::TexUnified3DFloatFloat:
+ return "NVPTXISD::TexUnified3DFloatFloat";
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ return "NVPTXISD::TexUnified3DFloatFloatLevel";
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ return "NVPTXISD::TexUnified3DFloatFloatGrad";
+ case NVPTXISD::TexUnified3DS32S32:
+ return "NVPTXISD::TexUnified3DS32S32";
+ case NVPTXISD::TexUnified3DS32Float:
+ return "NVPTXISD::TexUnified3DS32Float";
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ return "NVPTXISD::TexUnified3DS32FloatLevel";
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ return "NVPTXISD::TexUnified3DS32FloatGrad";
+ case NVPTXISD::TexUnified3DU32S32:
+ return "NVPTXISD::TexUnified3DU32S32";
+ case NVPTXISD::TexUnified3DU32Float:
+ return "NVPTXISD::TexUnified3DU32Float";
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ return "NVPTXISD::TexUnified3DU32FloatLevel";
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ return "NVPTXISD::TexUnified3DU32FloatGrad";
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeFloatFloat";
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ return "NVPTXISD::TexUnifiedCubeS32Float";
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ return "NVPTXISD::TexUnifiedCubeU32Float";
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayS32Float";
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayU32Float";
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ return "NVPTXISD::Tld4UnifiedR2DS64Float";
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ return "NVPTXISD::Tld4UnifiedG2DS64Float";
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ return "NVPTXISD::Tld4UnifiedB2DS64Float";
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ return "NVPTXISD::Tld4UnifiedA2DS64Float";
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ return "NVPTXISD::Tld4UnifiedR2DU64Float";
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ return "NVPTXISD::Tld4UnifiedG2DU64Float";
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ return "NVPTXISD::Tld4UnifiedB2DU64Float";
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ return "NVPTXISD::Tld4UnifiedA2DU64Float";
+
+ case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
+ case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
+ case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
+ case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
+ case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
+ case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
+ case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
+ case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
+ case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
+ case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
+ case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
+
+ case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
+ case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
+ case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
+ case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
+ case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
+ case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
+ case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
+ case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
+ case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
+ case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
+ case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
+ case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
+ case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
+ case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
+ case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
+ case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
+ case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
+ case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
+ case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
+ case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
+ case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
+
+ case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
+ case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
+ case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
+ case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
+ case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
+ case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
+ case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
+ case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
+ case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
+ case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
+ case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
+ case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
+ case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
+ case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
+ case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
+ case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
+ case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
+ case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
+ case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
+ case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
+ case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
+
+ case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
+ case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
+ case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
+ case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
+ case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
+ case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
+ case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
+ case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
+ case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
+ case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
+ case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
+
+ case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
+ case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
+ case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
+ case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
+ case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
+ case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
+ case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
+ case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
+ case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
+ case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
+ case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
+
+ case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
+ case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
+ case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
+ case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
+ case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
+ case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
+ case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
+ case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
+ case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
+ case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
+ case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
+
+ case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
+ case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
+ case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
+ case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
+ case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
+ case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
+ case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
+ case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
+ case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
+ case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
+ case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
+
+ case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
+ case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
+ case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
+ case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
+ case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
+ case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
+ case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
+ case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
+ case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
+ case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
+ case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
+
+ case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
+ case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
+ case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
+ case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
+ case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
+ case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
+ case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
+ case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
+ case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
+ case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
+ case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
+
+ case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
+ case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
+ case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
+ case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
+ case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
+ case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
+ case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
+ case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
+ case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
+ case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
+ case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
+
+ case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
+ case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
+ case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
+ case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
+ case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
+ case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
+ case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
+ case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
+ case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
+ case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
+ case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
+
+ case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
+ case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
+ case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
+ case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
+ case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
+ case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
+ case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
+ case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
+ case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
+ case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
+ case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
+
+ case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
+ case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
+ case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
+ case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
+ case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
+ case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
+ case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
+ case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
+ case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
+ case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
+ case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
+ }
+ return nullptr;
+}
+
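+// Vectors of i1 are split so that their elements can be legalized
+// individually; v2f16 is kept as a legal type because the backend has a
+// dedicated Float16x2 register class for it.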
+TargetLoweringBase::LegalizeTypeAction
+NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
+ if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
+ return TypeSplitVector;
+ if (VT == MVT::v2f16)
+ return TypeLegal;
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
+SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+ int Enabled, int &ExtraSteps,
+ bool &UseOneConst,
+ bool Reciprocal) const {
+ if (!(Enabled == ReciprocalEstimate::Enabled ||
+ (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
+ return SDValue();
+
+ if (ExtraSteps == ReciprocalEstimate::Unspecified)
+ ExtraSteps = 0;
+
+ SDLoc DL(Operand);
+ EVT VT = Operand.getValueType();
+ bool Ftz = useF32FTZ(DAG.getMachineFunction());
+
+ auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(IID, DL, MVT::i32), Operand);
+ };
+
+ // The sqrt and rsqrt refinement processes assume we always start out with an
+ // approximation of the rsqrt. Therefore, if we're going to do any refinement
+ // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
+ // any refinement, we must return a regular sqrt.
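+ // The caller (the generic DAG combiner) is expected to perform any
+ // Newton-Raphson refinement itself; this hook only supplies the initial
+ // approximation.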
+ if (Reciprocal || ExtraSteps > 0) {
+ if (VT == MVT::f32)
+ return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
+ : Intrinsic::nvvm_rsqrt_approx_f);
+ else if (VT == MVT::f64)
+ return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
+ else
+ return SDValue();
+ } else {
+ if (VT == MVT::f32)
+ return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
+ : Intrinsic::nvvm_sqrt_approx_f);
+ else {
+ // There's no sqrt.approx.f64 instruction, so we emit
+ // reciprocal(rsqrt(x)). This is faster than
+ // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
+ // x * rsqrt(x).)
+ return DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
+ MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
+ }
+ }
+}
+
+SDValue
+NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+ Op = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
+ return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
+}
+
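+// Builds the PTX ".callprototype" declaration used for indirect calls. For a
+// call returning i32 and taking a single i32 argument it would produce
+// something like:
+//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _);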
+std::string NVPTXTargetLowering::getPrototype(
+ const DataLayout &DL, Type *retTy, const ArgListTy &Args,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
+ const ImmutableCallSite *CS) const {
+ auto PtrVT = getPointerTy(DL);
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return "";
+
+ std::stringstream O;
+ O << "prototype_" << uniqueCallSite << " : .callprototype ";
+
+ if (retTy->getTypeID() == Type::VoidTyID) {
+ O << "()";
+ } else {
+ O << "(";
+ if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
+ unsigned size = 0;
+ if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
+ size = ITy->getBitWidth();
+ } else {
+ assert(retTy->isFloatingPointTy() &&
+ "Floating point type expected here");
+ size = retTy->getPrimitiveSizeInBits();
+ }
+ // PTX ABI requires all scalar return values to be at least 32
+ // bits in size. fp16 normally uses .b16 as its storage type in
+ // PTX, so its size must be adjusted here, too.
+ if (size < 32)
+ size = 32;
+
+ O << ".param .b" << size << " _";
+ } else if (isa<PointerType>(retTy)) {
+ O << ".param .b" << PtrVT.getSizeInBits() << " _";
+ } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
+ auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
+ O << ".param .align " << retAlignment << " .b8 _["
+ << DL.getTypeAllocSize(retTy) << "]";
+ } else {
+ llvm_unreachable("Unknown return type");
+ }
+ O << ") ";
+ }
+ O << "_ (";
+
+ bool first = true;
+
+ unsigned OIdx = 0;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ Type *Ty = Args[i].Ty;
+ if (!first) {
+ O << ", ";
+ }
+ first = false;
+
+ if (!Outs[OIdx].Flags.isByVal()) {
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ unsigned align = 0;
+ const CallInst *CallI = cast<CallInst>(CS->getInstruction());
+ // +1 because index 0 is reserved for return type alignment
+ if (!getAlign(*CallI, i + 1, align))
+ align = DL.getABITypeAlignment(Ty);
+ unsigned sz = DL.getTypeAllocSize(Ty);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ // update the index for Outs
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, DL, Ty, vtparts);
+ if (unsigned len = vtparts.size())
+ OIdx += len - 1;
+ continue;
+ }
+ // i8 types in IR will be i16 types in SDAG
+ assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
+ (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
+ "type mismatch between callee prototype and arguments");
+ // scalar type
+ unsigned sz = 0;
+ if (isa<IntegerType>(Ty)) {
+ sz = cast<IntegerType>(Ty)->getBitWidth();
+ if (sz < 32)
+ sz = 32;
+ } else if (isa<PointerType>(Ty)) {
+ sz = PtrVT.getSizeInBits();
+ } else if (Ty->isHalfTy())
+ // PTX ABI requires all scalar parameters to be at least 32
+ // bits in size. fp16 normally uses .b16 as its storage type
+ // in PTX, so its size must be adjusted here, too.
+ sz = 32;
+ else
+ sz = Ty->getPrimitiveSizeInBits();
+ O << ".param .b" << sz << " ";
+ O << "_";
+ continue;
+ }
+ auto *PTy = dyn_cast<PointerType>(Ty);
+ assert(PTy && "Param with byval attribute should be a pointer type");
+ Type *ETy = PTy->getElementType();
+
+ unsigned align = Outs[OIdx].Flags.getByValAlign();
+ unsigned sz = DL.getTypeAllocSize(ETy);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ }
+ O << ");";
+ return O.str();
+}
+
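+// Returns the alignment (in bytes) to assume for parameter/return slot Idx of
+// a call: alignment annotations on the call site or on the callee function
+// (looking through constant casts of the call target, via getAlign) if
+// available, otherwise the ABI type alignment of Ty.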
+unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
+ const ImmutableCallSite *CS,
+ Type *Ty, unsigned Idx,
+ const DataLayout &DL) const {
+ if (!CS) {
+ // CallSite is null; fall back to the ABI type alignment.
+ return DL.getABITypeAlignment(Ty);
+ }
+
+ unsigned Align = 0;
+ const Value *DirectCallee = CS->getCalledFunction();
+
+ if (!DirectCallee) {
+ // We don't have a direct function symbol, but that may be because of
+ // constant cast instructions in the call.
+ const Instruction *CalleeI = CS->getInstruction();
+ assert(CalleeI && "Call target is not a function or derived value?");
+
+ // With bitcast'd call targets, the instruction will be the call
+ if (isa<CallInst>(CalleeI)) {
+ // Check if we have call alignment metadata
+ if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))
+ return Align;
+
+ const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
+ // Ignore any bitcast instructions
+ while (isa<ConstantExpr>(CalleeV)) {
+ const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
+ if (!CE->isCast())
+ break;
+ // Look through the bitcast
+ CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
+ }
+
+ // We have now looked past all of the bitcasts. Do we finally have a
+ // Function?
+ if (isa<Function>(CalleeV))
+ DirectCallee = CalleeV;
+ }
+ }
+
+ // Check for function alignment information if we found that the
+ // ultimate target is a Function
+ if (DirectCallee)
+ if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
+ return Align;
+
+ // Call is indirect or alignment information is not available, fall back to
+ // the ABI type alignment
+ return DL.getABITypeAlignment(Ty);
+}
+
+SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ ArgListTy &Args = CLI.getArgs();
+ Type *RetTy = CLI.RetTy;
+ ImmutableCallSite *CS = CLI.CS;
+ const DataLayout &DL = DAG.getDataLayout();
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ SDValue tempChain = Chain;
+ Chain = DAG.getCALLSEQ_START(
+ Chain, DAG.getIntPtrConstant(uniqueCallSite, dl, true), dl);
+ SDValue InFlag = Chain.getValue(1);
+
+ unsigned paramCount = 0;
+ // Args.size() and Outs.size() need not match.
+ // Outs.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Outs)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Outs.
+ // So a different index should be used for indexing into Outs/OutVals.
+ // See similar issue in LowerFormalArguments.
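+ // For example, a single aggregate argument of type {i32, i32} occupies one
+ // slot in Args but two slots in Outs/OutVals.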
+ unsigned OIdx = 0;
+ // Declare the .param or .reg spaces needed to pass values
+ // to the function.
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ EVT VT = Outs[OIdx].VT;
+ Type *Ty = Args[i].Ty;
+
+ if (!Outs[OIdx].Flags.isByVal()) {
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
+ unsigned ArgAlign =
+ getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
+ unsigned AllocSize = DL.getTypeAllocSize(Ty);
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ bool NeedAlign; // Does argument declaration specify alignment?
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ SDValue DeclareParamOps[] = {
+ Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps);
+ NeedAlign = true;
+ } else {
+ // declare .param .b<size> .param<n>;
+ if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
+ // PTX ABI requires integral types to be at least 32 bits in
+ // size. FP16 is loaded/stored using i16, so it's handled
+ // here as well.
+ AllocSize = 4;
+ }
+ SDValue DeclareScalarParamOps[] = {
+ Chain, DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(AllocSize * 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
+ DeclareScalarParamOps);
+ NeedAlign = false;
+ }
+ InFlag = Chain.getValue(1);
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
+ // than 32-bits are sign extended or zero extended, depending on
+ // whether they are signed or unsigned types. This case applies
+ // only to scalar parameters and not to aggregate values.
+ bool ExtendIntegerParam =
+ Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
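+      // For example, an i8 scalar argument is declared as ".param .b32" and
+      // its value is sign- or zero-extended to 32 bits before the param store.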
+
+ auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
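+      // VectorInfo marks, per element, whether it begins (PVF_FIRST) and/or
+      // ends (PVF_LAST) a vectorized param store; e.g. a suitably aligned
+      // <4 x float> argument is written with a single StoreParamV4.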
+ SmallVector<SDValue, 6> StoreOperands;
+ for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+ // New store.
+ if (VectorInfo[j] & PVF_FIRST) {
+          assert(StoreOperands.empty() && "Unfinished preceding store.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
+ StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
+ }
+
+ EVT EltVT = VTs[j];
+ SDValue StVal = OutVals[OIdx];
+ if (ExtendIntegerParam) {
+ assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
+ // zext/sext to i32
+ StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND,
+ dl, MVT::i32, StVal);
+ } else if (EltVT.getSizeInBits() < 16) {
+ // Use 16-bit registers for small stores as it's the
+ // smallest general purpose register size supported by NVPTX.
+ StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
+ }
+
+ // Record the value to store.
+ StoreOperands.push_back(StVal);
+
+ if (VectorInfo[j] & PVF_LAST) {
+ unsigned NumElts = StoreOperands.size() - 3;
+ NVPTXISD::NodeType Op;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::StoreParam;
+ break;
+ case 2:
+ Op = NVPTXISD::StoreParamV2;
+ break;
+ case 4:
+ Op = NVPTXISD::StoreParamV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ StoreOperands.push_back(InFlag);
+
+          // Adjust type of the store op if we've extended the scalar
+          // parameter value.
+ EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
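+          // The per-element alignment is the GCD of the overall argument
+          // alignment and the element's offset, e.g. gcd(8, 4) = 4 for the
+          // element at byte offset 4 of an 8-byte-aligned aggregate.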
+ unsigned EltAlign =
+ NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
+
+ Chain = DAG.getMemIntrinsicNode(
+ Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
+ TheStoreType, MachinePointerInfo(), EltAlign);
+ InFlag = Chain.getValue(1);
+
+ // Cleanup.
+ StoreOperands.clear();
+ }
+ ++OIdx;
+ }
+ assert(StoreOperands.empty() && "Unfinished parameter store.");
+ if (VTs.size() > 0)
+ --OIdx;
+ ++paramCount;
+ continue;
+ }
+
+ // ByVal arguments
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
+ assert(PTy && "Type of a byval parameter should be pointer");
+ ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
+
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ unsigned sz = Outs[OIdx].Flags.getByValSize();
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
+    // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
+ // so we don't need to worry about natural alignment or not.
+ // See TargetLowering::LowerCallTo().
+
+    // Enforce a minimum alignment of 4 to work around a ptxas miscompile
+ // for sm_50+. See corresponding alignment adjustment in
+ // emitFunctionParamList() for details.
+ if (ArgAlign < 4)
+ ArgAlign = 4;
+ SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps);
+ InFlag = Chain.getValue(1);
+ for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+ EVT elemtype = VTs[j];
+ int curOffset = Offsets[j];
+ unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
+ auto PtrVT = getPointerTy(DL);
+ SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
+ DAG.getConstant(curOffset, dl, PtrVT));
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), PartAlign);
+ if (elemtype.getSizeInBits() < 16) {
+ theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
+ }
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(curOffset, dl, MVT::i32),
+ theVal, InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+ CopyParamOps, elemtype,
+ MachinePointerInfo());
+
+ InFlag = Chain.getValue(1);
+ }
+ ++paramCount;
+ }
+
+ GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
+ unsigned retAlignment = 0;
+
+ // Handle Result
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> resvtparts;
+ ComputeValueVTs(*this, DL, RetTy, resvtparts);
+
+ // Declare
+ // .param .align 16 .b8 retval0[<size-in-bytes>], or
+ // .param .b<size-in-bits> retval0
+ unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
+ // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
+ // these three types to match the logic in
+ // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
+ // Plus, this behavior is consistent with nvcc's.
+ if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
+ RetTy->isPointerTy()) {
+      // Scalars need to be at least 32 bits wide.
+ if (resultsz < 32)
+ resultsz = 32;
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(resultsz, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+ DeclareRetOps);
+ InFlag = Chain.getValue(1);
+ } else {
+ retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain,
+ DAG.getConstant(retAlignment, dl, MVT::i32),
+ DAG.getConstant(resultsz / 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
+ DeclareRetOps);
+ InFlag = Chain.getValue(1);
+ }
+ }
+
+ if (!Func) {
+    // This is the indirect function call case: PTX requires a prototype of the
+    // form
+    // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
+    // to be emitted, and the label has to be used as the last arg of the call
+    // instruction.
+    // The prototype is embedded in a string and passed as the operand of a
+    // CallPrototype SDNode, which is printed out as the value of the string.
+ SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
+ const char *ProtoStr =
+ nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
+ SDValue ProtoOps[] = {
+ Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
+ };
+ Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
+ InFlag = Chain.getValue(1);
+ }
+ // Op to just print "call"
+ SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrintCallOps[] = {
+ Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
+ };
+ // We model convergent calls as separate opcodes.
+ unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
+ if (CLI.IsConvergent)
+ Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
+ : NVPTXISD::PrintConvergentCall;
+ Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the function name
+ SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallVoidOps[] = { Chain, Callee, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the param list
+ SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgBeginOps[] = { Chain, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
+ CallArgBeginOps);
+ InFlag = Chain.getValue(1);
+
+ for (unsigned i = 0, e = paramCount; i != e; ++i) {
+ unsigned opcode;
+ if (i == (e - 1))
+ opcode = NVPTXISD::LastCallArg;
+ else
+ opcode = NVPTXISD::CallArg;
+ SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(i, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
+ InFlag = Chain.getValue(1);
+ }
+ SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgEndOps[] = { Chain,
+ DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
+ InFlag = Chain.getValue(1);
+
+ if (!Func) {
+ SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrototypeOps[] = { Chain,
+ DAG.getConstant(uniqueCallSite, dl, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
+ InFlag = Chain.getValue(1);
+ }
+
+ // Generate loads from param memory/moves from registers for result
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
+ assert(VTs.size() == Ins.size() && "Bad value decomposition");
+
+ unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
+
+ SmallVector<EVT, 6> LoadVTs;
+ int VecIdx = -1; // Index of the first element of the vector.
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+ // 32-bits are sign extended or zero extended, depending on whether
+ // they are signed or unsigned types.
+ bool ExtendIntegerRetVal =
+ RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ bool needTruncate = false;
+ EVT TheLoadType = VTs[i];
+ EVT EltType = Ins[i].VT;
+ unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
+ if (ExtendIntegerRetVal) {
+ TheLoadType = MVT::i32;
+ EltType = MVT::i32;
+ needTruncate = true;
+ } else if (TheLoadType.getSizeInBits() < 16) {
+ if (VTs[i].isInteger())
+ needTruncate = true;
+ EltType = MVT::i16;
+ }
+
+ // Record index of the very first element of the vector.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
+ VecIdx = i;
+ }
+
+ LoadVTs.push_back(EltType);
+
+ if (VectorInfo[i] & PVF_LAST) {
+ unsigned NumElts = LoadVTs.size();
+ LoadVTs.push_back(MVT::Other);
+ LoadVTs.push_back(MVT::Glue);
+ NVPTXISD::NodeType Op;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::LoadParam;
+ break;
+ case 2:
+ Op = NVPTXISD::LoadParamV2;
+ break;
+ case 4:
+ Op = NVPTXISD::LoadParamV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ SDValue LoadOperands[] = {
+ Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
+ SDValue RetVal = DAG.getMemIntrinsicNode(
+ Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
+ MachinePointerInfo(), EltAlign);
+
+ for (unsigned j = 0; j < NumElts; ++j) {
+ SDValue Ret = RetVal.getValue(j);
+ if (needTruncate)
+ Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
+ InVals.push_back(Ret);
+ }
+ Chain = RetVal.getValue(NumElts);
+ InFlag = RetVal.getValue(NumElts + 1);
+
+ // Cleanup
+ VecIdx = -1;
+ LoadVTs.clear();
+ }
+ }
+ }
+
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, dl, true),
+ DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
+ true),
+ InFlag, dl);
+ uniqueCallSite++;
+
+ // set isTailCall to false for now, until we figure out how to express
+ // tail call optimization in PTX
+ isTailCall = false;
+ return Chain;
+}
+
+// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
+// (see LegalizeDAG.cpp). This is slow and uses local memory.
+// We use extract/insert/build vector, just as LegalizeOp() did in LLVM 2.5.
+SDValue
+NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ SDLoc dl(Node);
+ SmallVector<SDValue, 8> Ops;
+ unsigned NumOperands = Node->getNumOperands();
+ for (unsigned i = 0; i < NumOperands; ++i) {
+ SDValue SubOp = Node->getOperand(i);
+ EVT VVT = SubOp.getNode()->getValueType(0);
+ EVT EltVT = VVT.getVectorElementType();
+ unsigned NumSubElem = VVT.getVectorNumElements();
+ for (unsigned j = 0; j < NumSubElem; ++j) {
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
+ DAG.getIntPtrConstant(j, dl)));
+ }
+ }
+ return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
+}
+
+// We can init constant f16x2 with a single .b32 move. Normally it
+// would get lowered as two constant loads and vector-packing move.
+// mov.b16 %h1, 0x4000;
+// mov.b16 %h2, 0x3C00;
+// mov.b32 %hh2, {%h2, %h1};
+// Instead we want just a constant move:
+// mov.b32 %hh2, 0x40003C00
+//
+// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
+// generates good SASS in both cases.
+SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (!(Op->getValueType(0) == MVT::v2f16 &&
+ isa<ConstantFPSDNode>(Op->getOperand(0)) &&
+ isa<ConstantFPSDNode>(Op->getOperand(1))))
+ return Op;
+
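+  // Pack the two f16 bit patterns into one i32: element 0 goes to the low 16
+  // bits and element 1 to the high 16 bits, e.g. <1.0, 2.0> -> 0x40003C00 as
+  // in the mov.b32 example above.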
+ APInt E0 =
+ cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
+ APInt E1 =
+ cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
+ SDValue Const =
+ DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
+ return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
+}
+
+SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Index = Op->getOperand(1);
+ // Constant index will be matched by tablegen.
+ if (isa<ConstantSDNode>(Index.getNode()))
+ return Op;
+
+ // Extract individual elements and select one of them.
+ SDValue Vector = Op->getOperand(0);
+ EVT VectorVT = Vector.getValueType();
+ assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
+ EVT EltVT = VectorVT.getVectorElementType();
+
+ SDLoc dl(Op.getNode());
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
+ DAG.getIntPtrConstant(0, dl));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
+ DAG.getIntPtrConstant(1, dl));
+ return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
+ ISD::CondCode::SETEQ);
+}
+
+/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
+/// 1) return two i32 values and take a 2 x i32 value to shift plus a shift
+/// amount, or
+/// 2) return two i64 values and take a 2 x i64 value to shift plus a shift
+/// amount.
+SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
+
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+ unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
+
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
+    // For 32-bit shifts on sm_35 and later, we can use the funnel shift
+    // 'shf' instruction.
+ // {dHi, dLo} = {aHi, aLo} >> Amt
+ // dHi = aHi >> Amt
+ // dLo = shf.r.clamp aLo, aHi, Amt
+
+ SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
+ ShAmt);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+  } else {
+ // {dHi, dLo} = {aHi, aLo} >> Amt
+ // - if (Amt>=size) then
+ // dLo = aHi >> (Amt-size)
+ // dHi = aHi >> Amt (this is either all 0 or all 1)
+ // else
+ // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
+ // dHi = aHi >> Amt
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32));
+ SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
+
+ SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ISD::SETGE);
+ SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+}
+
+/// LowerShiftLeftParts - Lower SHL_PARTS, which
+/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
+/// amount, or
+/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
+/// amount.
+SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ assert(Op.getOpcode() == ISD::SHL_PARTS);
+
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
+    // For 32-bit shifts on sm_35 and later, we can use the funnel shift
+    // 'shf' instruction.
+ // {dHi, dLo} = {aHi, aLo} << Amt
+ // dHi = shf.l.clamp aLo, aHi, Amt
+ // dLo = aLo << Amt
+
+ SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
+ ShAmt);
+ SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+  } else {
+ // {dHi, dLo} = {aHi, aLo} << Amt
+ // - if (Amt>=size) then
+ // dLo = aLo << Amt (all 0)
+    //      dHi = aLo << (Amt-size)
+ // else
+ // dLo = aLo << Amt
+ // dHi = (aHi << Amt) | (aLo >> (size-Amt))
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32));
+ SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
+ SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ISD::SETGE);
+ SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+ SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+}
+
+SDValue
+NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::RETURNADDR:
+ return SDValue();
+ case ISD::FRAMEADDR:
+ return SDValue();
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN:
+ return Op;
+ case ISD::BUILD_VECTOR:
+ return LowerBUILD_VECTOR(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR:
+ return Op;
+ case ISD::EXTRACT_VECTOR_ELT:
+ return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::CONCAT_VECTORS:
+ return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::STORE:
+ return LowerSTORE(Op, DAG);
+ case ISD::LOAD:
+ return LowerLOAD(Op, DAG);
+ case ISD::SHL_PARTS:
+ return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ case ISD::SRL_PARTS:
+ return LowerShiftRightParts(Op, DAG);
+ case ISD::SELECT:
+ return LowerSelect(Op, DAG);
+ default:
+ llvm_unreachable("Custom lowering not defined for operation");
+ }
+}
+
+SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Op0 = Op->getOperand(0);
+ SDValue Op1 = Op->getOperand(1);
+ SDValue Op2 = Op->getOperand(2);
+ SDLoc DL(Op.getNode());
+
+ assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
+
+ Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
+ Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
+ SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
+
+ return Trunc;
+}
+
+SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ if (Op.getValueType() == MVT::i1)
+ return LowerLOADi1(Op, DAG);
+
+  // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
+  // loads; we have to handle them here.
+ if (Op.getValueType() == MVT::v2f16) {
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
+ EVT MemVT = Load->getMemoryVT();
+ if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
+ Load->getAddressSpace(), Load->getAlignment())) {
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
+ return DAG.getMergeValues(Ops, SDLoc(Op));
+ }
+ }
+
+ return SDValue();
+}
+
+// v = ld i1* addr
+// =>
+// v1 = ld i8* addr (-> i16)
+// v = trunc i16 to i1
+SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ LoadSDNode *LD = cast<LoadSDNode>(Node);
+ SDLoc dl(Node);
+ assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
+ assert(Node->getValueType(0) == MVT::i1 &&
+ "Custom lowering for i1 load only");
+ SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(), LD->getAlignment(),
+ LD->getMemOperand()->getFlags());
+ SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
+ // The legalizer (the caller) is expecting two values from the legalized
+ // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
+ // in LegalizeDAG.cpp which also uses MergeValues.
+ SDValue Ops[] = { result, LD->getChain() };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT VT = Store->getMemoryVT();
+
+ if (VT == MVT::i1)
+ return LowerSTOREi1(Op, DAG);
+
+  // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
+  // stores; we have to handle them here.
+ if (VT == MVT::v2f16 &&
+ !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+ Store->getAddressSpace(), Store->getAlignment()))
+ return expandUnalignedStore(Store, DAG);
+
+ if (VT.isVector())
+ return LowerSTOREVector(Op, DAG);
+
+ return SDValue();
+}
+
+SDValue
+NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *N = Op.getNode();
+ SDValue Val = N->getOperand(1);
+ SDLoc DL(N);
+ EVT ValVT = Val.getValueType();
+
+ if (ValVT.isVector()) {
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 stores of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ if (!ValVT.isSimple())
+ return SDValue();
+ switch (ValVT.getSimpleVT().SimpleTy) {
+ default:
+ return SDValue();
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ MemSDNode *MemSD = cast<MemSDNode>(N);
+ const DataLayout &TD = DAG.getDataLayout();
+
+ unsigned Align = MemSD->getAlignment();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This store is not sufficiently aligned, so bail out and let this vector
+ // store be scalarized. Note that we may still be able to emit smaller
+ // vector stores. For example, if we are storing a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return SDValue();
+ }
+
+ unsigned Opcode = 0;
+ EVT EltVT = ValVT.getVectorElementType();
+ unsigned NumElts = ValVT.getVectorNumElements();
+
+ // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // stored type to i16 and propagate the "real" type as the memory type.
+ bool NeedExt = false;
+ if (EltVT.getSizeInBits() < 16)
+ NeedExt = true;
+
+ bool StoreF16x2 = false;
+ switch (NumElts) {
+ default:
+ return SDValue();
+ case 2:
+ Opcode = NVPTXISD::StoreV2;
+ break;
+ case 4:
+ Opcode = NVPTXISD::StoreV4;
+ break;
+ case 8:
+      // v8f16 is a special case. PTX doesn't have an st.v8.f16
+      // instruction. Instead, we split the vector into v2f16 chunks and
+      // store them with st.v4.b32.
+ assert(EltVT == MVT::f16 && "Wrong type for the vector.");
+ Opcode = NVPTXISD::StoreV4;
+ StoreF16x2 = true;
+ break;
+ }
+
+ SmallVector<SDValue, 8> Ops;
+
+ // First is the chain
+ Ops.push_back(N->getOperand(0));
+
+ if (StoreF16x2) {
+ // Combine f16,f16 -> v2f16
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+ DAG.getIntPtrConstant(i * 2, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+ DAG.getIntPtrConstant(i * 2 + 1, DL));
+ SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
+ Ops.push_back(V2);
+ }
+ } else {
+ // Then the split values
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
+ DAG.getIntPtrConstant(i, DL));
+ if (NeedExt)
+ ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
+ Ops.push_back(ExtVal);
+ }
+ }
+
+ // Then any remaining arguments
+ Ops.append(N->op_begin() + 2, N->op_end());
+
+ SDValue NewSt =
+ DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
+ MemSD->getMemoryVT(), MemSD->getMemOperand());
+
+ return NewSt;
+ }
+
+ return SDValue();
+}
+
+// st i1 v, addr
+// =>
+// v1 = zxt v to i16
+// st.u8 i16, addr
+SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ SDLoc dl(Node);
+ StoreSDNode *ST = cast<StoreSDNode>(Node);
+ SDValue Tmp1 = ST->getChain();
+ SDValue Tmp2 = ST->getBasePtr();
+ SDValue Tmp3 = ST->getValue();
+ assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
+ Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
+ SDValue Result =
+ DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
+ ST->getAlignment(), ST->getMemOperand()->getFlags());
+ return Result;
+}
+
+SDValue
+NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
+ std::string ParamSym;
+ raw_string_ostream ParamStr(ParamSym);
+
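+  // Parameter symbols are named <function>_param_<index>, e.g. "foo_param_2"
+  // for the third parameter of a function named foo.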
+ ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
+ ParamStr.flush();
+
+ std::string *SavedStr =
+ nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
+ return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
+}
+
+// Check to see if the kernel argument is image*_t or sampler_t
+
+static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
+ static const char *const specialTypes[] = { "struct._image2d_t",
+ "struct._image3d_t",
+ "struct._sampler_t" };
+
+ Type *Ty = arg->getType();
+ auto *PTy = dyn_cast<PointerType>(Ty);
+
+ if (!PTy)
+ return false;
+
+ if (!context)
+ return false;
+
+ auto *STy = dyn_cast<StructType>(PTy->getElementType());
+ if (!STy || STy->isLiteral())
+ return false;
+
+ return std::find(std::begin(specialTypes), std::end(specialTypes),
+ STy->getName()) != std::end(specialTypes);
+}
+
+SDValue NVPTXTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const DataLayout &DL = DAG.getDataLayout();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ const Function *F = MF.getFunction();
+ const AttributeList &PAL = F->getAttributes();
+ const TargetLowering *TLI = STI.getTargetLowering();
+
+ SDValue Root = DAG.getRoot();
+ std::vector<SDValue> OutChains;
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ std::vector<Type *> argTypes;
+ std::vector<const Argument *> theArgs;
+ for (const Argument &I : F->args()) {
+ theArgs.push_back(&I);
+ argTypes.push_back(I.getType());
+ }
+ // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
+ // Ins.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Ins)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Ins.
+ // So a different index should be used for indexing into Ins.
+ // See similar issue in LowerCall.
+ unsigned InsIdx = 0;
+
+ int idx = 0;
+ for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
+ Type *Ty = argTypes[i];
+
+    // If the kernel argument is image*_t or sampler_t, convert it to
+    // an i32 constant holding the parameter position. This can later
+    // be matched in the AsmPrinter to output the correct mangled name.
+ if (isImageOrSamplerVal(
+ theArgs[i],
+ (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
+ : nullptr))) {
+ assert(isKernelFunction(*F) &&
+ "Only kernels can have image/sampler params");
+ InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
+ continue;
+ }
+
+ if (theArgs[i]->use_empty()) {
+ // argument is dead
+ if (Ty->isAggregateType()) {
+ SmallVector<EVT, 16> vtparts;
+
+ ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
+ assert(vtparts.size() > 0 && "empty aggregate type not expected");
+ for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
+ ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ if (vtparts.size() > 0)
+ --InsIdx;
+ continue;
+ }
+ if (Ty->isVectorTy()) {
+ EVT ObjectVT = getValueType(DL, Ty);
+ unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
+ for (unsigned parti = 0; parti < NumRegs; ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ if (NumRegs > 0)
+ --InsIdx;
+ continue;
+ }
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ continue;
+ }
+
+ // In the following cases, assign a node order of "idx+1"
+ // to newly created nodes. The SDNodes for params have to
+ // appear in the same order as their order of appearance
+ // in the original function. "idx+1" holds that order.
+ if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
+ bool aggregateIsPacked = false;
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ aggregateIsPacked = STy->isPacked();
+
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
+ assert(VTs.size() > 0 && "Unexpected empty type.");
+ auto VectorInfo =
+ VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
+
+ SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+ int VecIdx = -1; // Index of the first element of the current vector.
+ for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
+ if (VectorInfo[parti] & PVF_FIRST) {
+ assert(VecIdx == -1 && "Orphaned vector.");
+ VecIdx = parti;
+ }
+
+ // That's the last element of this store op.
+ if (VectorInfo[parti] & PVF_LAST) {
+ unsigned NumElts = parti - VecIdx + 1;
+ EVT EltVT = VTs[parti];
+ // i1 is loaded/stored as i8.
+ EVT LoadVT = EltVT;
+ if (EltVT == MVT::i1)
+ LoadVT = MVT::i8;
+ else if (EltVT == MVT::v2f16)
+ // getLoad needs a vector type, but it can't handle
+ // vectors which contain v2f16 elements. So we must load
+ // using i32 here and then bitcast back.
+ LoadVT = MVT::i32;
+
+ EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
+ SDValue VecAddr =
+ DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
+ DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
+ Value *srcValue = Constant::getNullValue(PointerType::get(
+ EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
+ SDValue P =
+ DAG.getLoad(VecVT, dl, Root, VecAddr,
+ MachinePointerInfo(srcValue), aggregateIsPacked,
+ MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant);
+ if (P.getNode())
+ P.getNode()->setIROrder(idx + 1);
+ for (unsigned j = 0; j < NumElts; ++j) {
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
+ DAG.getIntPtrConstant(j, dl));
+ // We've loaded i1 as an i8 and now must truncate it back to i1
+ if (EltVT == MVT::i1)
+ Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
+ // v2f16 was loaded as an i32. Now we must bitcast it back.
+ else if (EltVT == MVT::v2f16)
+ Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
+            // Extend the element if necessary (e.g. an i8 is loaded
+            // into an i16 register)
+ if (Ins[InsIdx].VT.isInteger() &&
+ Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
+ unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND;
+ Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
+ }
+ InVals.push_back(Elt);
+ }
+
+ // Reset vector tracking state.
+ VecIdx = -1;
+ }
+ ++InsIdx;
+ }
+ if (VTs.size() > 0)
+ --InsIdx;
+ continue;
+ }
+
+    // Param has the ByVal attribute.
+    // Return MoveParam(param symbol).
+    // Ideally, the param symbol could be returned directly,
+    // but when the SDNode builder decides to use it in a CopyToReg(),
+    // instruction selection fails because a TargetExternalSymbol
+    // (not lowered) is target dependent, and CopyToReg assumes
+    // the source is lowered.
+ EVT ObjectVT = getValueType(DL, Ty);
+ assert(ObjectVT == Ins[InsIdx].VT &&
+ "Ins type did not match function type");
+ SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+ SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+ if (p.getNode())
+ p.getNode()->setIROrder(idx + 1);
+ InVals.push_back(p);
+ }
+
+  // Clang will check for an explicit vararg and issue an error if present.
+  // However, Clang will let code with an implicit vararg like f() pass.
+  // See bug 617733. We treat this case as if the arg list is empty.
+ // if (F.isVarArg()) {
+ // assert(0 && "VarArg not supported yet!");
+ //}
+
+ if (!OutChains.empty())
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
+
+ return Chain;
+}
+
+SDValue
+NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ Type *RetTy = MF.getFunction()->getReturnType();
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ const DataLayout DL = DAG.getDataLayout();
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
+ assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
+
+ auto VectorInfo = VectorizePTXValueVTs(
+ VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+ // 32-bits are sign extended or zero extended, depending on whether
+ // they are signed or unsigned types.
+ bool ExtendIntegerRetVal =
+ RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+ SmallVector<SDValue, 6> StoreOperands;
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ // New load/store. Record chain and offset operands.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Orphaned operand list.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
+ }
+
+ SDValue RetVal = OutVals[i];
+ if (ExtendIntegerRetVal) {
+ RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND,
+ dl, MVT::i32, RetVal);
+ } else if (RetVal.getValueSizeInBits() < 16) {
+ // Use 16-bit registers for small load-stores as it's the
+ // smallest general purpose register size supported by NVPTX.
+ RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
+ }
+
+ // Record the value to return.
+ StoreOperands.push_back(RetVal);
+
+ // That's the last element of this store op.
+ if (VectorInfo[i] & PVF_LAST) {
+ NVPTXISD::NodeType Op;
+ unsigned NumElts = StoreOperands.size() - 2;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::StoreRetval;
+ break;
+ case 2:
+ Op = NVPTXISD::StoreRetvalV2;
+ break;
+ case 4:
+ Op = NVPTXISD::StoreRetvalV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ // Adjust type of load/store op if we've extended the scalar
+ // return value.
+ EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
+ Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
+ StoreOperands, TheStoreType,
+ MachinePointerInfo(), 1);
+ // Cleanup vector state.
+ StoreOperands.clear();
+ }
+ }
+
+ return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+void NVPTXTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ if (Constraint.length() > 1)
+ return;
+ else
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ return NVPTXISD::Tex1DFloatS32;
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloat;
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ return NVPTXISD::Tex1DS32S32;
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ return NVPTXISD::Tex1DS32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ return NVPTXISD::Tex1DU32S32;
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ return NVPTXISD::Tex1DU32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ return NVPTXISD::Tex1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ return NVPTXISD::Tex1DArrayS32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ return NVPTXISD::Tex1DArrayU32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ return NVPTXISD::Tex2DFloatS32;
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloat;
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ return NVPTXISD::Tex2DS32S32;
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ return NVPTXISD::Tex2DS32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ return NVPTXISD::Tex2DU32S32;
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ return NVPTXISD::Tex2DU32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ return NVPTXISD::Tex2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ return NVPTXISD::Tex2DArrayS32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ return NVPTXISD::Tex2DArrayU32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ return NVPTXISD::Tex3DFloatS32;
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloat;
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ return NVPTXISD::Tex3DS32S32;
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ return NVPTXISD::Tex3DS32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ return NVPTXISD::Tex3DU32S32;
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ return NVPTXISD::Tex3DU32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloat;
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ return NVPTXISD::TexCubeS32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ return NVPTXISD::TexCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ return NVPTXISD::TexCubeU32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ return NVPTXISD::TexCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4R2DFloatFloat;
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4G2DFloatFloat;
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4B2DFloatFloat;
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4A2DFloatFloat;
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4R2DS64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4G2DS64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4B2DS64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4A2DS64Float;
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4R2DU64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4G2DU64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4B2DU64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4A2DU64Float;
+
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ return NVPTXISD::TexUnified1DFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ return NVPTXISD::TexUnified1DS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ return NVPTXISD::TexUnified1DU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ return NVPTXISD::TexUnified1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ return NVPTXISD::TexUnified1DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ return NVPTXISD::TexUnified1DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ return NVPTXISD::TexUnified2DFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ return NVPTXISD::TexUnified2DS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ return NVPTXISD::TexUnified2DU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ return NVPTXISD::TexUnified2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ return NVPTXISD::TexUnified2DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ return NVPTXISD::TexUnified2DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ return NVPTXISD::TexUnified3DFloatS32;
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ return NVPTXISD::TexUnified3DS32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ return NVPTXISD::TexUnified3DU32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedR2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedG2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedB2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedA2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedR2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedG2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedB2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedA2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedR2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedG2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedB2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedA2DU64Float;
+ }
+}
+
+static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ return NVPTXISD::Suld1DI8Clamp;
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ return NVPTXISD::Suld1DI16Clamp;
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ return NVPTXISD::Suld1DI32Clamp;
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ return NVPTXISD::Suld1DI64Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ return NVPTXISD::Suld1DV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ return NVPTXISD::Suld1DV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ return NVPTXISD::Suld1DV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ return NVPTXISD::Suld1DV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ return NVPTXISD::Suld1DV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ return NVPTXISD::Suld1DV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ return NVPTXISD::Suld1DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ return NVPTXISD::Suld1DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ return NVPTXISD::Suld1DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ return NVPTXISD::Suld1DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ return NVPTXISD::Suld1DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ return NVPTXISD::Suld1DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ return NVPTXISD::Suld1DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ return NVPTXISD::Suld1DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ return NVPTXISD::Suld1DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ return NVPTXISD::Suld1DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ return NVPTXISD::Suld1DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ return NVPTXISD::Suld1DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ return NVPTXISD::Suld2DI8Clamp;
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ return NVPTXISD::Suld2DI16Clamp;
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ return NVPTXISD::Suld2DI32Clamp;
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ return NVPTXISD::Suld2DI64Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ return NVPTXISD::Suld2DV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ return NVPTXISD::Suld2DV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ return NVPTXISD::Suld2DV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ return NVPTXISD::Suld2DV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ return NVPTXISD::Suld2DV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ return NVPTXISD::Suld2DV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ return NVPTXISD::Suld2DV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ return NVPTXISD::Suld2DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ return NVPTXISD::Suld2DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ return NVPTXISD::Suld2DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ return NVPTXISD::Suld2DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ return NVPTXISD::Suld2DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ return NVPTXISD::Suld2DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ return NVPTXISD::Suld2DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ return NVPTXISD::Suld2DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ return NVPTXISD::Suld2DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ return NVPTXISD::Suld2DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ return NVPTXISD::Suld2DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ return NVPTXISD::Suld3DI8Clamp;
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ return NVPTXISD::Suld3DI16Clamp;
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ return NVPTXISD::Suld3DI32Clamp;
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ return NVPTXISD::Suld3DI64Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ return NVPTXISD::Suld3DV2I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ return NVPTXISD::Suld3DV2I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ return NVPTXISD::Suld3DV2I32Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ return NVPTXISD::Suld3DV2I64Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ return NVPTXISD::Suld3DV4I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ return NVPTXISD::Suld3DV4I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ return NVPTXISD::Suld3DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ return NVPTXISD::Suld1DI8Trap;
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ return NVPTXISD::Suld1DI16Trap;
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ return NVPTXISD::Suld1DI32Trap;
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ return NVPTXISD::Suld1DI64Trap;
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ return NVPTXISD::Suld1DV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ return NVPTXISD::Suld1DV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ return NVPTXISD::Suld1DV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ return NVPTXISD::Suld1DV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ return NVPTXISD::Suld1DV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ return NVPTXISD::Suld1DV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ return NVPTXISD::Suld1DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ return NVPTXISD::Suld1DArrayI8Trap;
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ return NVPTXISD::Suld1DArrayI16Trap;
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ return NVPTXISD::Suld1DArrayI32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ return NVPTXISD::Suld1DArrayI64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ return NVPTXISD::Suld1DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ return NVPTXISD::Suld1DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ return NVPTXISD::Suld1DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ return NVPTXISD::Suld1DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ return NVPTXISD::Suld1DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ return NVPTXISD::Suld1DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ return NVPTXISD::Suld1DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ return NVPTXISD::Suld2DI8Trap;
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ return NVPTXISD::Suld2DI16Trap;
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ return NVPTXISD::Suld2DI32Trap;
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ return NVPTXISD::Suld2DI64Trap;
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ return NVPTXISD::Suld2DV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ return NVPTXISD::Suld2DV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ return NVPTXISD::Suld2DV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ return NVPTXISD::Suld2DV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ return NVPTXISD::Suld2DV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ return NVPTXISD::Suld2DV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ return NVPTXISD::Suld2DV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ return NVPTXISD::Suld2DArrayI8Trap;
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ return NVPTXISD::Suld2DArrayI16Trap;
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ return NVPTXISD::Suld2DArrayI32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ return NVPTXISD::Suld2DArrayI64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ return NVPTXISD::Suld2DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ return NVPTXISD::Suld2DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ return NVPTXISD::Suld2DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ return NVPTXISD::Suld2DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ return NVPTXISD::Suld2DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ return NVPTXISD::Suld2DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ return NVPTXISD::Suld2DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ return NVPTXISD::Suld3DI8Trap;
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ return NVPTXISD::Suld3DI16Trap;
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ return NVPTXISD::Suld3DI32Trap;
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ return NVPTXISD::Suld3DI64Trap;
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ return NVPTXISD::Suld3DV2I8Trap;
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ return NVPTXISD::Suld3DV2I16Trap;
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ return NVPTXISD::Suld3DV2I32Trap;
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ return NVPTXISD::Suld3DV2I64Trap;
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ return NVPTXISD::Suld3DV4I8Trap;
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ return NVPTXISD::Suld3DV4I16Trap;
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ return NVPTXISD::Suld3DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ return NVPTXISD::Suld1DI8Zero;
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ return NVPTXISD::Suld1DI16Zero;
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ return NVPTXISD::Suld1DI32Zero;
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ return NVPTXISD::Suld1DI64Zero;
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ return NVPTXISD::Suld1DV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ return NVPTXISD::Suld1DV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ return NVPTXISD::Suld1DV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ return NVPTXISD::Suld1DV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ return NVPTXISD::Suld1DV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ return NVPTXISD::Suld1DV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ return NVPTXISD::Suld1DV4I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ return NVPTXISD::Suld1DArrayI8Zero;
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ return NVPTXISD::Suld1DArrayI16Zero;
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ return NVPTXISD::Suld1DArrayI32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ return NVPTXISD::Suld1DArrayI64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ return NVPTXISD::Suld1DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ return NVPTXISD::Suld1DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ return NVPTXISD::Suld1DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ return NVPTXISD::Suld1DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ return NVPTXISD::Suld1DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ return NVPTXISD::Suld1DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ return NVPTXISD::Suld1DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ return NVPTXISD::Suld2DI8Zero;
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ return NVPTXISD::Suld2DI16Zero;
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ return NVPTXISD::Suld2DI32Zero;
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ return NVPTXISD::Suld2DI64Zero;
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ return NVPTXISD::Suld2DV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ return NVPTXISD::Suld2DV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ return NVPTXISD::Suld2DV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ return NVPTXISD::Suld2DV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ return NVPTXISD::Suld2DV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ return NVPTXISD::Suld2DV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ return NVPTXISD::Suld2DV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ return NVPTXISD::Suld2DArrayI8Zero;
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ return NVPTXISD::Suld2DArrayI16Zero;
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ return NVPTXISD::Suld2DArrayI32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ return NVPTXISD::Suld2DArrayI64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ return NVPTXISD::Suld2DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ return NVPTXISD::Suld2DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ return NVPTXISD::Suld2DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ return NVPTXISD::Suld2DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ return NVPTXISD::Suld2DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ return NVPTXISD::Suld2DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ return NVPTXISD::Suld2DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ return NVPTXISD::Suld3DI8Zero;
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ return NVPTXISD::Suld3DI16Zero;
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ return NVPTXISD::Suld3DI32Zero;
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ return NVPTXISD::Suld3DI64Zero;
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ return NVPTXISD::Suld3DV2I8Zero;
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ return NVPTXISD::Suld3DV2I16Zero;
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ return NVPTXISD::Suld3DV2I32Zero;
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ return NVPTXISD::Suld3DV2I64Zero;
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ return NVPTXISD::Suld3DV4I8Zero;
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ return NVPTXISD::Suld3DV4I16Zero;
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ return NVPTXISD::Suld3DV4I32Zero;
+ }
+}
+
+// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
+// TgtMemIntrinsic because we need the information that is only available in
+// the "Value" type of the destination pointer. In particular, the address
+// space information.
+bool NVPTXTargetLowering::getTgtMemIntrinsic(
+ IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+
+ case Intrinsic::nvvm_atomic_load_add_f32:
+ case Intrinsic::nvvm_atomic_load_inc_32:
+ case Intrinsic::nvvm_atomic_load_dec_32:
+
+ case Intrinsic::nvvm_atomic_add_gen_f_cta:
+ case Intrinsic::nvvm_atomic_add_gen_f_sys:
+ case Intrinsic::nvvm_atomic_add_gen_i_cta:
+ case Intrinsic::nvvm_atomic_add_gen_i_sys:
+ case Intrinsic::nvvm_atomic_and_gen_i_cta:
+ case Intrinsic::nvvm_atomic_and_gen_i_sys:
+ case Intrinsic::nvvm_atomic_cas_gen_i_cta:
+ case Intrinsic::nvvm_atomic_cas_gen_i_sys:
+ case Intrinsic::nvvm_atomic_dec_gen_i_cta:
+ case Intrinsic::nvvm_atomic_dec_gen_i_sys:
+ case Intrinsic::nvvm_atomic_inc_gen_i_cta:
+ case Intrinsic::nvvm_atomic_inc_gen_i_sys:
+ case Intrinsic::nvvm_atomic_max_gen_i_cta:
+ case Intrinsic::nvvm_atomic_max_gen_i_sys:
+ case Intrinsic::nvvm_atomic_min_gen_i_cta:
+ case Intrinsic::nvvm_atomic_min_gen_i_sys:
+ case Intrinsic::nvvm_atomic_or_gen_i_cta:
+ case Intrinsic::nvvm_atomic_or_gen_i_sys:
+ case Intrinsic::nvvm_atomic_exch_gen_i_cta:
+ case Intrinsic::nvvm_atomic_exch_gen_i_sys:
+ case Intrinsic::nvvm_atomic_xor_gen_i_cta:
+ case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = true;
+ Info.align = 0;
+ return true;
+ }
+
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if (Intrinsic == Intrinsic::nvvm_ldg_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4f32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i8;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i16;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i64;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ return false;
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+/// Used to guide target-specific optimizations, like loop strength reduction
+/// (LoopStrengthReduce.cpp) and memory optimization for address modes
+/// (CodeGenPrepare.cpp).
+bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM, Type *Ty,
+ unsigned AS) const {
+ // AddrMode - This represents an addressing mode of:
+ // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+ //
+ // The legal address modes are
+ // - [avar]
+ // - [areg]
+ // - [areg+immoff]
+ // - [immAddr]
+
+ if (AM.BaseGV) {
+ return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
+ }
+
+ switch (AM.Scale) {
+ case 0: // "r", "r+i" or "i" is allowed
+ break;
+ case 1:
+ if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
+ return false;
+ // Otherwise we have r+i.
+ break;
+ default:
+ // No scale > 1 is allowed
+ return false;
+ }
+ return true;
+}
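// [Editorial sketch, not part of this patch] A minimal example of how a
// client pass such as LoopStrengthReduce might query the rules above; the
// helper name and the offset value are hypothetical, and the types come from
// headers this file already includes.
static bool isRegPlusImmLegalSketch(const NVPTXTargetLowering &TLI,
                                    const DataLayout &DL, Type *Ty,
                                    unsigned AS) {
  TargetLowering::AddrMode AM; // defaults: no GV, no base reg, Scale == 0
  AM.HasBaseReg = true;        // [areg]
  AM.BaseOffs = 16;            // [areg+immoff]
  AM.Scale = 0;                // any Scale > 1 (reg+reg forms) is rejected
  return TLI.isLegalAddressingMode(DL, AM, Ty, AS); // true under these rules
}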
+
+//===----------------------------------------------------------------------===//
+// NVPTX Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+NVPTXTargetLowering::ConstraintType
+NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'b':
+ case 'r':
+ case 'h':
+ case 'c':
+ case 'l':
+ case 'f':
+ case 'd':
+ case '0':
+ case 'N':
+ return C_RegisterClass;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'b':
+ return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
+ case 'c':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'h':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'r':
+ return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
+ case 'l':
+ case 'N':
+ return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
+ case 'f':
+ return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
+ case 'd':
+ return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
+ }
+ }
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
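// [Editorial sketch, not part of this patch] The single-letter constraints
// above select PTX register classes ('b' predicate, 'c'/'h' 16-bit, 'r'
// 32-bit, 'l'/'N' 64-bit, 'f' 32-bit float, 'd' 64-bit float). A hedged
// user-level example, only meaningful when compiled for an NVPTX target;
// the function name is made up.
static int addViaInlinePTX(int a, int b) {
  int c;
  asm("add.s32 %0, %1, %2;" : "=r"(c) : "r"(a), "r"(b)); // 'r' = 32-bit regs
  return c;
}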
+
+//===----------------------------------------------------------------------===//
+// NVPTX DAG Combining
+//===----------------------------------------------------------------------===//
+
+bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
+ CodeGenOpt::Level OptLevel) const {
+ // Always honor command-line argument
+ if (FMAContractLevelOpt.getNumOccurrences() > 0)
+ return FMAContractLevelOpt > 0;
+
+ // Do not contract if we're not optimizing the code.
+ if (OptLevel == 0)
+ return false;
+
+ // Honor TargetOptions flags that explicitly say fusion is okay.
+ if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
+ return true;
+
+ return allowUnsafeFPMath(MF);
+}
+
+bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
+ // Honor TargetOptions flags that explicitly say unsafe math is okay.
+ if (MF.getTarget().Options.UnsafeFPMath)
+ return true;
+
+ // Allow unsafe math if unsafe-fp-math attribute explicitly says so.
+ const Function *F = MF.getFunction();
+ if (F->hasFnAttribute("unsafe-fp-math")) {
+ Attribute Attr = F->getFnAttribute("unsafe-fp-math");
+ StringRef Val = Attr.getValueAsString();
+ if (Val == "true")
+ return true;
+ }
+
+ return false;
+}
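// [Editorial sketch, not part of this patch] The probe above reads the
// standard "unsafe-fp-math" string function attribute. A minimal example of
// how a front end or a test could set it so allowUnsafeFPMath() returns
// true; the helper name is hypothetical.
static void markUnsafeFPMathSketch(Function &F) {
  F.addFnAttr("unsafe-fp-math", "true");
}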
+
+/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
+/// operands N0 and N1. This is a helper for PerformADDCombine that is
+/// called with the default operands, and if that fails, with commuted
+/// operands.
+static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SelectionDAG &DAG = DCI.DAG;
+ // Skip vector types; only scalar values are handled here.
+ EVT VT = N0.getValueType();
+ if (VT.isVector())
+ return SDValue();
+
+ // fold (add (mul a, b), c) -> (mad a, b, c)
+ //
+ if (N0.getOpcode() == ISD::MUL) {
+ assert(VT.isInteger());
+ // For integer:
+ // Since integer multiply-add costs the same as integer multiply
+ // but is more costly than integer add, do the fusion only when
+ // the mul is only used in the add.
+ if (OptLevel == CodeGenOpt::None || VT != MVT::i32 ||
+ !N0.getNode()->hasOneUse())
+ return SDValue();
+
+ // Do the folding
+ return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ else if (N0.getOpcode() == ISD::FMUL) {
+ if (VT == MVT::f32 || VT == MVT::f64) {
+ const auto *TLI = static_cast<const NVPTXTargetLowering *>(
+ &DAG.getTargetLoweringInfo());
+ if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
+ return SDValue();
+
+ // For floating point:
+ // Do the fusion only when the mul has fewer than 5 uses and all of
+ // them are adds.
+ // The heuristic is that if a use is not an add, then that use cannot
+ // be fused into an fma, so the mul is still needed anyway. If there
+ // are more than 4 uses, even if they are all adds, fusing them will
+ // increase register pressure.
+ //
+ int numUses = 0;
+ int nonAddCount = 0;
+ for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
+ UE = N0.getNode()->use_end();
+ UI != UE; ++UI) {
+ numUses++;
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::FADD)
+ ++nonAddCount;
+ }
+ if (numUses >= 5)
+ return SDValue();
+ if (nonAddCount) {
+ int orderNo = N->getIROrder();
+ int orderNo2 = N0.getNode()->getIROrder();
+ // Simple heuristic for estimating potential register pressure: the
+ // difference in IR order approximates the distance between def and
+ // use, and the longer that distance, the more likely the fusion is to
+ // increase register pressure.
+ if (orderNo - orderNo2 < 500)
+ return SDValue();
+
+ // Now, check if at least one of the FMUL's operands is live beyond
+ // node N, which guarantees that the FMA will not increase register
+ // pressure at node N.
+ bool opIsLive = false;
+ const SDNode *left = N0.getOperand(0).getNode();
+ const SDNode *right = N0.getOperand(1).getNode();
+
+ if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
+ opIsLive = true;
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::FMA, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ }
+
+ return SDValue();
+}
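// [Editorial sketch, not part of this patch] A scalar view of the two folds
// above, using only built-in types. The integer case matches
// (add (mul a, b), c) and may become a single NVPTXISD::IMAD; the FP case
// may become ISD::FMA when the allowFMA() heuristics permit it.
static int imadPatternSketch(int a, int b, int c) {
  return a * b + c; // candidate for mad.lo.s32 when the mul has one use
}
static float fmaPatternSketch(float a, float b, float c) {
  return a * b + c; // candidate for fma.rn.f32 under FP contraction
}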
+
+/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
+///
+static SDValue PerformADDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ // First try with the default operand order.
+ if (SDValue Result =
+ PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
+ return Result;
+
+ // If that didn't work, try again with the operands commuted.
+ return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
+}
+
+static SDValue PerformANDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // The type legalizer turns a vector load of i8 values into a zextload to i16
+ // registers, optionally ANY_EXTENDs it (if target type is integer),
+ // and ANDs off the high 8 bits. Since we turn this load into a
+ // target-specific DAG node, the DAG combiner fails to eliminate these AND
+ // nodes. Do that here.
+ SDValue Val = N->getOperand(0);
+ SDValue Mask = N->getOperand(1);
+
+ if (isa<ConstantSDNode>(Val)) {
+ std::swap(Val, Mask);
+ }
+
+ SDValue AExt;
+ // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
+ if (Val.getOpcode() == ISD::ANY_EXTEND) {
+ AExt = Val;
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->getOpcode() == NVPTXISD::LoadV2 ||
+ Val->getOpcode() == NVPTXISD::LoadV4) {
+ ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
+ if (!MaskCnst) {
+ // Not an AND with a constant
+ return SDValue();
+ }
+
+ uint64_t MaskVal = MaskCnst->getZExtValue();
+ if (MaskVal != 0xff) {
+ // Not an AND that chops off top 8 bits
+ return SDValue();
+ }
+
+ MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
+ if (!Mem) {
+ // Not a MemSDNode?!?
+ return SDValue();
+ }
+
+ EVT MemVT = Mem->getMemoryVT();
+ if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
+ // We only handle the i8 case
+ return SDValue();
+ }
+
+ unsigned ExtType =
+ cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
+ getZExtValue();
+ if (ExtType == ISD::SEXTLOAD) {
+ // If for some reason the load is a sextload, the and is needed to zero
+ // out the high 8 bits
+ return SDValue();
+ }
+
+ bool AddTo = false;
+ if (AExt.getNode() != nullptr) {
+ // Re-insert the ext as a zext.
+ Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
+ AExt.getValueType(), Val);
+ AddTo = true;
+ }
+
+ // If we get here, the AND is unnecessary. Just replace it with the load
+ DCI.CombineTo(N, Val, AddTo);
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformREMCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
+
+ // Don't do anything at less than -O2.
+ if (OptLevel < CodeGenOpt::Default)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ bool IsSigned = N->getOpcode() == ISD::SREM;
+ unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
+
+ const SDValue &Num = N->getOperand(0);
+ const SDValue &Den = N->getOperand(1);
+
+ for (const SDNode *U : Num->uses()) {
+ if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
+ U->getOperand(1) == Den) {
+ // Num % Den -> Num - (Num / Den) * Den
+ return DAG.getNode(ISD::SUB, DL, VT, Num,
+ DAG.getNode(ISD::MUL, DL, VT,
+ DAG.getNode(DivOpc, DL, VT, Num, Den),
+ Den));
+ }
+ }
+ return SDValue();
+}
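// [Editorial sketch, not part of this patch] The rewrite above relies on the
// identity a % b == a - (a / b) * b, which holds for both the signed and the
// unsigned division operators. A hypothetical scalar equivalent of the
// combined div+rem:
static unsigned udivremSketch(unsigned Num, unsigned Den, unsigned &Rem) {
  unsigned Quot = Num / Den; // the already-existing UDIV user
  Rem = Num - Quot * Den;    // UREM folded into a mul and a sub
  return Quot;
}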
+
+enum OperandSignedness {
+ Signed = 0,
+ Unsigned,
+ Unknown
+};
+
+/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
+/// that can be demoted to \p OptSize bits without loss of information. The
+/// signedness of the operand, if determinable, is placed in \p S.
+static bool IsMulWideOperandDemotable(SDValue Op,
+ unsigned OptSize,
+ OperandSignedness &S) {
+ S = Unknown;
+
+ if (Op.getOpcode() == ISD::SIGN_EXTEND ||
+ Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() <= OptSize) {
+ S = Signed;
+ return true;
+ }
+ } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() <= OptSize) {
+ S = Unsigned;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
+/// be demoted to \p OptSize bits without loss of information. If the operands
+/// contain a constant, it should appear as the RHS operand. The signedness of
+/// the operands is placed in \p IsSigned.
+static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
+ unsigned OptSize,
+ bool &IsSigned) {
+ OperandSignedness LHSSign;
+
+ // The LHS operand must be a demotable op
+ if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
+ return false;
+
+ // We should have been able to determine the signedness from the LHS
+ if (LHSSign == Unknown)
+ return false;
+
+ IsSigned = (LHSSign == Signed);
+
+ // The RHS can be a demotable op or a constant
+ if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
+ const APInt &Val = CI->getAPIntValue();
+ if (LHSSign == Unsigned) {
+ return Val.isIntN(OptSize);
+ } else {
+ return Val.isSignedIntN(OptSize);
+ }
+ } else {
+ OperandSignedness RHSSign;
+ if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
+ return false;
+
+ return LHSSign == RHSSign;
+ }
+}
+
+/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
+/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
+/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
+/// amount.
+static SDValue TryMULWIDECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT MulType = N->getValueType(0);
+ if (MulType != MVT::i32 && MulType != MVT::i64) {
+ return SDValue();
+ }
+
+ SDLoc DL(N);
+ unsigned OptSize = MulType.getSizeInBits() >> 1;
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ // Canonicalize the multiply so the constant (if any) is on the right
+ if (N->getOpcode() == ISD::MUL) {
+ if (isa<ConstantSDNode>(LHS)) {
+ std::swap(LHS, RHS);
+ }
+ }
+
+ // If we have a SHL, determine the actual multiply amount
+ if (N->getOpcode() == ISD::SHL) {
+ ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
+ if (!ShlRHS) {
+ return SDValue();
+ }
+
+ APInt ShiftAmt = ShlRHS->getAPIntValue();
+ unsigned BitWidth = MulType.getSizeInBits();
+ if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
+ APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
+ RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
+ } else {
+ return SDValue();
+ }
+ }
+
+ bool Signed;
+ // Verify that our operands are demotable
+ if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
+ return SDValue();
+ }
+
+ EVT DemotedVT;
+ if (MulType == MVT::i32) {
+ DemotedVT = MVT::i16;
+ } else {
+ DemotedVT = MVT::i32;
+ }
+
+ // Truncate the operands to the correct size. Note that these are just for
+ // type consistency and will (likely) be eliminated in later phases.
+ SDValue TruncLHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
+ SDValue TruncRHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
+
+ unsigned Opc;
+ if (Signed) {
+ Opc = NVPTXISD::MUL_WIDE_SIGNED;
+ } else {
+ Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
+ }
+
+ return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
+}
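// [Editorial sketch, not part of this patch] A scalar illustration of the
// transform above using only built-in types: a 64-bit product whose operands
// are sign-extended from 32 bits can be formed by one widening multiply
// (mul.wide.s32) instead of a full 64-bit multiply; the i32 case uses
// mul.wide.s16/u16 analogously.
static long long mulWideS32Sketch(int A, int B) {
  return (long long)A * (long long)B; // candidate for MUL_WIDE_SIGNED
}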
+
+/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
+static SDValue PerformMULCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
+static SDValue PerformSHLCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformSETCCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT CCType = N->getValueType(0);
+ SDValue A = N->getOperand(0);
+ SDValue B = N->getOperand(1);
+
+ if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
+ return SDValue();
+
+ SDLoc DL(N);
+ // setp.f16x2 returns two scalar predicates, which we need to
+ // convert back to v2i1. The returned result will be scalarized by
+ // the legalizer, but the comparison will remain a single vector
+ // instruction.
+ SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
+ DCI.DAG.getVTList(MVT::i1, MVT::i1),
+ {A, B, N->getOperand(2)});
+ return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
+ CCNode.getValue(1));
+}
+
+SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::FADD:
+ return PerformADDCombine(N, DCI, STI, OptLevel);
+ case ISD::MUL:
+ return PerformMULCombine(N, DCI, OptLevel);
+ case ISD::SHL:
+ return PerformSHLCombine(N, DCI, OptLevel);
+ case ISD::AND:
+ return PerformANDCombine(N, DCI);
+ case ISD::UREM:
+ case ISD::SREM:
+ return PerformREMCombine(N, DCI, OptLevel);
+ case ISD::SETCC:
+ return PerformSETCCCombine(N, DCI);
+ }
+ return SDValue();
+}
+
+/// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
+static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ EVT ResVT = N->getValueType(0);
+ SDLoc DL(N);
+
+ assert(ResVT.isVector() && "Vector load must have vector type");
+
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 loads of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ assert(ResVT.isSimple() && "Can only handle simple types");
+ switch (ResVT.getSimpleVT().SimpleTy) {
+ default:
+ return;
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+
+ unsigned Align = LD->getAlignment();
+ auto &TD = DAG.getDataLayout();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This load is not sufficiently aligned, so bail out and let this vector
+ // load be scalarized. Note that we may still be able to emit smaller
+ // vector loads. For example, if we are loading a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return;
+ }
+
+ EVT EltVT = ResVT.getVectorElementType();
+ unsigned NumElts = ResVT.getVectorNumElements();
+
+ // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+ bool LoadF16x2 = false;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ Opcode = NVPTXISD::LoadV2;
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ case 8: {
+ // v8f16 is a special case. PTX doesn't have an ld.v8.f16
+ // instruction. Instead, we split the vector into v2f16 chunks and
+ // load them with ld.v4.b32.
+ assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
+ LoadF16x2 = true;
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
+ MVT::Other};
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ // Copy regular operands
+ SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
+
+ // The select routine does not have access to the LoadSDNode instance, so
+ // pass along the extension information
+ OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ LD->getMemoryVT(),
+ LD->getMemOperand());
+
+ SmallVector<SDValue, 8> ScalarRes;
+ if (LoadF16x2) {
+ // Split v2f16 subvectors back into individual elements.
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue SubVector = NewLD.getValue(i);
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(1, DL));
+ ScalarRes.push_back(E0);
+ ScalarRes.push_back(E1);
+ }
+ } else {
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+}
+
+static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ SDValue Chain = N->getOperand(0);
+ SDValue Intrin = N->getOperand(1);
+ SDLoc DL(N);
+
+ // Get the intrinsic ID
+ unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ EVT ResVT = N->getValueType(0);
+
+ if (ResVT.isVector()) {
+ // Vector LDG/LDU
+
+ unsigned NumElts = ResVT.getVectorNumElements();
+ EVT EltVT = ResVT.getVectorElementType();
+
+ // Since LDU/LDG are target nodes, we cannot rely on DAG type
+ // legalization. Therefore, we must ensure the type is legal. For i1 and
+ // i8, we set the loaded type to i16 and propagate the "real" type as the
+ // memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV2;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV2;
+ break;
+ }
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV4;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV4;
+ break;
+ }
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ SmallVector<SDValue, 8> OtherOps;
+
+ // Copy the regular operands, skipping operand 1 (the intrinsic ID).
+ OtherOps.push_back(Chain);
+ OtherOps.append(N->op_begin() + 2, N->op_end());
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ MemSD->getMemoryVT(),
+ MemSD->getMemOperand());
+
+ SmallVector<SDValue, 4> ScalarRes;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res =
+ DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec =
+ DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+ } else {
+ // i8 LDG/LDU
+ assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
+ "Custom handling of non-i8 ldu/ldg?");
+
+ // Just copy all operands as-is
+ SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
+
+ // Force output to i16
+ SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ // We make sure the memory type is i8, which will be used during isel
+ // to select the proper instruction.
+ SDValue NewLD =
+ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
+ MVT::i8, MemSD->getMemOperand());
+
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
+ NewLD.getValue(0)));
+ Results.push_back(NewLD.getValue(1));
+ }
+ }
+ }
+}
+
+void NVPTXTargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ default:
+ report_fatal_error("Unhandled custom legalization");
+ case ISD::LOAD:
+ ReplaceLoadVector(N, DAG, Results);
+ return;
+ case ISD::INTRINSIC_W_CHAIN:
+ ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
+ return;
+ }
+}
+
+// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
+void NVPTXSection::anchor() {}
+
+NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
+ delete static_cast<NVPTXSection *>(TextSection);
+ delete static_cast<NVPTXSection *>(DataSection);
+ delete static_cast<NVPTXSection *>(BSSSection);
+ delete static_cast<NVPTXSection *>(ReadOnlySection);
+
+ delete static_cast<NVPTXSection *>(StaticCtorSection);
+ delete static_cast<NVPTXSection *>(StaticDtorSection);
+ delete static_cast<NVPTXSection *>(LSDASection);
+ delete static_cast<NVPTXSection *>(EHFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
+ delete static_cast<NVPTXSection *>(DwarfInfoSection);
+ delete static_cast<NVPTXSection *>(DwarfLineSection);
+ delete static_cast<NVPTXSection *>(DwarfFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
+ delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
+ delete static_cast<NVPTXSection *>(DwarfStrSection);
+ delete static_cast<NVPTXSection *>(DwarfLocSection);
+ delete static_cast<NVPTXSection *>(DwarfARangesSection);
+ delete static_cast<NVPTXSection *>(DwarfRangesSection);
+ delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
+}
+
+MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
+ const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+ return getDataSection();
+}
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td
index 2b847414b8a8..9378b29a9d0e 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -1,3165 +1,3164 @@
-//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the PTX instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-include "NVPTXInstrFormats.td"
-
-// A NOP instruction
-let hasSideEffects = 0 in {
- def NOP : NVPTXInst<(outs), (ins), "", []>;
-}
-
-let OperandType = "OPERAND_IMMEDIATE" in {
- def f16imm : Operand<f16>;
-}
-
-// List of vector specific properties
-def isVecLD : VecInstTypeEnum<1>;
-def isVecST : VecInstTypeEnum<2>;
-def isVecBuild : VecInstTypeEnum<3>;
-def isVecShuffle : VecInstTypeEnum<4>;
-def isVecExtract : VecInstTypeEnum<5>;
-def isVecInsert : VecInstTypeEnum<6>;
-def isVecDest : VecInstTypeEnum<7>;
-def isVecOther : VecInstTypeEnum<15>;
-
-//===----------------------------------------------------------------------===//
-// NVPTX Operand Definitions.
-//===----------------------------------------------------------------------===//
-
-def brtarget : Operand<OtherVT>;
-
-// CVT conversion modes
-// These must match the enum in NVPTX.h
-def CvtNONE : PatLeaf<(i32 0x0)>;
-def CvtRNI : PatLeaf<(i32 0x1)>;
-def CvtRZI : PatLeaf<(i32 0x2)>;
-def CvtRMI : PatLeaf<(i32 0x3)>;
-def CvtRPI : PatLeaf<(i32 0x4)>;
-def CvtRN : PatLeaf<(i32 0x5)>;
-def CvtRZ : PatLeaf<(i32 0x6)>;
-def CvtRM : PatLeaf<(i32 0x7)>;
-def CvtRP : PatLeaf<(i32 0x8)>;
-
-def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
-def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
-def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
-def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
-def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
-def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
-def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
-def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
-def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
-
-def CvtSAT : PatLeaf<(i32 0x20)>;
-def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
-
-def CvtMode : Operand<i32> {
- let PrintMethod = "printCvtMode";
-}
-
-// Compare modes
-// These must match the enum in NVPTX.h
-def CmpEQ : PatLeaf<(i32 0)>;
-def CmpNE : PatLeaf<(i32 1)>;
-def CmpLT : PatLeaf<(i32 2)>;
-def CmpLE : PatLeaf<(i32 3)>;
-def CmpGT : PatLeaf<(i32 4)>;
-def CmpGE : PatLeaf<(i32 5)>;
-def CmpEQU : PatLeaf<(i32 10)>;
-def CmpNEU : PatLeaf<(i32 11)>;
-def CmpLTU : PatLeaf<(i32 12)>;
-def CmpLEU : PatLeaf<(i32 13)>;
-def CmpGTU : PatLeaf<(i32 14)>;
-def CmpGEU : PatLeaf<(i32 15)>;
-def CmpNUM : PatLeaf<(i32 16)>;
-def CmpNAN : PatLeaf<(i32 17)>;
-
-def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
-def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
-def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
-def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
-def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
-def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
-def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
-def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
-def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
-def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
-def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
-def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
-def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
-def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
-
-def CmpMode : Operand<i32> {
- let PrintMethod = "printCmpMode";
-}
-def VecElement : Operand<i32> {
- let PrintMethod = "printVecElement";
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Instruction Predicate Definitions
-//===----------------------------------------------------------------------===//
-
-
-def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
-def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
-def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
-def useAtomRedG32forGen32 :
- Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
-def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
-def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
-def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
-def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
-def useAtomRedG64forGen64 :
- Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
-def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
-def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
-def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
-def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
-def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
-def hasVote : Predicate<"Subtarget->hasVote()">;
-def hasDouble : Predicate<"Subtarget->hasDouble()">;
-def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
-def hasLDG : Predicate<"Subtarget->hasLDG()">;
-def hasLDU : Predicate<"Subtarget->hasLDU()">;
-def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
-
-def doF32FTZ : Predicate<"useF32FTZ()">;
-def doNoF32FTZ : Predicate<"!useF32FTZ()">;
-
-def doMulWide : Predicate<"doMulWide">;
-
-def allowFMA : Predicate<"allowFMA()">;
-def noFMA : Predicate<"!allowFMA()">;
-def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
-
-def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
-def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
-
-def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
-def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
-
-def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
-def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
-
-def true : Predicate<"true">;
-
-def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
-
-def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
-
-//===----------------------------------------------------------------------===//
-// Some Common Instruction Class Templates
-//===----------------------------------------------------------------------===//
-
-// Template for instructions which take three int64, int32, or int16 args.
-// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
-multiclass I3<string OpcStr, SDNode OpNode> {
- def i64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def i64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def i16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
-}
-
-// Template for instructions which take 3 int32 args. The instructions are
-// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
-multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
-}
-
-// Template for instructions which take three fp64 or fp32 args. The
-// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
-//
-// Also defines ftz (flush subnormal inputs and results to sign-preserving
-// zero) variants for fp32 functions.
-//
-// This multiclass should be used for nodes that cannot be folded into FMAs.
-// For nodes that can be folded into FMAs (i.e. adds and muls), use
-// F3_fma_component.
-multiclass F3<string OpcStr, SDNode OpNode> {
- def f64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
- def f64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
- def f32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ]>;
- def f32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ]>;
- def f32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
- def f32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
-}
-
-// Template for instructions which take three FP args. The
-// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
-//
-// Also defines ftz (flush subnormal inputs and results to sign-preserving
-// zero) variants for fp32/fp16 functions.
-//
-// This multiclass should be used for nodes that can be folded to make fma ops.
-// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
-// just like the non ".rn" op, but prevents ptxas from creating FMAs.
-multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
- def f64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[allowFMA]>;
- def f64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
- def f32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA]>;
- def f32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
-
- def f16rr_ftz :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;
- def f16rr :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, allowFMA]>;
-
- def f16x2rr_ftz :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;
- def f16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, allowFMA]>;
-
- // These have strange names so we don't perturb existing mir tests.
- def _rnf64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[noFMA]>;
- def _rnf64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def _rnf32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def _rnf32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def _rnf32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA]>;
- def _rnf32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def _rnf16rr_ftz :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, noFMA, doF32FTZ]>;
- def _rnf16rr :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, noFMA]>;
- def _rnf16x2rr_ftz :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, noFMA, doF32FTZ]>;
- def _rnf16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, noFMA]>;
-}
-
-// Template for operations which take two f32 or f64 operands. Provides three
-// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
-// subnormal inputs and results to zero).
-multiclass F2<string OpcStr, SDNode OpNode> {
- def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
- !strconcat(OpcStr, ".f64 \t$dst, $a;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
- def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
- Requires<[doF32FTZ]>;
- def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Instructions.
-//===----------------------------------------------------------------------===//
-
-//-----------------------------------
-// Type Conversion
-//-----------------------------------
-
-let hasSideEffects = 0 in {
- // Generate a cvt to the given type from all possible types. Each instance
- // takes a CvtMode immediate that defines the conversion mode to use. It can
- // be CvtNONE to omit a conversion mode.
- multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
- def _s8 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s8 \t$dst, $src;"), []>;
- def _u8 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u8 \t$dst, $src;"), []>;
- def _s16 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s16 \t$dst, $src;"), []>;
- def _u16 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u16 \t$dst, $src;"), []>;
- def _s32 :
- NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s32 \t$dst, $src;"), []>;
- def _u32 :
- NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u32 \t$dst, $src;"), []>;
- def _s64 :
- NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s64 \t$dst, $src;"), []>;
- def _u64 :
- NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u64 \t$dst, $src;"), []>;
- def _f16 :
- NVPTXInst<(outs RC:$dst),
- (ins Float16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f16 \t$dst, $src;"), []>;
- def _f32 :
- NVPTXInst<(outs RC:$dst),
- (ins Float32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f32 \t$dst, $src;"), []>;
- def _f64 :
- NVPTXInst<(outs RC:$dst),
- (ins Float64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f64 \t$dst, $src;"), []>;
- }
-
- // Generate cvts from all types to all types.
- defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
- defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
- defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
- defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
- defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
- defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
- defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
- defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
- defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
- defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
- defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
-
- // These cvts are different from those above: The source and dest registers
- // are of the same type.
- def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.s16.s8 \t$dst, $src;", []>;
- def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s8 \t$dst, $src;", []>;
- def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s16 \t$dst, $src;", []>;
- def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s8 \t$dst, $src;", []>;
- def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s16 \t$dst, $src;", []>;
- def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s32 \t$dst, $src;", []>;
-}
-
-//-----------------------------------
-// Integer Arithmetic
-//-----------------------------------
-
-// Template for xor masquerading as int1 arithmetic.
-multiclass ADD_SUB_i1<SDNode OpNode> {
- def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
-}
-
-// int1 addition and subtraction are both just xor.
-defm ADD_i1 : ADD_SUB_i1<add>;
-defm SUB_i1 : ADD_SUB_i1<sub>;
-
-// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
-// also use these for unsigned arithmetic.
-defm ADD : I3<"add.s", add>;
-defm SUB : I3<"sub.s", sub>;
-
-// int32 addition and subtraction with carry-out.
-// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
-defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
-defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
-
-// int32 addition and subtraction with carry-in and carry-out.
-defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
-defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
-
-defm MULT : I3<"mul.lo.s", mul>;
-
-defm MULTHS : I3<"mul.hi.s", mulhs>;
-defm MULTHU : I3<"mul.hi.u", mulhu>;
-
-defm SDIV : I3<"div.s", sdiv>;
-defm UDIV : I3<"div.u", udiv>;
-
-// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
-// will lower it.
-defm SREM : I3<"rem.s", srem>;
-defm UREM : I3<"rem.u", urem>;
-
-// Integer absolute value. NumBits should be one minus the bit width of RC.
-// This idiom implements the algorithm at
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
-multiclass ABS<RegisterClass RC, int NumBits, string SizeName> {
- def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
- !strconcat("abs", SizeName, " \t$dst, $a;"),
- [(set RC:$dst, (xor (add (sra RC:$a, (i32 NumBits)), RC:$a),
- (sra RC:$a, (i32 NumBits))))]>;
-}
-defm ABS_16 : ABS<Int16Regs, 15, ".s16">;
-defm ABS_32 : ABS<Int32Regs, 31, ".s32">;
-defm ABS_64 : ABS<Int64Regs, 63, ".s64">;
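A quick illustration of the branch-free abs idiom the ABS patterns above encode: with Mask = x >> (width - 1) (arithmetic shift, so 0 for non-negative x and -1 for negative x), the value (x + Mask) ^ Mask equals x for non-negative inputs and -x otherwise. A minimal standalone C++ sketch of the 32-bit case (illustrative only, not part of the patch; like std::abs, it overflows for INT32_MIN):

#include <cassert>
#include <cstdint>

// Branch-free absolute value, mirroring the ABS_32 pattern:
//   xor (add (sra x, 31), x), (sra x, 31)
static int32_t absIdiom32(int32_t X) {
  int32_t Mask = X >> 31;    // 0 if X >= 0, -1 if X < 0 (arithmetic shift)
  return (X + Mask) ^ Mask;  // X if Mask == 0; for X < 0, ~(X - 1) == -X
}

int main() {
  assert(absIdiom32(0) == 0);
  assert(absIdiom32(42) == 42);
  assert(absIdiom32(-42) == 42);
  return 0;
}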
-
-// Integer min/max.
-defm SMAX : I3<"max.s", smax>;
-defm UMAX : I3<"max.u", umax>;
-defm SMIN : I3<"min.s", smin>;
-defm UMIN : I3<"min.u", umin>;
-
-//
-// Wide multiplication
-//
-def MULWIDES64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-
-def MULWIDEU64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-
-def MULWIDES32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-
-def MULWIDEU32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-
-def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
-def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
-def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
-
-// Matchers for signed, unsigned mul.wide ISD nodes.
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
- (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
- (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
- (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
- (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-// Predicates used for converting some patterns to mul.wide.
-def SInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(32);
-}]>;
-
-def UInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(32);
-}]>;
-
-def SInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(16);
-}]>;
-
-def UInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(16);
-}]>;
-
-def Int5Const : PatLeaf<(imm), [{
- // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(32);
-}]>;
-
-def Int4Const : PatLeaf<(imm), [{
- // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(16);
-}]>;
-
-def SHL2MUL32 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(32, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
-}]>;
-
-def SHL2MUL16 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(16, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
-}]>;
-
-// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
-def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
- (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
-def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
- (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
- (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
-def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
- (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
-
-// Convert "sign/zero-extend then multiply" to mul.wide.
-def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
- (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
- (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
- (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
- (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
- Requires<[doMulWide]>;
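The rewrites above lean on two simple facts: shifting a value left by a constant c is the same as multiplying it by 2^c (which is what SHL2MUL32/SHL2MUL16 materialize), and the product of two sign- or zero-extended narrow values always fits in the double-width result, so a single mul.wide can stand in for an extend-then-multiply sequence. A small standalone C++ sketch of the equivalence (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  int32_t A = -123456;
  unsigned C = 7;  // shift amount; the Int5Const predicate requires 0 <= C < 32

  // "sign-extend, then shift left by C" equals "widening multiply by (1 << C)",
  // which is the form the SHL2MUL32 transform produces for mul.wide.s32.
  int64_t ViaShift = (int64_t)A << C;
  int64_t ViaMul   = (int64_t)A * (int64_t)(1 << C);
  assert(ViaShift == ViaMul);

  // Likewise, (sext a) * (sext b) of two 32-bit values never overflows the
  // 64-bit result, so it can be computed directly as one widening multiply.
  int32_t B = 98765;
  int64_t Wide = (int64_t)A * (int64_t)B;
  assert(Wide < 0 && Wide / B == A);  // spot check: full product recovered exactly
  return 0;
}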
-
-//
-// Integer multiply-add
-//
-def SDTIMAD :
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
- SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
-def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
-
-def MAD16rrr :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
-def MAD16rri :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
-def MAD16rir :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
-def MAD16rii :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD32rrr :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
-def MAD32rri :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
-def MAD32rir :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
-def MAD32rii :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD64rrr :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
-def MAD64rri :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
-def MAD64rir :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
-def MAD64rii :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
-
-def INEG16 :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "neg.s16 \t$dst, $src;",
- [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
-def INEG32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "neg.s32 \t$dst, $src;",
- [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
-def INEG64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "neg.s64 \t$dst, $src;",
- [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
-
-//-----------------------------------
-// Floating Point Arithmetic
-//-----------------------------------
-
-// Constant 1.0f
-def FloatConst1 : PatLeaf<(fpimm), [{
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
- N->getValueAPF().convertToFloat() == 1.0f;
-}]>;
-// Constant 1.0 (double)
-def DoubleConst1 : PatLeaf<(fpimm), [{
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
- N->getValueAPF().convertToDouble() == 1.0;
-}]>;
-
-// Loads FP16 constant into a register.
-//
-// ptxas does not have hex representation for fp16, so we can't use
-// fp16 immediate values in .f16 instructions. Instead we have to load
-// the constant into a register using mov.b16.
-def LOAD_CONST_F16 :
- NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
- "mov.b16 \t$dst, $a;", []>;
-
-defm FADD : F3_fma_component<"add", fadd>;
-defm FSUB : F3_fma_component<"sub", fsub>;
-defm FMUL : F3_fma_component<"mul", fmul>;
-
-defm FMIN : F3<"min", fminnum>;
-defm FMAX : F3<"max", fmaxnum>;
-
-defm FABS : F2<"abs", fabs>;
-defm FNEG : F2<"neg", fneg>;
-defm FSQRT : F2<"sqrt.rn", fsqrt>;
-
-//
-// F64 division
-//
-def FDIV641r :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins f64imm:$a, Float64Regs:$b),
- "rcp.rn.f64 \t$dst, $b;",
- [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
-def FDIV64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
-def FDIV64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
-
-//
-// F32 Approximate reciprocal
-//
-def FDIV321r_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV321r :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-//
-// F32 Approximate division
-//
-def FDIV32approxrr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxrr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-def FDIV32approxri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-//
-// F32 Semi-accurate reciprocal
-//
-// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
-//
-def FDIV321r_approx_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV321r_approx :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-//
-// F32 Semi-accurate division
-//
-def FDIV32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-def FDIV32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-//
-// F32 Accurate reciprocal
-//
-def FDIV321r_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20, doF32FTZ]>;
-def FDIV321r_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-//
-// F32 Accurate division
-//
-def FDIV32rr_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32ri_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32rr_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-def FDIV32ri_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[reqPTX20]>;
-
-//
-// FMA
-//
-
-multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
- Requires<[Pred]>;
- def rri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, ImmCls:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
- Requires<[Pred]>;
- def rir : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
- Requires<[Pred]>;
- def rii : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, ImmCls:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
- Requires<[Pred]>;
-}
-
-multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
- Requires<[useFP16Math, Pred]>;
-}
-
-defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
-defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
-defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
-defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
-defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
-defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
-defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
-
-// sin/cos
-def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "sin.approx.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
- Requires<[allowUnsafeFPMath]>;
-def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "cos.approx.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
- Requires<[allowUnsafeFPMath]>;
-
-// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
-// i.e. "poor man's fmod()"
-
-// frem - f32 FTZ
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
- (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
- Float32Regs:$y))>,
- Requires<[doF32FTZ]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
- (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
- fpimm:$y))>,
- Requires<[doF32FTZ]>;
-
-// frem - f32
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
- (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
- (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
- Float32Regs:$y))>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
- (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
- (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
- fpimm:$y))>;
-
-// frem - f64
-def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
- (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
- (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
- Float64Regs:$y))>;
-def : Pat<(frem Float64Regs:$x, fpimm:$y),
- (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
- (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
- fpimm:$y))>;
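For reference, the frem patterns above implement rem(x, y) = x - floor(x / y) * y, with the floor coming from the CvtRMI (round-toward-minus-infinity) conversion of the quotient. A standalone C++ sketch of that formula (illustrative only, not part of the patch); note that a floor-based remainder takes the sign of the divisor for mixed-sign operands, which is where it can differ from C's fmod:

#include <cassert>
#include <cmath>

// The floor-based remainder used by the frem patterns:
//   rem(x, y) = x - floor(x / y) * y
static float poorMansFmod(float X, float Y) {
  return X - std::floor(X / Y) * Y;
}

int main() {
  // Same-sign operands: matches fmodf (both give 1.5 here).
  assert(poorMansFmod(7.5f, 2.0f) == std::fmod(7.5f, 2.0f));
  // Mixed signs: floor(7.5 / -2) = -4, so the result is 7.5 - 8 = -0.5,
  // i.e. the sign of the divisor; fmod(7.5f, -2.0f) would return 1.5.
  assert(poorMansFmod(7.5f, -2.0f) == -0.5f);
  return 0;
}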
-
-//-----------------------------------
-// Bitwise operations
-//-----------------------------------
-
-// Template for three-arg bitwise operations. Takes three args; creates .b16,
-// .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
-multiclass BITWISE<string OpcStr, SDNode OpNode> {
- def b1rr :
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def b1ri :
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
- def b16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def b16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
- def b32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def b32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def b64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def b64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
-}
-
-defm OR : BITWISE<"or", or>;
-defm AND : BITWISE<"and", and>;
-defm XOR : BITWISE<"xor", xor>;
-
-def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
- "not.pred \t$dst, $src;",
- [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
-def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "not.b16 \t$dst, $src;",
- [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
-def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "not.b32 \t$dst, $src;",
- [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
-def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "not.b64 \t$dst, $src;",
- [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
-
-// Template for left/right shifts. Takes three operands,
-// [dest (reg), src (reg), shift (reg or imm)].
-// dest and src may be int64, int32, or int16, but shift is always int32.
-//
-// This template also defines a 32-bit shift (imm, imm) instruction.
-multiclass SHIFT<string OpcStr, SDNode OpNode> {
- def i64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
- def i64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
- def i32ii :
- NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
- def i16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
- def i16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
-}
-
-defm SHL : SHIFT<"shl.b", shl>;
-defm SRA : SHIFT<"shr.s", sra>;
-defm SRL : SHIFT<"shr.u", srl>;
-
-// Bit-reverse
-def BREV32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
- "brev.b32 \t$dst, $a;",
- [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
-def BREV64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
- "brev.b64 \t$dst, $a;",
- [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
-
-//
-// Rotate: Use ptx shf instruction if available.
-//
-
-// 32 bit r2 = rotl r1, n
-// =>
-// r2 = shf.l r1, r1, n
-def ROTL32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTL32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-// 32 bit r2 = rotr r1, n
-// =>
-// r2 = shf.r r1, r1, n
-def ROTR32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTR32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
-def ROT32imm_sw :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- "shl.b32 \t%lhs, $src, $amt1;\n\t"
- "shr.b32 \t%rhs, $src, $amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
- Requires<[noHWROT32]>;
-def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate left by register.
-def ROTL32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shl.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shr.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate right by register.
-def ROTR32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shr.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shl.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
-def ROT64imm_sw :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- "shl.b64 \t%lhs, $src, $amt1;\n\t"
- "shr.b64 \t%rhs, $src, $amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_64 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
-def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
-
-// 64-bit software rotate left by register.
-def ROTL64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "shl.b64 \t%lhs, $src, $amt;\n\t"
- "sub.u32 \t%amt2, 64, $amt;\n\t"
- "shr.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
-
-def ROTR64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "shr.b64 \t%lhs, $src, $amt;\n\t"
- "sub.u32 \t%amt2, 64, $amt;\n\t"
- "shl.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
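All of the software rotate expansions above share one shape: shift the source left by the rotate amount, shift it right by width minus the amount, and combine the two halves. The halves have no overlapping set bits, so add.u32/add.u64 serves as the combining or. A minimal C++ model of the 32-bit register form (illustrative only, not part of the patch; the masking of the amount just keeps the standalone example well-defined at zero):

#include <cassert>
#include <cstdint>

// Mirrors ROTL32reg_sw: lhs = src << amt, rhs = src >> (32 - amt),
// dst = lhs + rhs (the two halves are disjoint, so '+' acts like '|').
static uint32_t rotl32(uint32_t Src, uint32_t Amt) {
  Amt &= 31;  // keep the C++ shifts well-defined; not part of the PTX expansion
  if (Amt == 0)
    return Src;
  uint32_t Lhs = Src << Amt;
  uint32_t Rhs = Src >> (32 - Amt);
  return Lhs + Rhs;
}

int main() {
  assert(rotl32(0x80000001u, 1) == 0x00000003u);
  assert(rotl32(0x12345678u, 8) == 0x34567812u);
  return 0;
}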
-
-//
-// Funnel shift in clamp mode
-//
-
-// Create SDNodes so they can be used in the DAG code, e.g.
-// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
-def SDTIntShiftDOp :
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisInt<0>, SDTCisInt<3>]>;
-def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
-def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
-
-def FUNSHFLCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
-
-def FUNSHFRCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
-
-//
-// BFE - bit-field extract
-//
-
-// Template for BFE instructions. Takes four args,
-// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
-// Start may be an imm only if end is also an imm. FIXME: Is this a
-// restriction in PTX?
-//
-// dest and src may be int32 or int64, but start and end are always int32.
-multiclass BFE<string TyStr, RegisterClass RC> {
- def rrr
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
- def rri
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, Int32Regs:$b, i32imm:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
- def rii
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, i32imm:$b, i32imm:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
-}
-
-let hasSideEffects = 0 in {
- defm BFE_S32 : BFE<"s32", Int32Regs>;
- defm BFE_U32 : BFE<"u32", Int32Regs>;
- defm BFE_S64 : BFE<"s64", Int64Regs>;
- defm BFE_U64 : BFE<"u64", Int64Regs>;
-}
-
-//-----------------------------------
-// Comparison instructions (setp, set)
-//-----------------------------------
-
-// FIXME: This doesn't cover versions of set and setp that combine with a
-// boolean predicate, e.g. setp.eq.and.b16.
-
-let hasSideEffects = 0 in {
- multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr :
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- def ri :
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- def ir :
- NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- }
-}
-
-defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
-defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
-defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
-defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
-defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
-defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
-defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
-defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
-defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
-defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
-defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
-def SETP_f16rr :
- NVPTXInst<(outs Int1Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
- "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
- []>, Requires<[useFP16Math]>;
-
-def SETP_f16x2rr :
- NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
- (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
- "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
- []>,
- Requires<[useFP16Math]>;
-
-
-// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
-// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
-// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
-
-let hasSideEffects = 0 in {
- multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- def ri : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- def ir : NVPTXInst<(outs Int32Regs:$dst),
- (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- }
-}
-
-defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
-defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
-defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
-defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
-defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
-defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
-defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
-defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
-defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
-defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
-defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
-defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
-
-//-----------------------------------
-// Selection instructions (selp)
-//-----------------------------------
-
-// FIXME: Missing slct
-
-// selp instructions that don't have any pattern matches; we explicitly use
-// them within this file.
-let hasSideEffects = 0 in {
- multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ir : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ii : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- }
-
- multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
- SDNode ImmNode> {
- def rr :
- NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
- def ri :
- NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
- def ir :
- NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
- def ii :
- NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
- }
-}
-
-// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
-// good.
-defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
-defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
-defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
-defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
-defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
-defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
-defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
-defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
-defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
-defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
-defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
-defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
-
-def SELP_f16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Float16x2Regs:$dst,
- (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
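Illustrative sketch only (CUDA C++, hypothetical function name, exact lowering depends on compiler version): a branch-free conditional whose i1 select node is the kind of DAG the SELP_PATTERN definitions above are meant to match, and is expected to come out as a single selp instruction.

  // Expected (not guaranteed) PTX: selp.b32 %r, %ra, %rb, %p;
  __device__ int pick(bool p, int a, int b) {
    return p ? a : b;
  }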
-
-//-----------------------------------
-// Data Movement (Load / Store, Move)
-//-----------------------------------
-
-def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
- [SDNPWantRoot]>;
-def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
- [SDNPWantRoot]>;
-
-def MEMri : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops Int32Regs, i32imm);
-}
-def MEMri64 : Operand<i64> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops Int64Regs, i64imm);
-}
-
-def imem : Operand<iPTR> {
- let PrintMethod = "printOperand";
-}
-
-def imemAny : Operand<iPTRAny> {
- let PrintMethod = "printOperand";
-}
-
-def LdStCode : Operand<i32> {
- let PrintMethod = "printLdStCode";
-}
-
-def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
-def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
-
-// Load a memory address into a u32 or u64 register.
-def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
- "mov.u32 \t$dst, $a;",
- [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
- "mov.u64 \t$dst, $a;",
- [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
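A hedged CUDA C++ sketch (hypothetical names) of code that materializes the address of a module-scope variable, the situation MOV_ADDR/MOV_ADDR64 above exist for; the symbol address is expected to be loaded with a mov.u64, possibly followed by an address-space conversion.

  __device__ int counter;            // module-scope variable
  __device__ int *counter_addr() {
    return &counter;                 // expected: mov.u64 %rd, counter; (then cvta)
  }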
-
-// Get pointer to local stack.
-let hasSideEffects = 0 in {
- def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
- "mov.u32 \t$d, __local_depot$num;", []>;
- def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
- "mov.u64 \t$d, __local_depot$num;", []>;
-}
-
-
-// copyPhysReg is hard-coded in NVPTXInstrInfo.cpp
-let IsSimpleMove=1, hasSideEffects=0 in {
- def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
- "mov.pred \t$dst, $sss;", []>;
- def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
- "mov.u16 \t$dst, $sss;", []>;
- def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
- "mov.u32 \t$dst, $sss;", []>;
- def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
- "mov.u64 \t$dst, $sss;", []>;
-
- def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
- // We have to use .b16 here as there's no mov.f16.
- "mov.b16 \t$dst, $src;", []>;
- def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "mov.f32 \t$dst, $src;", []>;
- def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
- "mov.f64 \t$dst, $src;", []>;
-}
-
-def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
- "mov.pred \t$dst, $src;",
- [(set Int1Regs:$dst, imm:$src)]>;
-def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
- "mov.u16 \t$dst, $src;",
- [(set Int16Regs:$dst, imm:$src)]>;
-def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
- "mov.u32 \t$dst, $src;",
- [(set Int32Regs:$dst, imm:$src)]>;
-def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
- "mov.u64 \t$dst, $src;",
- [(set Int64Regs:$dst, imm:$src)]>;
-
-def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
- "mov.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, fpimm:$src)]>;
-def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
- "mov.f64 \t$dst, $src;",
- [(set Float64Regs:$dst, fpimm:$src)]>;
-
-def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
-
-//---- Copy Frame Index ----
-def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
- "add.u32 \t$dst, ${addr:add};",
- [(set Int32Regs:$dst, ADDRri:$addr)]>;
-def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
- "add.u64 \t$dst, ${addr:add};",
- [(set Int64Regs:$dst, ADDRri64:$addr)]>;
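A minimal sketch (CUDA C++, hypothetical names) of code that keeps a stack object in local memory, so its address is formed from the __local_depot pointer via the frame-index patterns above; whether the array actually stays in the local depot depends on optimization, so treat the PTX noted below as indicative only.

  __device__ int pick_from_stack(int i, const int *ext) {
    int buf[8];
    for (int k = 0; k < 8; ++k) buf[k] = ext[k];  // forces a real local buffer
    return buf[i & 7];                            // addr = add.u64 of depot + offset
  }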
-
-//-----------------------------------
-// Comparison and Selection
-//-----------------------------------
-
-multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
- Instruction setp_16rr,
- Instruction setp_16ri,
- Instruction setp_16ir,
- Instruction setp_32rr,
- Instruction setp_32ri,
- Instruction setp_32ir,
- Instruction setp_64rr,
- Instruction setp_64ri,
- Instruction setp_64ir,
- Instruction set_16rr,
- Instruction set_16ri,
- Instruction set_16ir,
- Instruction set_32rr,
- Instruction set_32ri,
- Instruction set_32ir,
- Instruction set_64rr,
- Instruction set_64ri,
- Instruction set_64ir> {
- // i16 -> pred
- def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
- (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
- (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
- (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
- // i32 -> pred
- def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
- (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
- (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
- (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
- // i64 -> pred
- def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
- (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
- (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
- (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
-
- // i16 -> i32
- def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
- (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
- (set_16ri Int16Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
- (set_16ir imm:$a, Int16Regs:$b, Mode)>;
- // i32 -> i32
- def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
- (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
- (set_32ri Int32Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
- (set_32ir imm:$a, Int32Regs:$b, Mode)>;
- // i64 -> i32
- def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
- (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
- (set_64ri Int64Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
- (set_64ir imm:$a, Int64Regs:$b, Mode)>;
-}
-
-multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
- : ISET_FORMAT<OpNode, Mode,
- SETP_s16rr, SETP_s16ri, SETP_s16ir,
- SETP_s32rr, SETP_s32ri, SETP_s32ir,
- SETP_s64rr, SETP_s64ri, SETP_s64ir,
- SET_s16rr, SET_s16ri, SET_s16ir,
- SET_s32rr, SET_s32ri, SET_s32ir,
- SET_s64rr, SET_s64ri, SET_s64ir> {
- // TableGen doesn't like empty multiclasses.
- def : PatLeaf<(i32 0)>;
-}
-
-multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
- : ISET_FORMAT<OpNode, Mode,
- SETP_u16rr, SETP_u16ri, SETP_u16ir,
- SETP_u32rr, SETP_u32ri, SETP_u32ir,
- SETP_u64rr, SETP_u64ri, SETP_u64ir,
- SET_u16rr, SET_u16ri, SET_u16ir,
- SET_u32rr, SET_u32ri, SET_u32ir,
- SET_u64rr, SET_u64ri, SET_u64ir> {
- // TableGen doesn't like empty multiclasses.
- def : PatLeaf<(i32 0)>;
-}
-
-defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
-defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
-defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
-defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
-defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
-defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
-defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
-defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
-defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
-defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
-defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
-defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
-
-// i1 compares
-def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
-def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
-
-def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-
-// i1 compare -> i32
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
- (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
- (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-
-
-
-multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
- // f16 -> pred
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
-
- // f32 -> pred
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
- (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
- (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
- (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
- (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
-
- // f64 -> pred
- def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
- (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
- (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
- (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
-
- // f16 -> i32
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
-
- // f32 -> i32
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
- (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
- (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
- (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
- (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
-
- // f64 -> i32
- def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
- (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
- (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
- (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
-}
-
-defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
-defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
-defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
-defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
-defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
-defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
-
-defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
-defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
-defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
-defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
-defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
-defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
-
-defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
-defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
-defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
-defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
-defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
-defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
-
-defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
-defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
-
-// FIXME: What is this doing here? Can it be deleted?
-// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
-// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-
-def SDTDeclareParamProfile :
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTDeclareScalarParamProfile :
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
-def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
-def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
-def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
-def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
-def SDTCallValProfile : SDTypeProfile<1, 0, []>;
-def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
-def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
-def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
-def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
-def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
-
-def DeclareParam :
- SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareScalarParam :
- SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRetParam :
- SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRet :
- SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LoadParam :
- SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV2 :
- SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV4 :
- SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def PrintCall :
- SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintConvergentCall :
- SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintCallUni :
- SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintConvergentCallUni :
- SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParam :
- SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV2 :
- SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV4 :
- SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamU32 :
- SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamS32 :
- SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgBegin :
- SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArg :
- SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LastCallArg :
- SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgEnd :
- SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVoid :
- SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def Prototype :
- SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVal :
- SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def MoveParam :
- SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
-def StoreRetval :
- SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV2 :
- SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV4 :
- SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def PseudoUseParam :
- SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def RETURNNode :
- SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-
-let mayLoad = 1 in {
- class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
- []>;
-
- class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
- !strconcat("ld.param.v2", opstr,
- " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
-
- class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
- (ins i32imm:$b),
- !strconcat("ld.param.v4", opstr,
- " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
- []>;
-}
-
-class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat("mov", opstr, " \t$dst, retval$b;"),
- [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
-
-let mayStore = 1 in {
- class StoreParamInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
- !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
- []>;
-
- class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
- i32imm:$a, i32imm:$b),
- !strconcat("st.param.v2", opstr,
- " \t[param$a+$b], {{$val, $val2}};"),
- []>;
-
- class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a,
- i32imm:$b),
- !strconcat("st.param.v4", opstr,
- " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
- []>;
-
- class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
- !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
- []>;
-
- class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
- !strconcat("st.param.v2", opstr,
- " \t[func_retval0+$a], {{$val, $val2}};"),
- []>;
-
- class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs),
- (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a),
- !strconcat("st.param.v4", opstr,
- " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
- []>;
-}
-
-let isCall=1 in {
- multiclass CALL<string OpcStr, SDNode OpNode> {
- def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
- def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
- def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
- def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
- def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
- [(OpNode (i32 4))]>;
- def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
- [(OpNode (i32 5))]>;
- def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5), "),
- [(OpNode (i32 6))]>;
- def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5, retval6), "),
- [(OpNode (i32 7))]>;
- def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5, retval6, retval7), "),
- [(OpNode (i32 8))]>;
- }
-}
-
-defm Call : CALL<"call", PrintCall>;
-defm CallUni : CALL<"call.uni", PrintCallUni>;
-
-// Convergent call instructions. These are identical to regular calls, except
-// they have the isConvergent bit set.
-let isConvergent=1 in {
- defm ConvergentCall : CALL<"call", PrintConvergentCall>;
- defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
-}
-
-def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
-def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
-def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
-def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
-def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
-def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
-def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
-def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
-def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
-def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
-def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
-def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
-def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
-def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
-def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
-def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
-def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
-def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
-def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
-def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
-def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
-def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
-
-def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
-def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
-
-def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
-def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
-def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
-def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
-def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
-def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
-
-def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
-def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
-def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
-
-def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
-def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
-def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
-def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
-def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
-def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
-def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
-def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
-def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
-def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
-def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
-
-def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
-def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
-def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
-def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
-def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
-def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
-def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
-def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
-def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
-def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
-def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
-
-def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
-def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
-def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
-def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
-def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
-def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
-def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
-def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
-def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
-def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
-def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
-
-def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
-def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
-def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
-def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
-
-class CallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a, ",
- [(CallArg (i32 0), regclass:$a)]>;
-
-class LastCallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a",
- [(LastCallArg (i32 0), regclass:$a)]>;
-
-def CallArgI64 : CallArgInst<Int64Regs>;
-def CallArgI32 : CallArgInst<Int32Regs>;
-def CallArgI16 : CallArgInst<Int16Regs>;
-def CallArgF64 : CallArgInst<Float64Regs>;
-def CallArgF32 : CallArgInst<Float32Regs>;
-
-def LastCallArgI64 : LastCallArgInst<Int64Regs>;
-def LastCallArgI32 : LastCallArgInst<Int32Regs>;
-def LastCallArgI16 : LastCallArgInst<Int16Regs>;
-def LastCallArgF64 : LastCallArgInst<Float64Regs>;
-def LastCallArgF32 : LastCallArgInst<Float32Regs>;
-
-def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
- [(CallArg (i32 0), (i32 imm:$a))]>;
-def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
- [(LastCallArg (i32 0), (i32 imm:$a))]>;
-
-def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
- [(CallArg (i32 1), (i32 imm:$a))]>;
-def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
- [(LastCallArg (i32 1), (i32 imm:$a))]>;
-
-def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
- [(CallVoid (Wrapper tglobaladdr:$addr))]>;
-def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
- [(CallVoid Int32Regs:$addr)]>;
-def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
- [(CallVoid Int64Regs:$addr)]>;
-def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
- [(Prototype (i32 imm:$val))]>;
-
-def DeclareRetMemInst :
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
- ".param .align $align .b8 retval$num[$size];",
- [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetScalarInst :
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".param .b$size retval$num;",
- [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetRegInst :
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".reg .b$size retval$num;",
- [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
-
-def DeclareParamInst :
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
- ".param .align $align .b8 param$a[$size];",
- [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
-def DeclareScalarParamInst :
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".param .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
-def DeclareScalarRegInst :
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".reg .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
-
-class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
- NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
- !strconcat("mov", asmstr, " \t$dst, $src;"),
- [(set regclass:$dst, (MoveParam regclass:$src))]>;
-
-def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
-def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
-def MoveParamI16 :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.u16.u32 \t$dst, $src;",
- [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
-def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
-def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
-def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
-
-class PseudoUseParamInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$src),
- "// Pseudo use of $src",
- [(PseudoUseParam regclass:$src)]>;
-
-def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
-def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
-def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
-def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
-def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
-
-
-//
-// Load / Store Handling
-//
-multiclass LD<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _areg : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _areg_64 : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _ari : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
- def _ari_64 : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
- def _asi : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
-}
-
-let mayLoad=1, hasSideEffects=0 in {
- defm LD_i8 : LD<Int16Regs>;
- defm LD_i16 : LD<Int16Regs>;
- defm LD_i32 : LD<Int32Regs>;
- defm LD_i64 : LD<Int64Regs>;
- defm LD_f16 : LD<Float16Regs>;
- defm LD_f16x2 : LD<Float16x2Regs>;
- defm LD_f32 : LD<Float32Regs>;
- defm LD_f64 : LD<Float64Regs>;
-}
-
-multiclass ST<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _areg : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _ari : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
- def _ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
- def _asi : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
-}
-
-let mayStore=1, hasSideEffects=0 in {
- defm ST_i8 : ST<Int16Regs>;
- defm ST_i16 : ST<Int16Regs>;
- defm ST_i32 : ST<Int32Regs>;
- defm ST_i64 : ST<Int64Regs>;
- defm ST_f16 : ST<Float16Regs>;
- defm ST_f16x2 : ST<Float16x2Regs>;
- defm ST_f32 : ST<Float32Regs>;
- defm ST_f64 : ST<Float64Regs>;
-}
-
-// The following instructions are used only in and after vector elementization.
-// Vector elementization happens at the machine instruction level, so these
-// instructions never appear in the DAG.
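As a hedged illustration (CUDA C++, hypothetical name), a 16-byte aligned vector access is the kind of memory operation the _v4 forms below exist for; with nvcc such a load typically becomes a single v4 load (e.g. ld.global.v4.f32 when the pointer is known to be global).

  __device__ float4 load4(const float4 *p) {
    return *p;   // typically one ld.*.v4.f32
  }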
-multiclass LD_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_areg : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_areg_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_ari : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v2_ari_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v2_asi : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v4_avar : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_areg : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_areg_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_ari : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
- def _v4_ari_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
- def _v4_asi : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
-}
-let mayLoad=1, hasSideEffects=0 in {
- defm LDV_i8 : LD_VEC<Int16Regs>;
- defm LDV_i16 : LD_VEC<Int16Regs>;
- defm LDV_i32 : LD_VEC<Int32Regs>;
- defm LDV_i64 : LD_VEC<Int64Regs>;
- defm LDV_f16 : LD_VEC<Float16Regs>;
- defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
- defm LDV_f32 : LD_VEC<Float32Regs>;
- defm LDV_f64 : LD_VEC<Float64Regs>;
-}
-
-multiclass ST_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_areg : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_ari : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v2_ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v2_asi : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v4_avar : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_areg : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_ari : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_asi : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
-}
-
-let mayStore=1, hasSideEffects=0 in {
- defm STV_i8 : ST_VEC<Int16Regs>;
- defm STV_i16 : ST_VEC<Int16Regs>;
- defm STV_i32 : ST_VEC<Int32Regs>;
- defm STV_i64 : ST_VEC<Int64Regs>;
- defm STV_f16 : ST_VEC<Float16Regs>;
- defm STV_f16x2 : ST_VEC<Float16x2Regs>;
- defm STV_f32 : ST_VEC<Float32Regs>;
- defm STV_f64 : ST_VEC<Float64Regs>;
-}
-
-//---- Conversion ----
-
-class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
- NVPTXRegClass regclassOut> :
- NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
- !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
- [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
-
-def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
-def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
-def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
-def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
-def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
-def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
-def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
-def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
-
-// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
-// we cannot specify floating-point literals in isel patterns. Therefore, we
-// use an integer selp to select either 1 or 0 and then cvt to floating-point.
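A minimal CUDA C++ sketch of the two-step lowering described in the note above, assuming the usual front-end lowering of a bool-to-float conversion: the predicate is first turned into 0/1 with an integer selp and then converted with cvt.

  __device__ float bool_to_float(bool p) {
    return (float)p;   // expected: selp.u32 %r, 1, 0, %p;  cvt.rn.f32.u32 %f, %r;
  }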
-
-// sint -> f16
-def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
- (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
- (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
- (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
- (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f16
-def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
- (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
- (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
- (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
- (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
-
-// sint -> f32
-def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
- (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
- (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
- (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
- (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f32
-def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
- (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
- (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
- (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
- (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
-
-// sint -> f64
-def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
- (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
- (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
- (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
- (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f64
-def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
- (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
- (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
- (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
- (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
-
-
-// f16 -> sint
-def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
- (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
- (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
- (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
- (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
- (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
- (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
-
-// f16 -> uint
-def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
- (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
- (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
- (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
- (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
- (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
- (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
-
-// f32 -> sint
-def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
- (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
- (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
- (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
- (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
- (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
- (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
-
-// f32 -> uint
-def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
- (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
- (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
- (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
- (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
- (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
- (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
-
-// f64 -> sint
-def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
- (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
- (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
- (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
-
-// f64 -> uint
-def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
- (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
- (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
- (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
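A hedged example of the fp-to-int patterns above (CUDA C++, hypothetical name): C-style float-to-int casts truncate toward zero, which is why these patterns use the round-toward-zero-integer (CvtRZI) modifier.

  __device__ int to_int(float f) {
    return (int)f;     // expected: cvt.rzi.s32.f32 %r, %f;
  }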
-
-// sext i1
-def : Pat<(i16 (sext Int1Regs:$a)),
- (SELP_s16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (sext Int1Regs:$a)),
- (SELP_s32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (sext Int1Regs:$a)),
- (SELP_s64ii -1, 0, Int1Regs:$a)>;
-
-// zext i1
-def : Pat<(i16 (zext Int1Regs:$a)),
- (SELP_u16ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (zext Int1Regs:$a)),
- (SELP_u32ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (zext Int1Regs:$a)),
- (SELP_u64ii 1, 0, Int1Regs:$a)>;
-
-// anyext i1
-def : Pat<(i16 (anyext Int1Regs:$a)),
- (SELP_u16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (anyext Int1Regs:$a)),
- (SELP_u32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (anyext Int1Regs:$a)),
- (SELP_u64ii -1, 0, Int1Regs:$a)>;
-
-// sext i16
-def : Pat<(i32 (sext Int16Regs:$a)),
- (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (sext Int16Regs:$a)),
- (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
-
-// zext i16
-def : Pat<(i32 (zext Int16Regs:$a)),
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (zext Int16Regs:$a)),
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
-
-// anyext i16
-def : Pat<(i32 (anyext Int16Regs:$a)),
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (anyext Int16Regs:$a)),
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
-
-// sext i32
-def : Pat<(i64 (sext Int32Regs:$a)),
- (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
-
-// zext i32
-def : Pat<(i64 (zext Int32Regs:$a)),
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
-
-// anyext i32
-def : Pat<(i64 (anyext Int32Regs:$a)),
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
-
-
-// truncate i64
-def : Pat<(i32 (trunc Int64Regs:$a)),
- (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i16 (trunc Int64Regs:$a)),
- (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int64Regs:$a)),
- (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
-
-// truncate i32
-def : Pat<(i16 (trunc Int32Regs:$a)),
- (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int32Regs:$a)),
- (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
-
-// truncate i16
-def : Pat<(i1 (trunc Int16Regs:$a)),
- (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
-
-// sext_inreg
-def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
-
-
-// Select instructions with 32-bit predicates
-def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
- (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
- (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
- (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
- (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
- (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
- (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-
-
-let hasSideEffects = 0 in {
-  // pack a set of smaller int registers into a larger int register
- def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2,
- Int16Regs:$s3, Int16Regs:$s4),
- "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
- def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2),
- "mov.b32 \t$d, {{$s1, $s2}};", []>;
- def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int32Regs:$s1, Int32Regs:$s2),
- "mov.b64 \t$d, {{$s1, $s2}};", []>;
- def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
- (ins Float32Regs:$s1, Float32Regs:$s2),
- "mov.b64 \t$d, {{$s1, $s2}};", []>;
-
-  // unpack a larger int register into a set of smaller int registers
- def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
- Int16Regs:$d3, Int16Regs:$d4),
- (ins Int64Regs:$s),
- "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
- def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
- (ins Int32Regs:$s),
- "mov.b32 \t{{$d1, $d2}}, $s;", []>;
- def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
- (ins Int64Regs:$s),
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;
- def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
- (ins Float64Regs:$s),
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;
-
-}
-
-let hasSideEffects = 0 in {
-  // Extract an element of an f16x2 register. PTX does not provide any way
-  // to access the elements of an f16x2 vector directly, so we have to
-  // extract them using a temporary register.
- def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16x2Regs:$src),
- "{{ .reg .b16 \t%tmp_hi;\n\t"
- " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
- [(set Float16Regs:$dst,
- (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
- def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16x2Regs:$src),
- "{{ .reg .b16 \t%tmp_lo;\n\t"
- " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
- [(set Float16Regs:$dst,
- (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
-
- // Coalesce two f16 registers into f16x2
- def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- "mov.b32 \t$dst, {{$a, $b}};",
- [(set Float16x2Regs:$dst,
- (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
-
-  // Directly initializing the underlying b32 register is one less SASS
-  // instruction than a vector-packing move.
- def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
- "mov.b32 \t$dst, $src;",
- []>;
-
- // Split f16x2 into two f16 registers.
- def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
- (ins Float16x2Regs:$src),
- "mov.b32 \t{{$lo, $hi}}, $src;",
- []>;
- // Split an i32 into two f16
- def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
- (ins Int32Regs:$src),
- "mov.b32 \t{{$lo, $hi}}, $src;",
- []>;
-}
-
-// Count leading zeros
-let hasSideEffects = 0 in {
- def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "clz.b32 \t$d, $a;", []>;
- def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "clz.b64 \t$d, $a;", []>;
-}
-
-// 32-bit has a direct PTX instruction
-def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
-
-// The return type of the ctlz ISD node is the same as its input, but the PTX
-// clz instruction always returns a 32-bit value. For ctlz.i64, convert the
-// PTX value to 64 bits to match the ISD node's semantics, unless we know we're
-// truncating back down to 32 bits.
-def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
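-// For example, (i64 (ctlz %x)) lowers to "clz.b64" followed by "cvt.u64.u32",
-// while (i32 (trunc (ctlz %x))) needs only the single "clz.b64" instruction.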
-
-// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
-// result back to 16 bits if necessary. We also need to subtract 16 because
-// the high-order 16 zeros were counted.
-//
-// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
-// use to save one SASS instruction (on sm_35 anyway):
-//
-// mov.b32 $tmp, {0xffff, $a}
-// ctlz.b32 $result, $tmp
-//
-// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
-// and then ctlz that value. This way we don't have to subtract 16 from the
-// result. Unfortunately today we don't have a way to generate
-// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
-def : Pat<(ctlz Int16Regs:$a),
- (SUBi16ri (CVT_u16_u32
- (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
-def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
- (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
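-// So the plain i16 case expands to roughly
-//   cvt.u32.u16; clz.b32; cvt.u16.u32; sub.s16 %dst, %dst, 16;
-// while the zext-to-i32 form drops the cvt.u16.u32 and subtracts with sub.s32.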
-
-// Population count
-let hasSideEffects = 0 in {
- def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "popc.b32 \t$d, $a;", []>;
- def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "popc.b64 \t$d, $a;", []>;
-}
-
-// 32-bit has a direct PTX instruction
-def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
-
-// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
-// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
-// pattern that avoids the type conversion if we're truncating the result to
-// i32 anyway.
-def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
-
-// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16 bits.
-// If we know that we're storing into an i32, we can avoid the final trunc.
-def : Pat<(ctpop Int16Regs:$a),
- (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
-def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
- (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
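-// For example, (i32 (zext (ctpop %a))) needs only "cvt.u32.u16" followed by
-// "popc.b32", with no conversion back down to 16 bits.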
-
-// fpround f32 -> f16
-def : Pat<(f16 (fpround Float32Regs:$a)),
- (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fpround Float32Regs:$a)),
- (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
-
-// fpround f64 -> f16
-def : Pat<(f16 (fpround Float64Regs:$a)),
- (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fpround Float64Regs:$a)),
- (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
-
-// fpround f64 -> f32
-def : Pat<(f32 (fpround Float64Regs:$a)),
- (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpround Float64Regs:$a)),
- (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
-
-// fpextend f16 -> f32
-def : Pat<(f32 (fpextend Float16Regs:$a)),
- (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpextend Float16Regs:$a)),
- (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
-
-// fpextend f16 -> f64
-def : Pat<(f64 (fpextend Float16Regs:$a)),
- (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float16Regs:$a)),
- (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
-
-// fpextend f32 -> f64
-def : Pat<(f64 (fpextend Float32Regs:$a)),
- (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float32Regs:$a)),
- (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
-
-def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-
-// fceil, ffloor, fround, ftrunc.
-
-def : Pat<(fceil Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fceil Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fceil Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fceil Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fceil Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
-
-def : Pat<(ffloor Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ffloor Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ffloor Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ffloor Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ffloor Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
-
-def : Pat<(fround Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fround Float16Regs:$a)),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fround Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fround Float32Regs:$a)),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(f64 (fround Float64Regs:$a)),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-def : Pat<(ftrunc Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ftrunc Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ftrunc Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ftrunc Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ftrunc Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
-
-// nearbyint and rint are implemented as rounding to nearest even. This isn't
-// strictly correct, because it causes us to ignore the rounding mode. But it
-// matches what CUDA's "libm" does.
-
-def : Pat<(fnearbyint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fnearbyint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fnearbyint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fnearbyint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fnearbyint Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-def : Pat<(frint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(frint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(frint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(frint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(frint Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-
-//-----------------------------------
-// Control-flow
-//-----------------------------------
-
-let isTerminator=1 in {
- let isReturn=1, isBarrier=1 in
- def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
-
- let isBranch=1 in
- def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@$a bra \t$target;",
- [(brcond Int1Regs:$a, bb:$target)]>;
- let isBranch=1 in
- def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@!$a bra \t$target;", []>;
-
- let isBranch=1, isBarrier=1 in
- def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
- "bra.uni \t$target;", [(br bb:$target)]>;
-}
-
-def : Pat<(brcond Int32Regs:$a, bb:$target),
- (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
-
-// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
-// conditional branch if the target block is the next block so that the code
-// can fall through to the target block. The inversion is done by 'xor
-// condition, 1', which will be translated to (setne condition, -1). Since PTX
-// supports '@!pred bra target', we should use it.
-def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
- (CBranchOther Int1Regs:$a, bb:$target)>;
-
-// Call
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPSideEffect]>;
-
-def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
-def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def calltarget : Operand<i32>;
-let isCall=1 in {
- def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
-}
-
-def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
-def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
-
-// Pseudo instructions.
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : NVPTXInst<outs, ins, asmstr, pattern>;
-
-def Callseq_Start :
- NVPTXInst<(outs), (ins i32imm:$amt),
- "\\{ // callseq $amt\n"
- "\t.reg .b32 temp_param_reg;",
- [(callseq_start timm:$amt)]>;
-def Callseq_End :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\} // callseq $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
-
-// trap instruction
-def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
-
-// Call prototype wrapper
-def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def CallPrototype :
- SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def ProtoIdent : Operand<i32> {
- let PrintMethod = "printProtoIdent";
-}
-def CALL_PROTOTYPE :
- NVPTXInst<(outs), (ins ProtoIdent:$ident),
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
-
-
-include "NVPTXIntrinsics.td"
-
-
-//-----------------------------------
-// Notes
-//-----------------------------------
-// BSWAP is currently expanded. The following would be more efficient:
-// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit
-//   registers.
-// - for sm_20, use prmt (with vector scalar mov for the pack and unpack);
-//   sm_20 supports native 32-bit registers, but not native 16-bit registers.
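-// For reference, on sm_20+ a 32-bit byte swap can be done with a single
-// permute, e.g. "prmt.b32 %dst, %src, 0, 0x0123;" (the 0x0123 selector
-// reverses the four bytes of %src).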
+//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the PTX instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "NVPTXInstrFormats.td"
+
+// A NOP instruction
+let hasSideEffects = 0 in {
+ def NOP : NVPTXInst<(outs), (ins), "", []>;
+}
+
+let OperandType = "OPERAND_IMMEDIATE" in {
+ def f16imm : Operand<f16>;
+}
+
+// List of vector specific properties
+def isVecLD : VecInstTypeEnum<1>;
+def isVecST : VecInstTypeEnum<2>;
+def isVecBuild : VecInstTypeEnum<3>;
+def isVecShuffle : VecInstTypeEnum<4>;
+def isVecExtract : VecInstTypeEnum<5>;
+def isVecInsert : VecInstTypeEnum<6>;
+def isVecDest : VecInstTypeEnum<7>;
+def isVecOther : VecInstTypeEnum<15>;
+
+//===----------------------------------------------------------------------===//
+// NVPTX Operand Definitions.
+//===----------------------------------------------------------------------===//
+
+def brtarget : Operand<OtherVT>;
+
+// CVT conversion modes
+// These must match the enum in NVPTX.h
+def CvtNONE : PatLeaf<(i32 0x0)>;
+def CvtRNI : PatLeaf<(i32 0x1)>;
+def CvtRZI : PatLeaf<(i32 0x2)>;
+def CvtRMI : PatLeaf<(i32 0x3)>;
+def CvtRPI : PatLeaf<(i32 0x4)>;
+def CvtRN : PatLeaf<(i32 0x5)>;
+def CvtRZ : PatLeaf<(i32 0x6)>;
+def CvtRM : PatLeaf<(i32 0x7)>;
+def CvtRP : PatLeaf<(i32 0x8)>;
+
+def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
+def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
+def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
+def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
+def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
+def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
+def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
+def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
+def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
+
+def CvtSAT : PatLeaf<(i32 0x20)>;
+def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
+
+def CvtMode : Operand<i32> {
+ let PrintMethod = "printCvtMode";
+}
+
+// Compare modes
+// These must match the enum in NVPTX.h
+def CmpEQ : PatLeaf<(i32 0)>;
+def CmpNE : PatLeaf<(i32 1)>;
+def CmpLT : PatLeaf<(i32 2)>;
+def CmpLE : PatLeaf<(i32 3)>;
+def CmpGT : PatLeaf<(i32 4)>;
+def CmpGE : PatLeaf<(i32 5)>;
+def CmpEQU : PatLeaf<(i32 10)>;
+def CmpNEU : PatLeaf<(i32 11)>;
+def CmpLTU : PatLeaf<(i32 12)>;
+def CmpLEU : PatLeaf<(i32 13)>;
+def CmpGTU : PatLeaf<(i32 14)>;
+def CmpGEU : PatLeaf<(i32 15)>;
+def CmpNUM : PatLeaf<(i32 16)>;
+def CmpNAN : PatLeaf<(i32 17)>;
+
+def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
+def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
+def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
+def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
+def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
+def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
+def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
+def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
+def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
+def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
+def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
+def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
+def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
+def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
+
+def CmpMode : Operand<i32> {
+ let PrintMethod = "printCmpMode";
+}
+def VecElement : Operand<i32> {
+ let PrintMethod = "printVecElement";
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instruction Predicate Definitions
+//===----------------------------------------------------------------------===//
+
+
+def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
+def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
+def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
+def useAtomRedG32forGen32 :
+ Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
+def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
+def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
+def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
+def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
+def useAtomRedG64forGen64 :
+ Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
+def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
+def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
+def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
+def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
+def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
+def hasVote : Predicate<"Subtarget->hasVote()">;
+def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
+def hasLDG : Predicate<"Subtarget->hasLDG()">;
+def hasLDU : Predicate<"Subtarget->hasLDU()">;
+def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
+
+def doF32FTZ : Predicate<"useF32FTZ()">;
+def doNoF32FTZ : Predicate<"!useF32FTZ()">;
+
+def doMulWide : Predicate<"doMulWide">;
+
+def allowFMA : Predicate<"allowFMA()">;
+def noFMA : Predicate<"!allowFMA()">;
+def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
+
+def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
+def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
+
+def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
+def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
+
+def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
+def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
+
+def true : Predicate<"true">;
+
+def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
+
+def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
+
+//===----------------------------------------------------------------------===//
+// Some Common Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+// Template for instructions which take three int64, int32, or int16 args.
+// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
+multiclass I3<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+}
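+// For example, defm ADD : I3<"add.s", add> (below) expands to ADDi64rr,
+// ADDi64ri, ..., ADDi16ri, where ADDi32rr emits "add.s32 \t$dst, $a, $b;".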
+
+// Template for instructions which take 3 int32 args. The instructions are
+// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
+multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+}
+
+// Template for instructions which take three fp64 or fp32 args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32 functions.
+//
+// This multiclass should be used for nodes that cannot be folded into FMAs.
+// For nodes that can be folded into FMAs (i.e. adds and muls), use
+// F3_fma_component.
+multiclass F3<string OpcStr, SDNode OpNode> {
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+}
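+// For example, defm FMIN : F3<"min", fminnum> (below) yields FMINf64rr,
+// FMINf64ri, FMINf32rr_ftz, FMINf32ri_ftz, FMINf32rr and FMINf32ri; the _ftz
+// variants are only selected when the doF32FTZ predicate holds.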
+
+// Template for instructions which take three FP args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32/fp16 functions.
+//
+// This multiclass should be used for nodes that can be folded to make fma ops.
+// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
+// just like the non ".rn" op, but prevents ptxas from creating FMAs.
+multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+
+ def f16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+ def f16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA]>;
+
+ def f16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+ def f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA]>;
+
+ // These have strange names so we don't perturb existing mir tests.
+ def _rnf64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def _rnf32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def _rnf32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, noFMA, doF32FTZ]>;
+ def _rnf16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, noFMA]>;
+ def _rnf16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, noFMA, doF32FTZ]>;
+ def _rnf16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, noFMA]>;
+}
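+// For example, with defm FADD : F3_fma_component<"add", fadd> (below), an
+// f32 add selects FADDf32rr ("add.f32") when FMA folding is allowed and
+// FADD_rnf32rr ("add.rn.f32") when it is not.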
+
+// Template for operations which take two f32 or f64 operands. Provides three
+// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
+// subnormal inputs and results to zero).
+multiclass F2<string OpcStr, SDNode OpNode> {
+ def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
+ !strconcat(OpcStr, ".f64 \t$dst, $a;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+ def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+ Requires<[doF32FTZ]>;
+ def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instructions.
+//===----------------------------------------------------------------------===//
+
+//-----------------------------------
+// Type Conversion
+//-----------------------------------
+
+let hasSideEffects = 0 in {
+ // Generate a cvt to the given type from all possible types. Each instance
+ // takes a CvtMode immediate that defines the conversion mode to use. It can
+ // be CvtNONE to omit a conversion mode.
+ multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
+ def _s8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s8 \t$dst, $src;"), []>;
+ def _u8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u8 \t$dst, $src;"), []>;
+ def _s16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s16 \t$dst, $src;"), []>;
+ def _u16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u16 \t$dst, $src;"), []>;
+ def _s32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s32 \t$dst, $src;"), []>;
+ def _u32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u32 \t$dst, $src;"), []>;
+ def _s64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s64 \t$dst, $src;"), []>;
+ def _u64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u64 \t$dst, $src;"), []>;
+ def _f16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f16 \t$dst, $src;"), []>;
+ def _f32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f32 \t$dst, $src;"), []>;
+ def _f64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f64 \t$dst, $src;"), []>;
+ }
+
+ // Generate cvts from all types to all types.
+ defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
+ defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
+ defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
+ defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
+ defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
+ defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
+ defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
+ defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
+ defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
+ defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
+ defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
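+  // For example, CVT_u64_u32 with a CvtNONE mode prints as
+  // "cvt.u64.u32 \t$dst, $src;", while CVT_f32_f32 with CvtRMI prints roughly
+  // "cvt.rmi.f32.f32" (round toward -inf, as used for ffloor and frem below).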
+
+ // These cvts are different from those above: The source and dest registers
+ // are of the same type.
+ def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.s16.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s32 \t$dst, $src;", []>;
+}
+
+//-----------------------------------
+// Integer Arithmetic
+//-----------------------------------
+
+// Template for xor masquerading as int1 arithmetic.
+multiclass ADD_SUB_i1<SDNode OpNode> {
+ def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+}
+
+// int1 addition and subtraction are both just xor.
+defm ADD_i1 : ADD_SUB_i1<add>;
+defm SUB_i1 : ADD_SUB_i1<sub>;
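+// (In 1-bit two's-complement arithmetic both a+b and a-b wrap to the same
+// value as a^b; e.g. 1 + 1 overflows to 0, which equals 1 xor 1.)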
+
+// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
+// also use these for unsigned arithmetic.
+defm ADD : I3<"add.s", add>;
+defm SUB : I3<"sub.s", sub>;
+
+// int32 addition and subtraction with carry-out.
+// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
+defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
+defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
+
+// int32 addition and subtraction with carry-in and carry-out.
+defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
+defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
+
+defm MULT : I3<"mul.lo.s", mul>;
+
+defm MULTHS : I3<"mul.hi.s", mulhs>;
+defm MULTHU : I3<"mul.hi.u", mulhu>;
+
+defm SDIV : I3<"div.s", sdiv>;
+defm UDIV : I3<"div.u", udiv>;
+
+// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
+// will lower them.
+defm SREM : I3<"rem.s", srem>;
+defm UREM : I3<"rem.u", urem>;
+
+// Integer absolute value. SizeName should be the PTX type suffix matching RC
+// (e.g. ".s16" for Int16Regs); each instance simply emits the corresponding
+// abs instruction.
+multiclass ABS<RegisterClass RC, string SizeName> {
+ def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
+ !strconcat("abs", SizeName, " \t$dst, $a;"),
+ [(set RC:$dst, (abs RC:$a))]>;
+}
+defm ABS_16 : ABS<Int16Regs, ".s16">;
+defm ABS_32 : ABS<Int32Regs, ".s32">;
+defm ABS_64 : ABS<Int64Regs, ".s64">;
+
+// Integer min/max.
+defm SMAX : I3<"max.s", smax>;
+defm UMAX : I3<"max.u", umax>;
+defm SMIN : I3<"min.s", smin>;
+defm UMIN : I3<"min.u", umin>;
+
+//
+// Wide multiplication
+//
+def MULWIDES64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+
+def MULWIDEU64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+
+def MULWIDES32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+
+def MULWIDEU32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+
+def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
+def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
+
+// Matchers for signed, unsigned mul.wide ISD nodes.
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+ (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+ (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+// Predicates used for converting some patterns to mul.wide.
+def SInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isSignedIntN(32);
+}]>;
+
+def UInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isIntN(32);
+}]>;
+
+def SInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isSignedIntN(16);
+}]>;
+
+def UInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isIntN(16);
+}]>;
+
+def Int5Const : PatLeaf<(imm), [{
+ // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
+ const APInt &v = N->getAPIntValue();
+ return v.sge(0) && v.slt(32);
+}]>;
+
+def Int4Const : PatLeaf<(imm), [{
+ // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
+ const APInt &v = N->getAPIntValue();
+ return v.sge(0) && v.slt(16);
+}]>;
+
+def SHL2MUL32 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(32, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
+}]>;
+
+def SHL2MUL16 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(16, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
+}]>;
+
+// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
+def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
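+// For example, (shl (sext i32 %a), 4) becomes "mul.wide.s32 \t$dst, %a, 16;",
+// since shifting left by 4 is the same as multiplying by 1 << 4 = 16.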
+
+// Convert "sign/zero-extend then multiply" to mul.wide.
+def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
+ (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
+ (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
+ (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
+ (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+//
+// Integer multiply-add
+//
+def SDTIMAD :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
+def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
+
+def MAD16rrr :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
+def MAD16rri :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
+def MAD16rir :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
+def MAD16rii :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD32rrr :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
+def MAD32rri :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
+def MAD32rir :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
+def MAD32rii :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD64rrr :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
+def MAD64rri :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
+def MAD64rir :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
+def MAD64rii :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
+
+def INEG16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "neg.s16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+def INEG32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "neg.s32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+def INEG64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "neg.s64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+
+//-----------------------------------
+// Floating Point Arithmetic
+//-----------------------------------
+
+// Constant 1.0f
+def FloatConst1 : PatLeaf<(fpimm), [{
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
+ N->getValueAPF().convertToFloat() == 1.0f;
+}]>;
+// Constant 1.0 (double)
+def DoubleConst1 : PatLeaf<(fpimm), [{
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
+ N->getValueAPF().convertToDouble() == 1.0;
+}]>;
+
+// Loads FP16 constant into a register.
+//
+// ptxas does not have hex representation for fp16, so we can't use
+// fp16 immediate values in .f16 instructions. Instead we have to load
+// the constant into a register using mov.b16.
+def LOAD_CONST_F16 :
+ NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
+ "mov.b16 \t$dst, $a;", []>;
+
+defm FADD : F3_fma_component<"add", fadd>;
+defm FSUB : F3_fma_component<"sub", fsub>;
+defm FMUL : F3_fma_component<"mul", fmul>;
+
+defm FMIN : F3<"min", fminnum>;
+defm FMAX : F3<"max", fmaxnum>;
+
+defm FABS : F2<"abs", fabs>;
+defm FNEG : F2<"neg", fneg>;
+defm FSQRT : F2<"sqrt.rn", fsqrt>;
+
+//
+// F64 division
+//
+def FDIV641r :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b),
+ "rcp.rn.f64 \t$dst, $b;",
+ [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+def FDIV64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+def FDIV64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
+
+//
+// F32 Approximate reciprocal
+//
+def FDIV321r_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV321r :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Approximate division
+//
+def FDIV32approxrr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxrr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Semi-accurate reciprocal
+//
+// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
+//
+def FDIV321r_approx_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV321r_approx :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Semi-accurate division
+//
+def FDIV32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+def FDIV32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Accurate reciprocal
+//
+def FDIV321r_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20, doF32FTZ]>;
+def FDIV321r_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+//
+// F32 Accurate division
+//
+def FDIV32rr_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32ri_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32rr_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+def FDIV32ri_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[reqPTX20]>;
+
+//
+// FMA
+//
+
+multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+ def rir : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+}
+
+multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[useFP16Math, Pred]>;
+}
+
+defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
+defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
+defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
+defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
+defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
+defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
+defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
+
+// sin/cos
+def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "sin.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "cos.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+
+// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
+// i.e. "poor man's fmod()"
+
+// frem - f32 FTZ
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
+ (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
+ Float32Regs:$y))>,
+ Requires<[doF32FTZ]>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
+ (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
+ fpimm:$y))>,
+ Requires<[doF32FTZ]>;
+
+// frem - f32
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
+ (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
+ Float32Regs:$y))>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
+ (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+// frem - f64
+def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
+ (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
+ Float64Regs:$y))>;
+def : Pat<(frem Float64Regs:$x, fpimm:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
+ (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+//-----------------------------------
+// Bitwise operations
+//-----------------------------------
+
+// Template for three-arg bitwise operations. Creates .b16, .b32, .b64, and
+// .pred (predicate registers, i.e. i1) versions of OpcStr.
+multiclass BITWISE<string OpcStr, SDNode OpNode> {
+ def b1rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def b1ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+ def b16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def b16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def b32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def b32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def b64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def b64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+}
+
+defm OR : BITWISE<"or", or>;
+defm AND : BITWISE<"and", and>;
+defm XOR : BITWISE<"xor", xor>;
+
+def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
+ "not.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
+def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "not.b16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
+def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "not.b32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
+def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "not.b64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
+
+// Template for left/right shifts. Takes three operands,
+// [dest (reg), src (reg), shift (reg or imm)].
+// dest and src may be int64, int32, or int16, but shift is always int32.
+//
+// This template also defines a 32-bit shift (imm, imm) instruction.
+multiclass SHIFT<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
+ def i32ii :
+ NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
+}
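+// For example, defm SHL : SHIFT<"shl.b", shl> (below) produces SHLi64rr
+// through SHLi16ri, with SHLi32ri emitting "shl.b32 \t$dst, $a, $b;".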
+
+defm SHL : SHIFT<"shl.b", shl>;
+defm SRA : SHIFT<"shr.s", sra>;
+defm SRL : SHIFT<"shr.u", srl>;
+
+// Bit-reverse
+def BREV32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
+ "brev.b32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
+def BREV64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
+ "brev.b64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
+
+//
+// Rotate: Use ptx shf instruction if available.
+//
+
+// 32 bit r2 = rotl r1, n
+// =>
+// r2 = shf.l r1, r1, n
+def ROTL32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTL32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32 bit r2 = rotr r1, n
+// =>
+// r2 = shf.r r1, r1, n
+def ROTR32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTR32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
+def ROT32imm_sw :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ "shl.b32 \t%lhs, $src, $amt1;\n\t"
+ "shr.b32 \t%rhs, $src, $amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+def SUB_FRM_32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]>;
+def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
+ Requires<[noHWROT32]>;
+
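+// Illustrative sketch: on targets without shf.*, (rotl i32:$x, (i32 5))
+// selects (ROT32imm_sw $x, 5, 27) -- SUB_FRM_32 folds 32 - 5 = 27 at
+// selection time -- which prints roughly as (register names hypothetical):
+//   shl.b32  %lhs, %x, 5;
+//   shr.b32  %rhs, %x, 27;
+//   add.u32  %dst, %lhs, %rhs;
+// add.u32 is equivalent to an OR here because the two shifted halves occupy
+// disjoint bit positions.
+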
+// 32-bit software rotate left by register.
+def ROTL32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shl.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shr.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate right by register.
+def ROTR32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shr.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shl.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
+def ROT64imm_sw :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ "shl.b64 \t%lhs, $src, $amt1;\n\t"
+ "shr.b64 \t%rhs, $src, $amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+def SUB_FRM_64 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
+def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
+
+// 64-bit software rotate left by register.
+def ROTL64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shl.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shr.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+
+def ROTR64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shr.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shl.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+
+//
+// Funnel shift in clamp mode
+//
+
+// Create SDNodes so they can be used in the DAG code, e.g.
+// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
+def SDTIntShiftDOp :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisInt<3>]>;
+def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+def FUNSHFLCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
+def FUNSHFRCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
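+// Illustrative sketch of the intended semantics (see the PTX ISA for the
+// authoritative definition): shf.l.clamp.b32 d, lo, hi, n treats {hi, lo} as
+// a 64-bit value, shifts it left by n clamped to [0, 32], and writes the
+// upper 32 bits to d; shf.r.clamp.b32 is the right-shift analogue.
+// NVPTXISelLowering emits FUN_SHFL_CLAMP / FUN_SHFR_CLAMP when splitting
+// 64-bit shifts into 32-bit halves.
+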
+//
+// BFE - bit-field extract
+//
+
+// Template for BFE instructions. Takes four args,
+// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
+// Start may be an imm only if end is also an imm. FIXME: Is this a
+// restriction in PTX?
+//
+// dest and src may be int32 or int64, but start and end are always int32.
+multiclass BFE<string TyStr, RegisterClass RC> {
+ def rrr
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rri
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rii
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, i32imm:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+}
+
+let hasSideEffects = 0 in {
+ defm BFE_S32 : BFE<"s32", Int32Regs>;
+ defm BFE_U32 : BFE<"u32", Int32Regs>;
+ defm BFE_S64 : BFE<"s64", Int64Regs>;
+ defm BFE_U64 : BFE<"u64", Int64Regs>;
+}
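+
+// Illustrative sketch (register names hypothetical): "bfe.u32 d, a, b, c"
+// extracts a field of c bits starting at bit position b of a and
+// zero-extends it into d; the .s32/.s64 forms sign-extend instead.  For
+// example, (BFE_U32rii $a, 8, 4) would print roughly as
+//   bfe.u32  %r1, %r2, 8, 4;
+// extracting bits 8..11 of $a.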
+
+//-----------------------------------
+// Comparison instructions (setp, set)
+//-----------------------------------
+
+// FIXME: This doesn't cover versions of set and setp that combine with a
+// boolean predicate, e.g. setp.eq.and.b16.
+
+let hasSideEffects = 0 in {
+ multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ir :
+ NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
+defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
+defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
+defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
+defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
+defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
+defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
+defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
+defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
+defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
+defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
+def SETP_f16rr :
+ NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
+ []>, Requires<[useFP16Math]>;
+
+def SETP_f16x2rr :
+ NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
+ []>,
+ Requires<[useFP16Math]>;
+
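+// Illustrative sketch: the CmpMode operand supplies the comparison; its
+// ${cmp:base} modifier prints the operation and ${cmp:ftz} prints ".ftz"
+// when flush-to-zero is requested.  For example, (SETP_s32rr $a, $b, CmpLT)
+// would print roughly as (register names hypothetical):
+//   setp.lt.s32  %p1, %r1, %r2;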
+
+// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
+// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
+// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
+
+let hasSideEffects = 0 in {
+ multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
+defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
+defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
+defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
+defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
+defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
+defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
+defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
+defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
+defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
+defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
+defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
+
+//-----------------------------------
+// Selection instructions (selp)
+//-----------------------------------
+
+// FIXME: Missing slct
+
+// selp instructions that don't have any pattern matches; we explicitly use
+// them within this file.
+let hasSideEffects = 0 in {
+ multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ }
+
+ multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
+ SDNode ImmNode> {
+ def rr :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
+ def ri :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
+ def ir :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
+ def ii :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+ }
+}
+
+// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
+// good.
+defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
+defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
+defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
+defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
+defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
+defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
+defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
+defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
+defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
+defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
+defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
+defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
+
+def SELP_f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Float16x2Regs:$dst,
+ (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
+
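+// Illustrative sketch: selp.b32 d, a, b, p writes a to d when predicate p is
+// true and b otherwise, so (select i1:$p, i32:$x, i32:$y) selects SELP_b32rr
+// and prints roughly as (register names hypothetical):
+//   selp.b32  %r1, %r2, %r3, %p1;
+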
+//-----------------------------------
+// Data Movement (Load / Store, Move)
+//-----------------------------------
+
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
+ [SDNPWantRoot]>;
+def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
+ [SDNPWantRoot]>;
+
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int32Regs, i32imm);
+}
+def MEMri64 : Operand<i64> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int64Regs, i64imm);
+}
+
+def imem : Operand<iPTR> {
+ let PrintMethod = "printOperand";
+}
+
+def imemAny : Operand<iPTRAny> {
+ let PrintMethod = "printOperand";
+}
+
+def LdStCode : Operand<i32> {
+ let PrintMethod = "printLdStCode";
+}
+
+def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
+
+// Load a memory address into a u32 or u64 register.
+def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
+ "mov.u32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
+ "mov.u64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+
+// Get pointer to local stack.
+let hasSideEffects = 0 in {
+ def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
+ "mov.u32 \t$d, __local_depot$num;", []>;
+ def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
+ "mov.u64 \t$d, __local_depot$num;", []>;
+}
+
+
+// copyPhysReg is hard-coded in NVPTXInstrInfo.cpp
+let IsSimpleMove=1, hasSideEffects=0 in {
+ def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
+ "mov.pred \t$dst, $sss;", []>;
+ def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+ def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
+ "mov.u32 \t$dst, $sss;", []>;
+ def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
+ "mov.u64 \t$dst, $sss;", []>;
+
+ def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
+ // We have to use .b16 here as there's no mov.f16.
+ "mov.b16 \t$dst, $src;", []>;
+ def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "mov.f32 \t$dst, $src;", []>;
+ def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
+ "mov.f64 \t$dst, $src;", []>;
+}
+
+def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
+ "mov.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, imm:$src)]>;
+def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int16Regs:$dst, imm:$src)]>;
+def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
+ "mov.u32 \t$dst, $src;",
+ [(set Int32Regs:$dst, imm:$src)]>;
+def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
+ "mov.u64 \t$dst, $src;",
+ [(set Int64Regs:$dst, imm:$src)]>;
+
+def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
+ "mov.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, fpimm:$src)]>;
+def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
+ "mov.f64 \t$dst, $src;",
+ [(set Float64Regs:$dst, fpimm:$src)]>;
+
+def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
+
+//---- Copy Frame Index ----
+def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
+ "add.u32 \t$dst, ${addr:add};",
+ [(set Int32Regs:$dst, ADDRri:$addr)]>;
+def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
+ "add.u64 \t$dst, ${addr:add};",
+ [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+
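+// Illustrative sketch: after frame lowering, a frame-index address selected
+// through ADDRri prints roughly as (assuming the frame pointer is printed as
+// %SP; register name and offset are hypothetical):
+//   add.u32  %r1, %SP, 8;
+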
+//-----------------------------------
+// Comparison and Selection
+//-----------------------------------
+
+multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
+ Instruction setp_16rr,
+ Instruction setp_16ri,
+ Instruction setp_16ir,
+ Instruction setp_32rr,
+ Instruction setp_32ri,
+ Instruction setp_32ir,
+ Instruction setp_64rr,
+ Instruction setp_64ri,
+ Instruction setp_64ir,
+ Instruction set_16rr,
+ Instruction set_16ri,
+ Instruction set_16ir,
+ Instruction set_32rr,
+ Instruction set_32ri,
+ Instruction set_32ir,
+ Instruction set_64rr,
+ Instruction set_64ri,
+ Instruction set_64ir> {
+ // i16 -> pred
+ def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
+ (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
+ (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> pred
+ def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
+ (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
+ (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> pred
+ def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
+ (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
+ (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
+
+ // i16 -> i32
+ def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
+ (set_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
+ (set_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> i32
+ def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
+ (set_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
+ (set_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> i32
+ def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
+ (set_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
+ (set_64ir imm:$a, Int64Regs:$b, Mode)>;
+}
+
+multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_s16rr, SETP_s16ri, SETP_s16ir,
+ SETP_s32rr, SETP_s32ri, SETP_s32ir,
+ SETP_s64rr, SETP_s64ri, SETP_s64ir,
+ SET_s16rr, SET_s16ri, SET_s16ir,
+ SET_s32rr, SET_s32ri, SET_s32ir,
+ SET_s64rr, SET_s64ri, SET_s64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_u16rr, SETP_u16ri, SETP_u16ir,
+ SETP_u32rr, SETP_u32ri, SETP_u32ir,
+ SETP_u64rr, SETP_u64ri, SETP_u64ir,
+ SET_u16rr, SET_u16ri, SET_u16ir,
+ SET_u32rr, SET_u32ri, SET_u32ir,
+ SET_u64rr, SET_u64ri, SET_u64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
+defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
+defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
+defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
+defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
+defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
+defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
+
+// i1 compares
+def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+
+def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+// i1 compare -> i32
+def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
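+// Illustrative sketch: for 1-bit operands inequality is just xor and
+// equality is its complement, so (i32 (setne i1:$a, i1:$b)) expands roughly
+// to (predicate/register names hypothetical):
+//   xor.pred  %p3, %p1, %p2;
+//   selp.u32  %r1, -1, 0, %p3;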
+
+
+multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
+ // f16 -> pred
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> pred
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> pred
+ def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+
+ // f16 -> i32
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> i32
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> i32
+ def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+}
+
+defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
+defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
+defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
+defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
+defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
+defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
+
+defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
+defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
+defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
+defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
+defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
+defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
+
+defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
+defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
+defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
+defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
+defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
+defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
+
+defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
+defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
+
+// FIXME: What is this doing here? Can it be deleted?
+// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
+// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+def SDTDeclareParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTDeclareScalarParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
+def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
+def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
+def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
+def SDTCallValProfile : SDTypeProfile<1, 0, []>;
+def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
+def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
+def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
+def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
+
+def DeclareParam :
+ SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareScalarParam :
+ SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRetParam :
+ SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRet :
+ SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LoadParam :
+ SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV2 :
+ SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV4 :
+ SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def PrintCall :
+ SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCall :
+ SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintCallUni :
+ SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCallUni :
+ SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParam :
+ SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV2 :
+ SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV4 :
+ SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamU32 :
+ SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamS32 :
+ SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgBegin :
+ SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArg :
+ SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LastCallArg :
+ SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgEnd :
+ SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVoid :
+ SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def Prototype :
+ SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVal :
+ SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def MoveParam :
+ SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
+def StoreRetval :
+ SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV2 :
+ SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV4 :
+ SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def PseudoUseParam :
+ SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def RETURNNode :
+ SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+let mayLoad = 1 in {
+ class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
+ []>;
+
+ class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
+ !strconcat("ld.param.v2", opstr,
+ " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
+
+ class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins i32imm:$b),
+ !strconcat("ld.param.v4", opstr,
+ " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
+ []>;
+}
+
+class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat("mov", opstr, " \t$dst, retval$b;"),
+ [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
+
+let mayStore = 1 in {
+ class StoreParamInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
+ []>;
+
+ class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
+ i32imm:$a, i32imm:$b),
+ !strconcat("st.param.v2", opstr,
+ " \t[param$a+$b], {{$val, $val2}};"),
+ []>;
+
+ class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a,
+ i32imm:$b),
+ !strconcat("st.param.v4", opstr,
+ " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
+ []>;
+
+ class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
+ !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
+ []>;
+
+ class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
+ !strconcat("st.param.v2", opstr,
+ " \t[func_retval0+$a], {{$val, $val2}};"),
+ []>;
+
+ class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a),
+ !strconcat("st.param.v4", opstr,
+ " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
+ []>;
+}
+
+let isCall=1 in {
+ multiclass CALL<string OpcStr, SDNode OpNode> {
+ def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
+ def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
+ def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
+ def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
+ def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
+ [(OpNode (i32 4))]>;
+ def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
+ [(OpNode (i32 5))]>;
+ def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5), "),
+ [(OpNode (i32 6))]>;
+ def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6), "),
+ [(OpNode (i32 7))]>;
+ def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6, retval7), "),
+ [(OpNode (i32 8))]>;
+ }
+}
+
+defm Call : CALL<"call", PrintCall>;
+defm CallUni : CALL<"call.uni", PrintCallUni>;
+
+// Convergent call instructions. These are identical to regular calls, except
+// they have the isConvergent bit set.
+let isConvergent=1 in {
+ defm ConvergentCall : CALL<"call", PrintConvergentCall>;
+ defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
+}
+
+def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
+def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
+def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
+def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
+def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
+def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
+def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
+def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
+def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
+def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
+def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
+def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
+def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
+def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
+def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
+def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
+def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
+def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
+def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
+def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
+
+def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
+def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
+
+def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
+def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
+def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
+def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
+def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
+def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
+
+def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
+def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
+def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
+
+def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
+def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
+def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
+def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
+def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
+def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
+def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
+def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
+def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
+def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
+
+def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
+def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
+def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
+def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
+def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
+def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
+def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
+def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
+def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
+def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
+def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
+
+def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
+def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
+def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
+def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
+def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
+def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
+def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
+def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
+def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
+def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
+def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
+
+def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
+def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
+def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
+def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
+
+class CallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a, ",
+ [(CallArg (i32 0), regclass:$a)]>;
+
+class LastCallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a",
+ [(LastCallArg (i32 0), regclass:$a)]>;
+
+def CallArgI64 : CallArgInst<Int64Regs>;
+def CallArgI32 : CallArgInst<Int32Regs>;
+def CallArgI16 : CallArgInst<Int16Regs>;
+def CallArgF64 : CallArgInst<Float64Regs>;
+def CallArgF32 : CallArgInst<Float32Regs>;
+
+def LastCallArgI64 : LastCallArgInst<Int64Regs>;
+def LastCallArgI32 : LastCallArgInst<Int32Regs>;
+def LastCallArgI16 : LastCallArgInst<Int16Regs>;
+def LastCallArgF64 : LastCallArgInst<Float64Regs>;
+def LastCallArgF32 : LastCallArgInst<Float32Regs>;
+
+def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
+ [(CallArg (i32 0), (i32 imm:$a))]>;
+def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
+ [(LastCallArg (i32 0), (i32 imm:$a))]>;
+
+def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
+ [(CallArg (i32 1), (i32 imm:$a))]>;
+def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
+ [(LastCallArg (i32 1), (i32 imm:$a))]>;
+
+def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
+ [(CallVoid (Wrapper tglobaladdr:$addr))]>;
+def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
+ [(CallVoid Int32Regs:$addr)]>;
+def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
+ [(CallVoid Int64Regs:$addr)]>;
+def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
+ [(Prototype (i32 imm:$val))]>;
+
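+// Illustrative sketch: the fragments above are printed back to back, so a
+// direct call returning one value ends up looking roughly like
+//   call.uni (retval0), foo, (param0, param1);
+// (the callee name and parameter count here are hypothetical).
+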
+def DeclareRetMemInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
+ ".param .align $align .b8 retval$num[$size];",
+ [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetScalarInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".param .b$size retval$num;",
+ [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetRegInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".reg .b$size retval$num;",
+ [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
+
+def DeclareParamInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
+ ".param .align $align .b8 param$a[$size];",
+ [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
+def DeclareScalarParamInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".param .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
+def DeclareScalarRegInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".reg .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
+
+class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
+ NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+ !strconcat("mov", asmstr, " \t$dst, $src;"),
+ [(set regclass:$dst, (MoveParam regclass:$src))]>;
+
+def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
+def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
+def MoveParamI16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.u16.u32 \t$dst, $src;",
+ [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
+def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
+def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
+def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
+
+class PseudoUseParamInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$src),
+ "// Pseudo use of $src",
+ [(PseudoUseParam regclass:$src)]>;
+
+def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
+def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
+def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
+def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
+def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
+
+
+//
+// Load / Store Handling
+//
+multiclass LD<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _ari : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _ari_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _asi : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+}
+
+let mayLoad=1, hasSideEffects=0 in {
+ defm LD_i8 : LD<Int16Regs>;
+ defm LD_i16 : LD<Int16Regs>;
+ defm LD_i32 : LD<Int32Regs>;
+ defm LD_i64 : LD<Int64Regs>;
+ defm LD_f16 : LD<Float16Regs>;
+ defm LD_f16x2 : LD<Float16x2Regs>;
+ defm LD_f32 : LD<Float32Regs>;
+ defm LD_f64 : LD<Float64Regs>;
+}
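+
+// Illustrative sketch: the LdStCode operands are filled in during ISel and
+// printed by printLdStCode, so one definition covers many concrete forms.
+// For example, a non-volatile 32-bit load from the global space through a
+// 64-bit address register (LD_i32_ari_64) would print roughly as (register
+// names hypothetical):
+//   ld.global.u32  %r1, [%rd1+4];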
+
+multiclass ST<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+}
+
+let mayStore=1, hasSideEffects=0 in {
+ defm ST_i8 : ST<Int16Regs>;
+ defm ST_i16 : ST<Int16Regs>;
+ defm ST_i32 : ST<Int32Regs>;
+ defm ST_i64 : ST<Int64Regs>;
+ defm ST_f16 : ST<Float16Regs>;
+ defm ST_f16x2 : ST<Float16x2Regs>;
+ defm ST_f32 : ST<Float32Regs>;
+ defm ST_f64 : ST<Float64Regs>;
+}
+
+// The following instructions are used only in and after vector
+// elementization. Vector elementization happens at the machine-instruction
+// level, so these instructions never appear in the DAG.
+multiclass LD_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v4_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+}
+let mayLoad=1, hasSideEffects=0 in {
+ defm LDV_i8 : LD_VEC<Int16Regs>;
+ defm LDV_i16 : LD_VEC<Int16Regs>;
+ defm LDV_i32 : LD_VEC<Int32Regs>;
+ defm LDV_i64 : LD_VEC<Int64Regs>;
+ defm LDV_f16 : LD_VEC<Float16Regs>;
+ defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
+ defm LDV_f32 : LD_VEC<Float32Regs>;
+ defm LDV_f64 : LD_VEC<Float64Regs>;
+}
+
+multiclass ST_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v4_avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+}
+
+let mayStore=1, hasSideEffects=0 in {
+ defm STV_i8 : ST_VEC<Int16Regs>;
+ defm STV_i16 : ST_VEC<Int16Regs>;
+ defm STV_i32 : ST_VEC<Int32Regs>;
+ defm STV_i64 : ST_VEC<Int64Regs>;
+ defm STV_f16 : ST_VEC<Float16Regs>;
+ defm STV_f16x2 : ST_VEC<Float16x2Regs>;
+ defm STV_f32 : ST_VEC<Float32Regs>;
+ defm STV_f64 : ST_VEC<Float64Regs>;
+}
+
+//---- Conversion ----
+
+class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
+ NVPTXRegClass regclassOut> :
+ NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
+ !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
+ [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
+
+def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
+def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
+def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
+def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
+def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
+def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
+def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
+def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
+
+// NOTE: pred->fp conversions are currently sub-optimal due to an issue in
+// TableGen where we cannot specify floating-point literals in isel patterns.
+// Therefore, we use an integer selp to select either 1 or 0 and then cvt to
+// floating-point.
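+// Illustrative sketch: (f32 (uint_to_fp i1:$p)) therefore becomes a selp
+// followed by a cvt, roughly (register names hypothetical):
+//   selp.u32  %r1, 1, 0, %p1;
+//   cvt.rn.f32.u32  %f1, %r1;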
+
+// sint -> f16
+def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
+ (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
+ (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
+ (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
+ (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f16
+def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
+ (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
+ (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
+ (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
+ (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f32
+def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
+ (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
+ (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
+ (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
+ (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f32
+def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
+ (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
+ (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
+ (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
+ (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f64
+def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
+ (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
+ (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
+ (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
+ (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f64
+def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
+ (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
+ (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
+ (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
+ (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+
+
+// f16 -> sint
+def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f16 -> uint
+def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f32 -> sint
+def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f32 -> uint
+def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f64 -> sint
+def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
+ (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
+ (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
+ (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
+
+// f64 -> uint
+def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
+ (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
+ (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
+ (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
+
+// sext i1
+def : Pat<(i16 (sext Int1Regs:$a)),
+ (SELP_s16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (sext Int1Regs:$a)),
+ (SELP_s32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (sext Int1Regs:$a)),
+ (SELP_s64ii -1, 0, Int1Regs:$a)>;
+
+// zext i1
+def : Pat<(i16 (zext Int1Regs:$a)),
+ (SELP_u16ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (zext Int1Regs:$a)),
+ (SELP_u32ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (zext Int1Regs:$a)),
+ (SELP_u64ii 1, 0, Int1Regs:$a)>;
+
+// anyext i1
+def : Pat<(i16 (anyext Int1Regs:$a)),
+ (SELP_u16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (anyext Int1Regs:$a)),
+ (SELP_u32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (anyext Int1Regs:$a)),
+ (SELP_u64ii -1, 0, Int1Regs:$a)>;
+
+// sext i16
+def : Pat<(i32 (sext Int16Regs:$a)),
+ (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (sext Int16Regs:$a)),
+ (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
+
+// zext i16
+def : Pat<(i32 (zext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (zext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// anyext i16
+def : Pat<(i32 (anyext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (anyext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// sext i32
+def : Pat<(i64 (sext Int32Regs:$a)),
+ (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
+
+// zext i32
+def : Pat<(i64 (zext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+// anyext i32
+def : Pat<(i64 (anyext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+
+// truncate i64
+def : Pat<(i32 (trunc Int64Regs:$a)),
+ (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i16 (trunc Int64Regs:$a)),
+ (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int64Regs:$a)),
+ (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i32
+def : Pat<(i16 (trunc Int32Regs:$a)),
+ (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int32Regs:$a)),
+ (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i16
+def : Pat<(i1 (trunc Int16Regs:$a)),
+ (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
+
+// sext_inreg
+def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
+
+
+// Select instructions with 32-bit predicates
+def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
+ (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
+ (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+ (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
+ (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+ (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+ (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+
+
+let hasSideEffects = 0 in {
+ // pack a set of smaller int registers to a larger int register
+ def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2,
+ Int16Regs:$s3, Int16Regs:$s4),
+ "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
+ def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2),
+ "mov.b32 \t$d, {{$s1, $s2}};", []>;
+ def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int32Regs:$s1, Int32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+
+ // unpack a larger int register to a set of smaller int registers
+ def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
+ Int16Regs:$d3, Int16Regs:$d4),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
+ def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
+ (ins Int32Regs:$s),
+ "mov.b32 \t{{$d1, $d2}}, $s;", []>;
+ def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Float64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+
+}
+
+let hasSideEffects = 0 in {
+  // Extract an element of an f16x2 register. PTX does not provide a way to
+  // access the elements of an f16x2 vector directly, so we extract them
+  // through a temporary register.
+ def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_hi;\n\t"
+ " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
+ def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_lo;\n\t"
+ " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
+
+ // Coalesce two f16 registers into f16x2
+ def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ "mov.b32 \t$dst, {{$a, $b}};",
+ [(set Float16x2Regs:$dst,
+ (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
+
+  // Directly initializing the underlying b32 register is one less SASS
+  // instruction than a vector-packing move.
+ def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
+ "mov.b32 \t$dst, $src;",
+ []>;
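+
+  // Illustrative only (not part of this change): e.g.
+  //   mov.b32 %hh1, 0x3C003C00;
+  // materializes the f16x2 constant {1.0, 1.0} in one move (0x3C00 is fp16
+  // 1.0), instead of two f16 moves plus a packing move.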
+
+ // Split f16x2 into two f16 registers.
+ def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Float16x2Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+ // Split an i32 into two f16
+ def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Int32Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+}
+
+// Count leading zeros
+let hasSideEffects = 0 in {
+ def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "clz.b32 \t$d, $a;", []>;
+ def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "clz.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
+
+// The return type of the ctlz ISD node is the same as its input, but the PTX
+// clz instruction always returns a 32-bit value. For ctlz.i64, convert the
+// PTX value to 64 bits to match the ISD node's semantics, unless we know we're
+// truncating back down to 32 bits.
+def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
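+
+// Illustrative only (not part of this change): for the first pattern above,
+// the generated PTX is roughly
+//
+//   clz.b64     %r1, %rd1;    // 32-bit count of leading zeros of a 64-bit value
+//   cvt.u64.u32 %rd2, %r1;    // widen the result back to 64 bits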
+
+// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
+// result back to 16-bits if necessary. We also need to subtract 16 because
+// the high-order 16 zeros were counted.
+//
+// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
+// use to save one SASS instruction (on sm_35 anyway):
+//
+// mov.b32 $tmp, {0xffff, $a}
+//   clz.b32 $result, $tmp
+//
+// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
+// and then ctlz that value. This way we don't have to subtract 16 from the
+// result. Unfortunately today we don't have a way to generate
+// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
+def : Pat<(ctlz Int16Regs:$a),
+ (SUBi16ri (CVT_u16_u32
+ (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
+def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
+ (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
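+
+// Illustrative only (not part of this change): the plain 16-bit pattern above
+// roughly expands to
+//
+//   cvt.u32.u16 %r1, %rs1;      // zero-extend the i16 input to 32 bits
+//   clz.b32     %r2, %r1;       // count leading zeros of the widened value
+//   cvt.u16.u32 %rs2, %r2;      // truncate the count back to 16 bits
+//   sub.s16     %rs3, %rs2, 16; // discount the 16 extra high-order zeros
+//
+// (exact mnemonics depend on the SUB/CVT instruction definitions elsewhere in
+// this file).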
+
+// Population count
+let hasSideEffects = 0 in {
+ def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "popc.b32 \t$d, $a;", []>;
+ def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "popc.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
+
+// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
+// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
+// pattern that avoids the type conversion if we're truncating the result to
+// i32 anyway.
+def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
+
+// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
+// If we know that we're storing into an i32, we can avoid the final trunc.
+def : Pat<(ctpop Int16Regs:$a),
+ (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
+def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
+ (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
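+
+// Illustrative only (not part of this change): the 16-bit ctpop uses the same
+// widen/count/narrow idiom, roughly
+//
+//   cvt.u32.u16 %r1, %rs1;
+//   popc.b32    %r2, %r1;
+//   cvt.u16.u32 %rs2, %r2;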
+
+// fpround f32 -> f16
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+
+// fpround f64 -> f16
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
+
+// fpround f64 -> f32
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
+
+// fpextend f16 -> f32
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f16 -> f64
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f32 -> f64
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
+
+def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+// fceil, ffloor, fround, ftrunc.
+
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
+
+def : Pat<(fround Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fround Float16Regs:$a)),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fround Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fround Float32Regs:$a)),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(f64 (fround Float64Regs:$a)),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
+
+// nearbyint and rint are implemented as rounding to nearest even. This isn't
+// strictly correct, because it causes us to ignore the rounding mode. But it
+// matches what CUDA's "libm" does.
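+//
+// Illustrative only (not part of this change): for f32 this is a single
+//
+//   cvt.rni.f32.f32 %f2, %f1;   // round to nearest integer, ties to even
+//
+// regardless of the dynamic rounding mode.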
+
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+
+//-----------------------------------
+// Control-flow
+//-----------------------------------
+
+let isTerminator=1 in {
+ let isReturn=1, isBarrier=1 in
+ def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
+
+ let isBranch=1 in
+ def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@$a bra \t$target;",
+ [(brcond Int1Regs:$a, bb:$target)]>;
+ let isBranch=1 in
+ def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@!$a bra \t$target;", []>;
+
+ let isBranch=1, isBarrier=1 in
+ def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
+ "bra.uni \t$target;", [(br bb:$target)]>;
+}
+
+def : Pat<(brcond Int32Regs:$a, bb:$target),
+ (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
+
+// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
+// conditional branch if the target block is the next block, so that the code
+// can fall through to the target block. The inversion is done by
+// 'xor condition, 1', which will be translated to (setne condition, -1).
+// Since PTX supports '@!pred bra target', we should use it.
+def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
+ (CBranchOther Int1Regs:$a, bb:$target)>;
+
+// Call
+def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect]>;
+
+def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def calltarget : Operand<i32>;
+let isCall=1 in {
+ def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
+}
+
+def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : NVPTXInst<outs, ins, asmstr, pattern>;
+
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt),
+ "\\{ // callseq $amt\n"
+ "\t.reg .b32 temp_param_reg;",
+ [(callseq_start timm:$amt)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
+// trap instruction
+def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
+
+// Call prototype wrapper
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> {
+ let PrintMethod = "printProtoIdent";
+}
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
+include "NVPTXIntrinsics.td"
+
+
+//-----------------------------------
+// Notes
+//-----------------------------------
+// BSWAP is currently expanded. The following would be more efficient:
+// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit
+//   registers
+// - for sm_20, use prmt (use vector scalar mov to get the pack and
+//   unpack). sm_20 supports native 32-bit registers, but not native 16-bit
+//   registers.
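+//
+// Illustrative only (not part of this change): on sm_20+ a 32-bit byte swap
+// could probably be a single permute, e.g.
+//
+//   prmt.b32 %r2, %r1, %r1, 0x0123;   // reverse the four bytes of %r1
+//
+// assuming the usual prmt selector encoding; this is a sketch, not what the
+// current expansion emits.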
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 2a402deccbca..40bfe3a449f7 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1459,8 +1459,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
if (FI->usesPICBase())
- BuildMI(MBB, MBBI, dl, LoadInst)
- .addReg(PPC::R30)
+ BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30)
.addImm(PBPOffset)
.addReg(RBReg);
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 483e9b171d57..685f24cb502e 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12031,7 +12031,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index c44e371856a5..acb34d5baaa8 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1881,7 +1881,7 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
const SelectionDAG &DAG,
unsigned Depth) const {
KnownBits Known2;
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index fee008b9572a..a30bf34857b5 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -850,12 +850,18 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
- // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
+ // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
+ // super register in case one of the subregs is undefined.
+ // This handles ADDR128 too.
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
+ MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
+ .addReg(SrcReg, RegState::Implicit);
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
+ MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
+ .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
return;
}
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index c1cfc82b4a81..32ab475f1186 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -776,11 +776,6 @@ private:
bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
- /// MS-compatibility:
- /// Obtain an appropriate size qualifier, when facing its absence,
- /// upon AVX512 vector/broadcast memory operand
- unsigned AdjustAVX512Mem(unsigned Size, X86Operand* UnsizedMemOpNext);
-
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode64Bit];
@@ -1206,27 +1201,16 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
Identifier, Info.OpDecl);
}
+
// We either have a direct symbol reference, or an offset from a symbol. The
// parser always puts the symbol on the LHS, so look there for size
// calculation purposes.
+ unsigned FrontendSize = 0;
const MCBinaryExpr *BinOp = dyn_cast<MCBinaryExpr>(Disp);
bool IsSymRef =
isa<MCSymbolRefExpr>(BinOp ? BinOp->getLHS() : Disp);
- if (IsSymRef) {
- if (!Size) {
- Size = Info.Type * 8; // Size is in terms of bits in this context.
- if (Size)
- InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
- /*Len=*/0, Size);
- if (AllowBetterSizeMatch)
- // Handle cases where size qualifier is absent, upon an indirect symbol
- // reference - e.g. "vaddps zmm1, zmm2, [var]"
- // set Size to zero to allow matching mechansim to try and find a better
- // size qualifier than our initial guess, based on available variants of
- // the given instruction
- Size = 0;
- }
- }
+ if (IsSymRef && !Size && Info.Type)
+ FrontendSize = Info.Type * 8; // Size is in terms of bits in this context.
// When parsing inline assembly we set the base register to a non-zero value
// if we don't know the actual value at this time. This is necessary to
@@ -1234,7 +1218,7 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
BaseReg = BaseReg ? BaseReg : 1;
return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
IndexReg, Scale, Start, End, Size, Identifier,
- Info.OpDecl);
+ Info.OpDecl, FrontendSize);
}
static void
@@ -2884,23 +2868,6 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
return true;
}
-unsigned X86AsmParser::AdjustAVX512Mem(unsigned Size,
- X86Operand* UnsizedMemOpNext) {
- // Check for the existence of an AVX512 platform
- if (!getSTI().getFeatureBits()[X86::FeatureAVX512])
- return 0;
- // Allow adjusting upon a (x|y|z)mm
- if (Size == 512 || Size == 256 || Size == 128)
- return Size;
- // This is an allegadly broadcasting mem op adjustment,
- // allow some more inquiring to validate it
- if (Size == 64 || Size == 32)
- return UnsizedMemOpNext && UnsizedMemOpNext->isToken() &&
- UnsizedMemOpNext->getToken().substr(0, 4).equals("{1to") ? Size : 0;
- // Do not allow any other type of adjustments
- return 0;
-}
-
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
@@ -2920,19 +2887,14 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// Find one unsized memory operand, if present.
X86Operand *UnsizedMemOp = nullptr;
- // If unsized memory operand was found - obtain following operand.
- // For use in AdjustAVX512Mem
- X86Operand *UnsizedMemOpNext = nullptr;
for (const auto &Op : Operands) {
X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
- if (UnsizedMemOp) {
- UnsizedMemOpNext = X86Op;
+ if (X86Op->isMemUnsized()) {
+ UnsizedMemOp = X86Op;
// Have we found an unqualified memory operand,
// break. IA allows only one memory operand.
break;
}
- if (X86Op->isMemUnsized())
- UnsizedMemOp = X86Op;
}
// Allow some instructions to have implicitly pointer-sized operands. This is
@@ -2978,7 +2940,6 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// If an unsized memory operand is present, try to match with each memory
// operand size. In Intel assembly, the size is not part of the instruction
// mnemonic.
- unsigned MatchedSize = 0;
if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
for (unsigned Size : MopSizes) {
@@ -2993,17 +2954,10 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
- if (M == Match_Success)
- // MS-compatability:
- // Adjust AVX512 vector/broadcast memory operand,
- // when facing the absence of a size qualifier.
- // Match GCC behavior on respective cases.
- MatchedSize = AdjustAVX512Mem(Size, UnsizedMemOpNext);
}
// Restore the size of the unsized memory operand if we modified it.
- if (UnsizedMemOp)
- UnsizedMemOp->Mem.Size = 0;
+ UnsizedMemOp->Mem.Size = 0;
}
// If we haven't matched anything yet, this is not a basic integer or FPU
@@ -3027,20 +2981,30 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
Op.getLocRange(), MatchingInlineAsm);
}
+ unsigned NumSuccessfulMatches =
+ std::count(std::begin(Match), std::end(Match), Match_Success);
+
+ // If matching was ambiguous and we had size information from the frontend,
+  // try again with that. This handles cases like "movzx eax, m8/m16".
+ if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
+ UnsizedMemOp->getMemFrontendSize()) {
+ UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
+ unsigned M = MatchInstruction(
+ Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
+ if (M == Match_Success)
+ NumSuccessfulMatches = 1;
+
+ // Add a rewrite that encodes the size information we used from the
+ // frontend.
+ InstInfo->AsmRewrites->emplace_back(
+ AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
+ /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
+ }
+
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
- unsigned NumSuccessfulMatches =
- std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
- if (MatchedSize && isParsingInlineAsm() && isParsingIntelSyntax())
- // MS compatibility -
- // Fix the rewrite according to the matched memory size
- // MS inline assembly only
- for (AsmRewrite &AR : *InstInfo->AsmRewrites)
- if ((AR.Loc.getPointer() == UnsizedMemOp->StartLoc.getPointer()) &&
- (AR.Kind == AOK_SizeDirective))
- AR.Val = MatchedSize;
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.
@@ -3057,7 +3021,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
"multiple matches only possible with unsized memory operands");
return Error(UnsizedMemOp->getStartLoc(),
"ambiguous operand size for instruction '" + Mnemonic + "\'",
- UnsizedMemOp->getLocRange(), MatchingInlineAsm);
+ UnsizedMemOp->getLocRange());
}
// If one instruction matched with a missing feature, report this as a
diff --git a/lib/Target/X86/AsmParser/X86Operand.h b/lib/Target/X86/AsmParser/X86Operand.h
index 9f1fa6c65907..33eff14b8215 100644
--- a/lib/Target/X86/AsmParser/X86Operand.h
+++ b/lib/Target/X86/AsmParser/X86Operand.h
@@ -62,6 +62,10 @@ struct X86Operand : public MCParsedAsmOperand {
unsigned Scale;
unsigned Size;
unsigned ModeSize;
+
+ /// If the memory operand is unsized and there are multiple instruction
+ /// matches, prefer the one with this size.
+ unsigned FrontendSize;
};
union {
@@ -136,6 +140,10 @@ struct X86Operand : public MCParsedAsmOperand {
assert(Kind == Memory && "Invalid access!");
return Mem.ModeSize;
}
+ unsigned getMemFrontendSize() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.FrontendSize;
+ }
bool isToken() const override {return Kind == Token; }
@@ -512,7 +520,7 @@ struct X86Operand : public MCParsedAsmOperand {
static std::unique_ptr<X86Operand>
CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0) {
auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
@@ -521,6 +529,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
+ Res->Mem.FrontendSize = FrontendSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
@@ -532,7 +541,7 @@ struct X86Operand : public MCParsedAsmOperand {
CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
@@ -548,6 +557,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.Scale = Scale;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
+ Res->Mem.FrontendSize = FrontendSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 44bc373b0394..d7c3b74d3efb 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -91,6 +91,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
X86MCInstLower &MCIL);
void LowerPATCHABLE_RET(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index a94045cd536d..331e56976db7 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -2990,6 +2990,10 @@ unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF)
void X86FrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
+ // Mark the function as not having WinCFI. We will set it back to true in
+ // emitPrologue if it gets called and emits CFI.
+ MF.setHasWinCFI(false);
+
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
const Function *Fn = MF.getFunction();
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 83542aaa013b..9ee2234595f9 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1224,10 +1224,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
- setOperationAction(ISD::VSELECT, MVT::v8i1, Expand);
- setOperationAction(ISD::VSELECT, MVT::v16i1, Expand);
+
if (Subtarget.hasDQI()) {
for (auto VT : { MVT::v2i64, MVT::v4i64, MVT::v8i64 }) {
setOperationAction(ISD::SINT_TO_FP, VT, Legal);
@@ -1243,8 +1240,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
if (Subtarget.hasVLX()) {
- setOperationAction(ISD::ABS, MVT::v4i64, Legal);
- setOperationAction(ISD::ABS, MVT::v2i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
@@ -1270,8 +1265,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
}
- setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
@@ -1304,33 +1297,34 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
- setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
- setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
-
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
- setOperationAction(ISD::SELECT, MVT::v16i1, Custom);
- setOperationAction(ISD::SELECT, MVT::v8i1, Custom);
-
- setOperationAction(ISD::ADD, MVT::v8i1, Custom);
- setOperationAction(ISD::ADD, MVT::v16i1, Custom);
- setOperationAction(ISD::SUB, MVT::v8i1, Custom);
- setOperationAction(ISD::SUB, MVT::v16i1, Custom);
- setOperationAction(ISD::MUL, MVT::v8i1, Custom);
- setOperationAction(ISD::MUL, MVT::v16i1, Custom);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
+ // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
+ setOperationAction(ISD::ABS, MVT::v4i64, Legal);
+ setOperationAction(ISD::ABS, MVT::v2i64, Legal);
+
+ for (auto VT : { MVT::v8i1, MVT::v16i1 }) {
+ setOperationAction(ISD::ADD, VT, Custom);
+ setOperationAction(ISD::SUB, VT, Custom);
+ setOperationAction(ISD::MUL, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::TRUNCATE, VT, Custom);
+
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ }
+
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
@@ -1352,33 +1346,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::XOR, MVT::v16i32, MVT::v8i64);
if (Subtarget.hasCDI()) {
- setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
-
- setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
- setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
- setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
- setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
-
- if (Subtarget.hasVLX()) {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
- } else {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
+ // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
+ for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v16i32, MVT::v2i64,
+ MVT::v4i64, MVT::v8i64}) {
+ setOperationAction(ISD::CTLZ, VT, Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
}
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
} // Subtarget.hasCDI()
if (Subtarget.hasDQI()) {
@@ -6070,7 +6043,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- if (NumNonZero > 8)
+ if (NumNonZero > 8 && !Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
@@ -6158,7 +6131,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- if (NumNonZero > 4)
+ if (NumNonZero > 4 && !Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
@@ -6241,7 +6214,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
Elt = Op->getOperand(EltIdx);
// By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
- EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
+ EltMaskIdx = Elt.getConstantOperandVal(1);
if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
break;
Mask[EltIdx] = EltIdx;
@@ -6272,8 +6245,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
SDValue SrcVector = Current->getOperand(0);
if (!V1.getNode())
V1 = SrcVector;
- CanFold = SrcVector == V1 &&
- cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
+ CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i);
}
if (!CanFold)
@@ -20944,54 +20916,62 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
+// Split a unary integer op into 2 half-sized ops.
+static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned SizeInBits = VT.getSizeInBits();
+
+ // Extract the Lo/Hi vectors
+ SDLoc dl(Op);
+ SDValue Src = Op.getOperand(0);
+ SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
+ SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
+
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
+ DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
+}
+
+// Decompose 256-bit ops into smaller 128-bit ops.
+static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType().is256BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
+ "Only handle AVX 256-bit vector integer operation");
+ return LowerVectorIntUnary(Op, DAG);
+}
+
+// Decompose 512-bit ops into smaller 256-bit ops.
+static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType().is512BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
+ "Only handle AVX 512-bit vector integer operation");
+ return LowerVectorIntUnary(Op, DAG);
+}
+
+/// \brief Lower a vector CTLZ using the natively supported vector CTLZ
+/// instruction.
//
-// 1. i32/i64 128/256-bit vector (native support require VLX) are expended
-// to 512-bit vector.
-// 2. i8/i16 vector implemented using dword LZCNT vector instruction
-// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
-// split the vector, perform operation on it's Lo a Hi part and
-// concatenate the results.
-static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+// i8/i16 vectors are implemented using the dword LZCNT vector instruction
+// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
+// split the vector, perform the operation on its Lo and Hi parts, and
+// concatenate the results.
+static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
- if (EltVT == MVT::i64 || EltVT == MVT::i32) {
- // Extend to 512 bit vector.
- assert((VT.is256BitVector() || VT.is128BitVector()) &&
- "Unsupported value type for operation");
-
- MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits());
- SDValue Vec512 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
- DAG.getUNDEF(NewVT),
- Op.getOperand(0),
- DAG.getIntPtrConstant(0, dl));
- SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Vec512);
-
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode,
- DAG.getIntPtrConstant(0, dl));
- }
-
assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
"Unsupported element type");
- if (16 < NumElems) {
- // Split vector, it's Lo and Hi parts will be handled in next iteration.
- SDValue Lo, Hi;
- std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
- MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- Lo = DAG.getNode(ISD::CTLZ, dl, OutVT, Lo);
- Hi = DAG.getNode(ISD::CTLZ, dl, OutVT, Hi);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
- }
+  // Split the vector; its Lo and Hi parts are handled in the next iteration.
+ if (16 < NumElems)
+ return LowerVectorIntUnary(Op, DAG);
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
-
assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
"Unsupported value type for operation");
@@ -21078,23 +21058,17 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- SDValue Op0 = Op.getOperand(0);
- if (Subtarget.hasAVX512())
- return LowerVectorCTLZ_AVX512(Op, DAG);
+ if (Subtarget.hasCDI())
+ return LowerVectorCTLZ_AVX512CDI(Op, DAG);
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- unsigned NumElems = VT.getVectorNumElements();
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
- // Extract each 128-bit vector, perform ctlz and concat the result.
- SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- DAG.getNode(ISD::CTLZ, DL, LHS.getValueType(), LHS),
- DAG.getNode(ISD::CTLZ, DL, RHS.getValueType(), RHS));
- }
+ // Decompose 512-bit ops into smaller 256-bit ops.
+ if (VT.is512BitVector() && !Subtarget.hasBWI())
+ return Lower512IntUnary(Op, DAG);
assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
@@ -21258,19 +21232,7 @@ static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
- MVT VT = Op.getSimpleValueType();
- unsigned NumElems = VT.getVectorNumElements();
-
- SDLoc dl(Op);
- SDValue Src = Op.getOperand(0);
- SDValue Lo = extract128BitVector(Src, 0, DAG, dl);
- SDValue Hi = extract128BitVector(Src, NumElems / 2, DAG, dl);
-
- MVT EltVT = VT.getVectorElementType();
- MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- DAG.getNode(ISD::ABS, dl, NewVT, Lo),
- DAG.getNode(ISD::ABS, dl, NewVT, Hi));
+ return Lower256IntUnary(Op, DAG);
}
static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
@@ -23049,29 +23011,13 @@ static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
}
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract each 128-bit vector, compute pop count and concat the result.
- SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
- LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
- }
-
- if (VT.is512BitVector() && !Subtarget.hasBWI()) {
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract each 256-bit vector, compute pop count and concat the result.
- SDValue LHS = extract256BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract256BitVector(Op0, NumElems / 2, DAG, DL);
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
- LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
- }
+ // Decompose 512-bit ops into smaller 256-bit ops.
+ if (VT.is512BitVector() && !Subtarget.hasBWI())
+ return Lower512IntUnary(Op, DAG);
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
@@ -23098,20 +23044,12 @@ static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
DAG.getIntPtrConstant(0, DL));
}
- MVT SVT = VT.getVectorElementType();
int NumElts = VT.getVectorNumElements();
int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector()) {
- SDValue Lo = extract128BitVector(In, 0, DAG, DL);
- SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
-
- MVT HalfVT = MVT::getVectorVT(SVT, NumElts / 2);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo),
- DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi));
- }
+ if (VT.is256BitVector())
+ return Lower256IntUnary(Op, DAG);
assert(VT.is128BitVector() &&
"Only 128-bit vector bitreverse lowering supported.");
@@ -23152,14 +23090,8 @@ static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
"Only byte vector BITREVERSE supported");
// Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- MVT HalfVT = MVT::getVectorVT(MVT::i8, NumElts / 2);
- SDValue Lo = extract128BitVector(In, 0, DAG, DL);
- SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
- Lo = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo);
- Hi = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
- }
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
// Perform BITREVERSE using PSHUFB lookups. Each byte is split into
// two nibbles and a PSHUFB lookup to find the bitreverse of each
@@ -26585,6 +26517,10 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+    // Do nothing here; this is handled in the XRay instrumentation pass.
+ return BB;
case X86::LCMPXCHG8B: {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
@@ -26667,7 +26603,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Opc) {
default: break;
case X86ISD::ADD:
@@ -26697,7 +26633,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
- Known.Zero.setAllBits();
+ Known.setAllZero();
break;
}
@@ -26729,8 +26665,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Known = KnownBits(InBitWidth);
APInt DemandedSrcElts = APInt::getLowBitsSet(InNumElts, NumElts);
DAG.computeKnownBits(N0, Known, DemandedSrcElts, Depth + 1);
- Known.One = Known.One.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBitWidth);
break;
}
@@ -31671,10 +31606,9 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
if (auto *AmtConst = AmtBV->getConstantSplatNode())
SraAmt = AmtConst->getZExtValue();
- } else if (Mask.getOpcode() == X86ISD::VSRAI) {
- SDValue SraC = Mask.getOperand(1);
- SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
- }
+ } else if (Mask.getOpcode() == X86ISD::VSRAI)
+ SraAmt = Mask.getConstantOperandVal(1);
+
if ((SraAmt + 1) != EltBits)
return SDValue();
@@ -31708,7 +31642,9 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
V = Y;
if (V) {
- assert(EltBits == 8 || EltBits == 16 || EltBits == 32);
+ if (EltBits != 8 && EltBits != 16 && EltBits != 32)
+ return SDValue();
+
SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
SDValue SubOp2 = Mask;
@@ -34488,8 +34424,7 @@ static SDValue combineX86ADD(SDNode *N, SelectionDAG &DAG,
if (Carry.getOpcode() == ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC_CARRY) {
- auto *Cond = cast<ConstantSDNode>(Carry.getOperand(0));
- if (Cond->getZExtValue() == X86::COND_B)
+ if (Carry.getConstantOperandVal(0) == X86::COND_B)
return DCI.CombineTo(N, SDValue(N, 0), Carry.getOperand(1));
}
}
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index c38c13bb9757..71d395244b4a 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -8631,6 +8631,20 @@ multiclass avx512_unary_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
defm VPABS : avx512_unary_rm_vl_all<0x1C, 0x1D, 0x1E, 0x1F, "vpabs", abs>;
+// VPABS: Use the 512-bit version to implement 128/256-bit in the NoVLX case.
+let Predicates = [HasAVX512, NoVLX] in {
+ def : Pat<(v4i64 (abs VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPABSQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v2i64 (abs VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPABSQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+}
+
multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
defm NAME : avx512_unary_rm_vl_dq<opc, opc, OpcodeStr, ctlz, prd>;
@@ -8639,6 +8653,31 @@ multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
defm VPLZCNT : avx512_ctlz<0x44, "vplzcnt", HasCDI>;
defm VPCONFLICT : avx512_unary_rm_vl_dq<0xC4, 0xC4, "vpconflict", X86Conflict, HasCDI>;
+// VPLZCNT: Use the 512-bit version to implement 128/256-bit in the NoVLX case.
+let Predicates = [HasCDI, NoVLX] in {
+ def : Pat<(v4i64 (ctlz VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v2i64 (ctlz VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+
+ def : Pat<(v8i32 (ctlz VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTDZrr
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v4i32 (ctlz VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTDZrr
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+}
+
//===---------------------------------------------------------------------===//
// Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index cdf7ce19cdc8..902b0c2c04e3 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -1995,11 +1995,11 @@ def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
Requires<[In64BitMode]>;
// Data16 instruction prefix
-def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>,
+def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>,
Requires<[Not16BitMode]>;
// Data instruction prefix
-def DATA32_PREFIX : I<0x66, RawFrm, (outs), (ins), "data32", []>,
+def DATA32_PREFIX : I<0x66, RawFrm, (outs), (ins), "data32", []>,
Requires<[In16BitMode]>;
// Repeat string operation instruction prefixes
@@ -2518,7 +2518,7 @@ let SchedRW = [ WriteSystem ] in {
}
let Uses = [ ECX, EAX, EBX ] in {
- def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
+ def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
[(int_x86_mwaitx ECX, EAX, EBX)], IIC_SSE_MWAITX>,
TB, Requires<[ HasMWAITX ]>;
}
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index f22a50200c9a..48da2fa607af 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -6718,22 +6718,23 @@ let Constraints = "$src1 = $dst" in {
SSE_INTMUL_ITINS_P, 1>;
}
-let Predicates = [HasAVX, NoVLX] in {
+let Predicates = [HasAVX, NoVLX] in
defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_WIG;
+let Predicates = [HasAVX] in
defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_WIG;
-}
-let Predicates = [HasAVX2] in {
+
+let Predicates = [HasAVX2, NoVLX] in
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_L, VEX_WIG;
+let Predicates = [HasAVX2] in
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L, VEX_WIG;
-}
let Constraints = "$src1 = $dst" in {
defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
diff --git a/lib/Target/X86/X86InstructionSelector.cpp b/lib/Target/X86/X86InstructionSelector.cpp
index 38f7bc0af5c7..d65eb1de8d09 100644
--- a/lib/Target/X86/X86InstructionSelector.cpp
+++ b/lib/Target/X86/X86InstructionSelector.cpp
@@ -65,8 +65,8 @@ private:
MachineFunction &MF) const;
bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
- bool selectFrameIndex(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
+ bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -235,7 +235,7 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
return true;
if (selectLoadStoreOp(I, MRI, MF))
return true;
- if (selectFrameIndex(I, MRI, MF))
+ if (selectFrameIndexOrGep(I, MRI, MF))
return true;
if (selectConstant(I, MRI, MF))
return true;
@@ -427,27 +427,37 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-bool X86InstructionSelector::selectFrameIndex(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_FRAME_INDEX)
+bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ unsigned Opc = I.getOpcode();
+
+ if (Opc != TargetOpcode::G_FRAME_INDEX && Opc != TargetOpcode::G_GEP)
return false;
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
- // Use LEA to calculate frame index.
+  // Use LEA to calculate the frame index or GEP address.
unsigned NewOpc;
if (Ty == LLT::pointer(0, 64))
NewOpc = X86::LEA64r;
else if (Ty == LLT::pointer(0, 32))
NewOpc = STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
else
- llvm_unreachable("Can't select G_FRAME_INDEX, unsupported type.");
+ llvm_unreachable("Can't select G_FRAME_INDEX/G_GEP, unsupported type.");
I.setDesc(TII.get(NewOpc));
MachineInstrBuilder MIB(MF, I);
- addOffset(MIB, 0);
+
+ if (Opc == TargetOpcode::G_FRAME_INDEX) {
+ addOffset(MIB, 0);
+ } else {
+ MachineOperand &InxOp = I.getOperand(2);
+ I.addOperand(InxOp); // set IndexReg
+ InxOp.ChangeToImmediate(1); // set Scale
+ MIB.addImm(0).addReg(0);
+ }
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
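selectFrameIndexOrGep turns both opcodes into an LEA, whose memory operand is the usual x86 tuple of base, scale, index, displacement and segment; G_FRAME_INDEX keeps an empty index and a zero displacement, while G_GEP feeds its offset register in as the index with scale 1. A small standalone sketch of the address arithmetic such an LEA computes (the concrete numbers are hypothetical):

#include <cstdint>
#include <cstdio>

// The address an x86 LEA with operands (Base, Scale, Index, Disp) produces.
// An Index of 0 stands in for "no index register", as in the frame-index case.
static uint64_t leaAddress(uint64_t Base, uint64_t Scale, uint64_t Index,
                           int64_t Disp) {
  return Base + Index * Scale + Disp;
}

int main() {
  // Hypothetical frame base and GEP offset, for illustration only.
  uint64_t FrameBase = 0x7fffffffe000;
  std::printf("%#llx\n",
              (unsigned long long)leaAddress(FrameBase, /*Scale=*/1,
                                             /*Index=*/32, /*Disp=*/0));
  return 0;
}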
diff --git a/lib/Target/X86/X86LegalizerInfo.cpp b/lib/Target/X86/X86LegalizerInfo.cpp
index a437f6bf4714..4f5e70414aa9 100644
--- a/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/lib/Target/X86/X86LegalizerInfo.cpp
@@ -34,6 +34,11 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizerInfo64bit();
setLegalizerInfoSSE1();
setLegalizerInfoSSE2();
+ setLegalizerInfoSSE41();
+ setLegalizerInfoAVX2();
+ setLegalizerInfoAVX512();
+ setLegalizerInfoAVX512DQ();
+ setLegalizerInfoAVX512BW();
computeTables();
}
@@ -50,7 +55,7 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
- for (unsigned BinOp : {G_ADD, G_SUB})
+ for (unsigned BinOp : {G_ADD, G_SUB, G_MUL})
for (auto Ty : {s8, s16, s32})
setAction({BinOp, Ty}, Legal);
@@ -65,6 +70,12 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GEP, p0}, Legal);
+ setAction({G_GEP, 1, s32}, Legal);
+
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_GEP, 1, Ty}, WidenScalar);
+
// Constants
for (auto Ty : {s8, s16, s32, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -94,7 +105,7 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
- for (unsigned BinOp : {G_ADD, G_SUB})
+ for (unsigned BinOp : {G_ADD, G_SUB, G_MUL})
for (auto Ty : {s8, s16, s32, s64})
setAction({BinOp, Ty}, Legal);
@@ -109,6 +120,13 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GEP, p0}, Legal);
+ setAction({G_GEP, 1, s32}, Legal);
+ setAction({G_GEP, 1, s64}, Legal);
+
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_GEP, 1, Ty}, WidenScalar);
+
// Constants
for (auto Ty : {s8, s16, s32, s64, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -149,6 +167,7 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
return;
const LLT s64 = LLT::scalar(64);
+ const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
@@ -159,4 +178,83 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
for (unsigned BinOp : {G_ADD, G_SUB})
for (auto Ty : {v4s32})
setAction({BinOp, Ty}, Legal);
+
+ setAction({G_MUL, v8s16}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoSSE41() {
+ if (!Subtarget.hasSSE41())
+ return;
+
+ const LLT v4s32 = LLT::vector(4, 32);
+
+ setAction({G_MUL, v4s32}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX2() {
+ if (!Subtarget.hasAVX2())
+ return;
+
+ const LLT v16s16 = LLT::vector(16, 16);
+ const LLT v8s32 = LLT::vector(8, 32);
+
+ for (auto Ty : {v16s16, v8s32})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512() {
+ if (!Subtarget.hasAVX512())
+ return;
+
+ const LLT v16s32 = LLT::vector(16, 32);
+
+ setAction({G_MUL, v16s32}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v4s32 = LLT::vector(4, 32);
+ const LLT v8s32 = LLT::vector(8, 32);
+
+ for (auto Ty : {v4s32, v8s32})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
+ if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
+ return;
+
+ const LLT v8s64 = LLT::vector(8, 64);
+
+ setAction({G_MUL, v8s64}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v2s64 = LLT::vector(2, 64);
+ const LLT v4s64 = LLT::vector(4, 64);
+
+ for (auto Ty : {v2s64, v4s64})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
+ if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
+ return;
+
+ const LLT v32s16 = LLT::vector(32, 16);
+
+ setAction({G_MUL, v32s16}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v8s16 = LLT::vector(8, 16);
+ const LLT v16s16 = LLT::vector(16, 16);
+
+ for (auto Ty : {v8s16, v16s16})
+ setAction({G_MUL, Ty}, Legal);
}
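Taken together, the new hooks only mark vector G_MUL legal when the matching instruction set is present. A standalone summary of that mapping, derived from the hunks above (the parenthesised entries additionally require VLX):

#include <cstdio>

// Vector G_MUL types marked Legal by the new setLegalizerInfo* hooks,
// keyed by the subtarget feature that guards them.
struct MulLegalEntry { const char *Feature; const char *Types; };

int main() {
  const MulLegalEntry Table[] = {
      {"SSE2",      "v8s16"},
      {"SSE4.1",    "v4s32"},
      {"AVX2",      "v16s16, v8s32"},
      {"AVX-512F",  "v16s32 (+VLX: v4s32, v8s32)"},
      {"AVX-512DQ", "v8s64 (+VLX: v2s64, v4s64)"},
      {"AVX-512BW", "v32s16 (+VLX: v8s16, v16s16)"},
  };
  for (const auto &E : Table)
    std::printf("%-11s %s\n", E.Feature, E.Types);
  return 0;
}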
diff --git a/lib/Target/X86/X86LegalizerInfo.h b/lib/Target/X86/X86LegalizerInfo.h
index 3f00898b4232..ab5405a70427 100644
--- a/lib/Target/X86/X86LegalizerInfo.h
+++ b/lib/Target/X86/X86LegalizerInfo.h
@@ -38,6 +38,11 @@ private:
void setLegalizerInfo64bit();
void setLegalizerInfoSSE1();
void setLegalizerInfoSSE2();
+ void setLegalizerInfoSSE41();
+ void setLegalizerInfoAVX2();
+ void setLegalizerInfoAVX512();
+ void setLegalizerInfoAVX512DQ();
+ void setLegalizerInfoAVX512BW();
};
} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 550e3543a71e..598d88d8b9c3 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -1040,6 +1040,83 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
getSubtargetInfo());
}
+void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+  assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64");
+
+ // We want to emit the following pattern, which follows the x86 calling
+ // convention to prepare for the trampoline call to be patched in.
+ //
+  //   <args placement according to the SysV64 calling convention>
+ // .p2align 1, ...
+ // .Lxray_event_sled_N:
+ // jmp +N // jump across the call instruction
+ // callq __xray_CustomEvent // force relocation to symbol
+ // <args cleanup, jump to here>
+ //
+ // The relative jump needs to jump forward 24 bytes:
+ // 10 (args) + 5 (nops) + 9 (cleanup)
+ //
+ // After patching, it would look something like:
+ //
+ // nopw (2-byte nop)
+  //   callq __xray_CustomEvent  // already lowered
+ //
+ // ---
+ // First we emit the label and the jump.
+ auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
+ OutStreamer->AddComment("# XRay Custom Event Log");
+ OutStreamer->EmitCodeAlignment(2);
+ OutStreamer->EmitLabel(CurSled);
+
+  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
+  // an operand (computed relative to the instruction that follows the jmp).
+  // FIXME: Find a less hacky way to force the relative jump.
+ OutStreamer->EmitBytes("\xeb\x14");
+
+  // The SysV64 calling convention places the first two arguments into %rdi and
+  // %rsi -- so we only work with those.
+ unsigned UsedRegs[] = {X86::RDI, X86::RSI, X86::RAX};
+
+ // Because we will use %rax, we preserve that across the call.
+ EmitAndCountInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RAX));
+
+ // Then we put the operands in the %rdi and %rsi registers.
+ for (unsigned I = 0; I < MI.getNumOperands(); ++I)
+ if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
+ if (Op->isImm())
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(UsedRegs[I])
+ .addImm(Op->getImm()));
+ else if (Op->isReg()) {
+ if (Op->getReg() != UsedRegs[I])
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
+ .addReg(UsedRegs[I])
+ .addReg(Op->getReg()));
+ else
+ EmitNops(*OutStreamer, 3, Subtarget->is64Bit(), getSubtargetInfo());
+ }
+ }
+
+ // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
+ // name of the trampoline to be implemented by the XRay runtime. We put this
+ // explicitly in the %rax register.
+ auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
+ MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(X86::RAX)
+ .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
+
+ // Emit the call instruction.
+ EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(X86::RAX));
+
+ // Restore caller-saved and used registers.
+ OutStreamer->AddComment("xray custom event end.");
+ EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RAX));
+
+ recordSled(CurSled, MI, SledKind::CUSTOM_EVENT);
+}
+
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// We want to emit the following pattern:
@@ -1415,6 +1492,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::PATCHABLE_TAIL_CALL:
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+ return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
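The sled starts with raw bytes rather than an MCInst: 0xEB is the two-byte JMP rel8 encoding, whose signed displacement is taken relative to the instruction that follows the jump. A standalone sketch of that decoding (the sled address is hypothetical), showing that "\xeb\x14" transfers control 0x14 (20) bytes past the end of the jmp:

#include <cstdint>
#include <cstdio>

// Decode a short jump: 0xEB is JMP rel8; the signed 8-bit displacement is
// added to the address of the instruction following the 2-byte jmp.
static uint64_t shortJmpTarget(uint64_t JmpAddr, const uint8_t Bytes[2]) {
  int8_t Disp = static_cast<int8_t>(Bytes[1]);
  return JmpAddr + 2 + static_cast<int64_t>(Disp);
}

int main() {
  const uint8_t Sled[2] = {0xEB, 0x14};   // the bytes the sled emits
  uint64_t Addr = 0x401000;               // hypothetical sled address
  std::printf("jmp at %#llx targets %#llx\n",
              static_cast<unsigned long long>(Addr),
              static_cast<unsigned long long>(shortJmpTarget(Addr, Sled)));
  return 0;
}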
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 7be0a7fd4067..aabbf67a16b6 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -223,8 +223,6 @@ public:
StringRef getPassName() const override { return "X86 LEA Optimize"; }
- bool doInitialization(Module &M) override;
-
/// \brief Loop over all of the basic blocks, replacing address
/// calculations in load and store instructions, if it's already
/// been calculated by LEA. Also, remove redundant LEAs.
@@ -280,7 +278,6 @@ private:
MachineRegisterInfo *MRI;
const X86InstrInfo *TII;
const X86RegisterInfo *TRI;
- Module *TheModule;
static char ID;
};
@@ -649,11 +646,6 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
return Changed;
}
-bool OptimizeLEAPass::doInitialization(Module &M) {
- TheModule = &M;
- return false;
-}
-
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
diff --git a/lib/Target/X86/X86RegisterBankInfo.cpp b/lib/Target/X86/X86RegisterBankInfo.cpp
index 0f8a750a0235..efd3df26dd42 100644
--- a/lib/Target/X86/X86RegisterBankInfo.cpp
+++ b/lib/Target/X86/X86RegisterBankInfo.cpp
@@ -139,8 +139,9 @@ bool X86RegisterBankInfo::getInstrValueMapping(
return true;
}
-RegisterBankInfo::InstructionMapping
-X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI, bool isFP) {
+const RegisterBankInfo::InstructionMapping &
+X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
+ bool isFP) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -152,10 +153,10 @@ X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI, bool isFP) {
llvm_unreachable("Unsupported operand mapping yet.");
auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
- return InstructionMapping{DefaultMappingID, 1, Mapping, NumOperands};
+ return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -164,7 +165,7 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -193,10 +194,10 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Finally construct the computed mapping.
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
- return InstructionMapping();
+ return getInvalidInstructionMapping();
- return InstructionMapping{DefaultMappingID, /* Cost */ 1,
- getOperandsMapping(OpdsMapping), NumOperands};
+ return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
+ getOperandsMapping(OpdsMapping), NumOperands);
}
void X86RegisterBankInfo::applyMappingImpl(
@@ -231,10 +232,10 @@ X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
break;
- RegisterBankInfo::InstructionMapping Mapping = InstructionMapping{
- /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands};
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
+ /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
InstructionMappings AltMappings;
- AltMappings.emplace_back(std::move(Mapping));
+ AltMappings.push_back(&Mapping);
return AltMappings;
}
default:
diff --git a/lib/Target/X86/X86RegisterBankInfo.h b/lib/Target/X86/X86RegisterBankInfo.h
index a1e01a9ab949..e227880427f3 100644
--- a/lib/Target/X86/X86RegisterBankInfo.h
+++ b/lib/Target/X86/X86RegisterBankInfo.h
@@ -46,8 +46,8 @@ private:
/// Get an instruction mapping.
/// \return An InstructionMappings with a statically allocated
/// OperandsMapping.
- static InstructionMapping getSameOperandsMapping(const MachineInstr &MI,
- bool isFP);
+ const InstructionMapping &getSameOperandsMapping(const MachineInstr &MI,
+ bool isFP) const;
/// Track the bank of each instruction operand(register)
static void
@@ -74,7 +74,8 @@ public:
/// See RegisterBankInfo::applyMapping.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // namespace llvm
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 9ab751e2b002..d66d39dcee17 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -139,12 +139,18 @@ X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
return X86II::MO_NO_FLAG;
assert(!isTargetCOFF());
+ const Function *F = dyn_cast_or_null<Function>(GV);
- if (isTargetELF())
+ if (isTargetELF()) {
+ if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
+      // According to the psABI, the PLT stub may clobber XMM8-XMM15. The
+      // RegCall calling convention uses those registers for passing
+      // parameters, so we must prevent lazy binding for RegCall callees.
+ return X86II::MO_GOTPCREL;
return X86II::MO_PLT;
+ }
if (is64Bit()) {
- auto *F = dyn_cast_or_null<Function>(GV);
if (F && F->hasFnAttribute(Attribute::NonLazyBind))
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
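The ELF branch above adds a single extra predicate: a 64-bit RegCall callee must not be reached through a lazily-bound PLT stub, since the stub may clobber the XMM registers RegCall uses for arguments, so the reference is routed through the GOT instead. A standalone sketch of that decision with the flags reduced to plain values (the names here are illustrative, not the real X86II enum):

#include <cstdio>

enum RefFlag { MO_PLT, MO_GOTPCREL };

// Mirror of the new ELF branch in classifyGlobalFunctionReference: RegCall
// callees on 64-bit ELF avoid the PLT, everything else keeps using it.
static RefFlag classifyELFCall(bool Is64Bit, bool CalleeIsRegCall) {
  if (Is64Bit && CalleeIsRegCall)
    return MO_GOTPCREL;
  return MO_PLT;
}

int main() {
  std::printf("regcall callee  -> %s\n",
              classifyELFCall(true, true) == MO_GOTPCREL ? "GOTPCREL" : "PLT");
  std::printf("ordinary callee -> %s\n",
              classifyELFCall(true, false) == MO_GOTPCREL ? "GOTPCREL" : "PLT");
  return 0;
}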
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index b742fb472372..f3b619a2956a 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1426,25 +1426,25 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
{ ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
};
static const CostTblEntry AVX1CostTbl[] = {
- { ISD::BITREVERSE, MVT::v4i64, 10 },
- { ISD::BITREVERSE, MVT::v8i32, 10 },
- { ISD::BITREVERSE, MVT::v16i16, 10 },
- { ISD::BITREVERSE, MVT::v32i8, 10 },
+ { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
{ ISD::BSWAP, MVT::v4i64, 4 },
{ ISD::BSWAP, MVT::v8i32, 4 },
{ ISD::BSWAP, MVT::v16i16, 4 },
- { ISD::CTLZ, MVT::v4i64, 46 },
- { ISD::CTLZ, MVT::v8i32, 36 },
- { ISD::CTLZ, MVT::v16i16, 28 },
- { ISD::CTLZ, MVT::v32i8, 18 },
- { ISD::CTPOP, MVT::v4i64, 14 },
- { ISD::CTPOP, MVT::v8i32, 22 },
- { ISD::CTPOP, MVT::v16i16, 18 },
- { ISD::CTPOP, MVT::v32i8, 12 },
- { ISD::CTTZ, MVT::v4i64, 20 },
- { ISD::CTTZ, MVT::v8i32, 28 },
- { ISD::CTTZ, MVT::v16i16, 24 },
- { ISD::CTTZ, MVT::v32i8, 18 },
+ { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
{ ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
{ ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
{ ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 4d3ecf25dc34..b8742683a0c8 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1825,7 +1825,7 @@ void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD: