Diffstat (limited to 'contrib/llvm-project/llvm/utils')
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp      16
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp  236
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.cpp               8
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h                 1
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp  206
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp      210
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/X86FoldTablesEmitter.cpp            6
-rw-r--r--  contrib/llvm-project/llvm/utils/TableGen/X86ManualCompressEVEXTables.def   331
8 files changed, 793 insertions, 221 deletions
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
index 89aca87a28ec..348b3b3e0898 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
@@ -284,7 +284,8 @@ private:
/// succeed.
PatternType inferNamedOperandType(const InstructionPattern &IP,
StringRef OpName,
- const TypeEquivalenceClasses &TECs) const;
+ const TypeEquivalenceClasses &TECs,
+ bool AllowSelf = false) const;
const Record &RuleDef;
SmallVector<InstructionPattern *, 8> MatchPats;
@@ -427,8 +428,8 @@ PatternType CombineRuleOperandTypeChecker::inferImmediateType(
continue;
// Named operand with the same name, try to infer that.
- if (PatternType InferTy =
- inferNamedOperandType(IP, Op.getOperandName(), TECs))
+ if (PatternType InferTy = inferNamedOperandType(IP, Op.getOperandName(),
+ TECs, /*AllowSelf=*/true))
return InferTy;
}
}
@@ -438,16 +439,17 @@ PatternType CombineRuleOperandTypeChecker::inferImmediateType(
PatternType CombineRuleOperandTypeChecker::inferNamedOperandType(
const InstructionPattern &IP, StringRef OpName,
- const TypeEquivalenceClasses &TECs) const {
+ const TypeEquivalenceClasses &TECs, bool AllowSelf) const {
// This is the simplest possible case, we just need to find a TEC that
- // contains OpName. Look at all other operands in equivalence class and try to
- // find a suitable one.
+ // contains OpName. Look at all operands in the equivalence class and try to
+ // find a suitable one. If `AllowSelf` is true, the operand itself is also
+ // considered suitable.
// Check for a def of a matched pattern. This is guaranteed to always
// be a register so we can blindly use that.
StringRef GoodOpName;
for (auto It = TECs.findLeader(OpName); It != TECs.member_end(); ++It) {
- if (*It == OpName)
+ if (!AllowSelf && *It == OpName)
continue;
const auto LookupRes = MatchOpTable.lookup(*It);
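A side note on the data structure used above: TypeEquivalenceClasses is built on llvm::EquivalenceClasses, and the loop walks one class via findLeader()/member_end(). The sketch below is not part of the patch; the operand names are made up and it only illustrates the iteration and the new AllowSelf handling.

```
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Hypothetical operand names; "dst"/"src"/"imm" stand in for pattern
  // operands that were unioned into one type equivalence class.
  llvm::EquivalenceClasses<llvm::StringRef> TECs;
  TECs.unionSets("dst", "src");
  TECs.unionSets("src", "imm");

  // Mirror of the loop above: visit every member of the class containing
  // "imm", skipping the queried operand itself unless AllowSelf is set.
  bool AllowSelf = false;
  for (auto It = TECs.findLeader("imm"); It != TECs.member_end(); ++It) {
    if (!AllowSelf && *It == "imm")
      continue;
    llvm::outs() << *It << "\n"; // prints "dst" and "src"
  }
  return 0;
}
```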
diff --git a/contrib/llvm-project/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
new file mode 100644
index 000000000000..78dcd4471ae7
--- /dev/null
+++ b/contrib/llvm-project/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
@@ -0,0 +1,236 @@
+//===------ MacroFusionPredicatorEmitter.cpp - Generator for Fusion ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// MacroFusionPredicatorEmitter implements a TableGen-driven predicator
+// generator for macro-op fusions.
+//
+// This TableGen backend processes `Fusion` definitions and generates
+// predicators that check whether two input instructions can be fused. These
+// predicators can be used in the `MacroFusion` DAG mutation.
+//
+// The generated header file contains two parts: one for predicator
+// declarations and one for predicator implementations. The user can get them
+// by defining the macro `GET_<TargetName>_MACRO_FUSION_PRED_DECL` or
+// `GET_<TargetName>_MACRO_FUSION_PRED_IMPL` and then including the generated
+// header file.
+//
+// The generated predicator will look like:
+//
+// ```
+// bool isNAME(const TargetInstrInfo &TII,
+// const TargetSubtargetInfo &STI,
+// const MachineInstr *FirstMI,
+// const MachineInstr &SecondMI) {
+// auto &MRI = SecondMI.getMF()->getRegInfo();
+// /* Predicates */
+// return true;
+// }
+// ```
+//
+// The `Predicates` part is generated from a list of `FusionPredicate`s, which
+// can be predefined predicates, raw code strings, or `MCInstPredicate`s
+// defined in TargetInstrPredicate.td.
+//
+//===---------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "PredicateExpander.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <set>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "macro-fusion-predicator"
+
+namespace {
+class MacroFusionPredicatorEmitter {
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+
+ void emitMacroFusionDecl(std::vector<Record *> Fusions, PredicateExpander &PE,
+ raw_ostream &OS);
+ void emitMacroFusionImpl(std::vector<Record *> Fusions, PredicateExpander &PE,
+ raw_ostream &OS);
+ void emitPredicates(std::vector<Record *> &Predicates,
+ PredicateExpander &PE, raw_ostream &OS);
+ void emitFirstPredicate(Record *Predicate, PredicateExpander &PE,
+ raw_ostream &OS);
+ void emitSecondPredicate(Record *Predicate, PredicateExpander &PE,
+ raw_ostream &OS);
+ void emitBothPredicate(Record *Predicate, PredicateExpander &PE,
+ raw_ostream &OS);
+
+public:
+ MacroFusionPredicatorEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+ void run(raw_ostream &OS);
+};
+} // End anonymous namespace.
+
+void MacroFusionPredicatorEmitter::emitMacroFusionDecl(
+ std::vector<Record *> Fusions, PredicateExpander &PE, raw_ostream &OS) {
+ OS << "#ifdef GET_" << Target.getName() << "_MACRO_FUSION_PRED_DECL\n";
+ OS << "#undef GET_" << Target.getName() << "_MACRO_FUSION_PRED_DECL\n\n";
+ OS << "namespace llvm {\n";
+
+ for (Record *Fusion : Fusions) {
+ OS << "bool is" << Fusion->getName() << "(const TargetInstrInfo &, "
+ << "const TargetSubtargetInfo &, "
+ << "const MachineInstr *, "
+ << "const MachineInstr &);\n";
+ }
+
+ OS << "} // end namespace llvm\n";
+ OS << "\n#endif\n";
+}
+
+void MacroFusionPredicatorEmitter::emitMacroFusionImpl(
+ std::vector<Record *> Fusions, PredicateExpander &PE, raw_ostream &OS) {
+ OS << "#ifdef GET_" << Target.getName() << "_MACRO_FUSION_PRED_IMPL\n";
+ OS << "#undef GET_" << Target.getName() << "_MACRO_FUSION_PRED_IMPL\n\n";
+ OS << "namespace llvm {\n";
+
+ for (Record *Fusion : Fusions) {
+ std::vector<Record *> Predicates =
+ Fusion->getValueAsListOfDefs("Predicates");
+
+ OS << "bool is" << Fusion->getName() << "(\n";
+ OS.indent(4) << "const TargetInstrInfo &TII,\n";
+ OS.indent(4) << "const TargetSubtargetInfo &STI,\n";
+ OS.indent(4) << "const MachineInstr *FirstMI,\n";
+ OS.indent(4) << "const MachineInstr &SecondMI) {\n";
+ OS.indent(2) << "auto &MRI = SecondMI.getMF()->getRegInfo();\n";
+
+ emitPredicates(Predicates, PE, OS);
+
+ OS.indent(2) << "return true;\n";
+ OS << "}\n";
+ }
+
+ OS << "} // end namespace llvm\n";
+ OS << "\n#endif\n";
+}
+
+void MacroFusionPredicatorEmitter::emitPredicates(
+ std::vector<Record *> &Predicates, PredicateExpander &PE, raw_ostream &OS) {
+ for (Record *Predicate : Predicates) {
+ Record *Target = Predicate->getValueAsDef("Target");
+ if (Target->getName() == "first_fusion_target")
+ emitFirstPredicate(Predicate, PE, OS);
+ else if (Target->getName() == "second_fusion_target")
+ emitSecondPredicate(Predicate, PE, OS);
+ else if (Target->getName() == "both_fusion_target")
+ emitBothPredicate(Predicate, PE, OS);
+ else
+ PrintFatalError(Target->getLoc(),
+ "Unsupported 'FusionTarget': " + Target->getName());
+ }
+}
+
+void MacroFusionPredicatorEmitter::emitFirstPredicate(Record *Predicate,
+ PredicateExpander &PE,
+ raw_ostream &OS) {
+ if (Predicate->isSubClassOf("WildcardPred")) {
+ OS.indent(2) << "if (!FirstMI)\n";
+ OS.indent(2) << " return "
+ << (Predicate->getValueAsBit("ReturnValue") ? "true" : "false")
+ << ";\n";
+ } else if (Predicate->isSubClassOf("OneUsePred")) {
+ OS.indent(2) << "{\n";
+ OS.indent(4) << "Register FirstDest = FirstMI->getOperand(0).getReg();\n";
+ OS.indent(4)
+ << "if (FirstDest.isVirtual() && !MRI.hasOneNonDBGUse(FirstDest))\n";
+ OS.indent(4) << " return false;\n";
+ OS.indent(2) << "}\n";
+ } else if (Predicate->isSubClassOf(
+ "FirstFusionPredicateWithMCInstPredicate")) {
+ OS.indent(2) << "{\n";
+ OS.indent(4) << "const MachineInstr *MI = FirstMI;\n";
+ OS.indent(4) << "if (";
+ PE.setNegatePredicate(true);
+ PE.setIndentLevel(3);
+ PE.expandPredicate(OS, Predicate->getValueAsDef("Predicate"));
+ OS << ")\n";
+ OS.indent(4) << " return false;\n";
+ OS.indent(2) << "}\n";
+ } else {
+ PrintFatalError(Predicate->getLoc(),
+ "Unsupported predicate for first instruction: " +
+ Predicate->getType()->getAsString());
+ }
+}
+
+void MacroFusionPredicatorEmitter::emitSecondPredicate(Record *Predicate,
+ PredicateExpander &PE,
+ raw_ostream &OS) {
+ if (Predicate->isSubClassOf("SecondFusionPredicateWithMCInstPredicate")) {
+ OS.indent(2) << "{\n";
+ OS.indent(4) << "const MachineInstr *MI = &SecondMI;\n";
+ OS.indent(4) << "if (";
+ PE.setNegatePredicate(true);
+ PE.setIndentLevel(3);
+ PE.expandPredicate(OS, Predicate->getValueAsDef("Predicate"));
+ OS << ")\n";
+ OS.indent(4) << " return false;\n";
+ OS.indent(2) << "}\n";
+ } else {
+ PrintFatalError(Predicate->getLoc(),
+ "Unsupported predicate for first instruction: " +
+ Predicate->getType()->getAsString());
+ }
+}
+
+void MacroFusionPredicatorEmitter::emitBothPredicate(Record *Predicate,
+ PredicateExpander &PE,
+ raw_ostream &OS) {
+ if (Predicate->isSubClassOf("FusionPredicateWithCode"))
+ OS << Predicate->getValueAsString("Predicate");
+ else if (Predicate->isSubClassOf("BothFusionPredicateWithMCInstPredicate")) {
+ Record *MCPred = Predicate->getValueAsDef("Predicate");
+ emitFirstPredicate(MCPred, PE, OS);
+ emitSecondPredicate(MCPred, PE, OS);
+ } else if (Predicate->isSubClassOf("TieReg")) {
+ int FirstOpIdx = Predicate->getValueAsInt("FirstOpIdx");
+ int SecondOpIdx = Predicate->getValueAsInt("SecondOpIdx");
+ OS.indent(2) << "if (!(FirstMI->getOperand(" << FirstOpIdx
+ << ").isReg() &&\n";
+ OS.indent(2) << " SecondMI.getOperand(" << SecondOpIdx
+ << ").isReg() &&\n";
+ OS.indent(2) << " FirstMI->getOperand(" << FirstOpIdx
+ << ").getReg() == SecondMI.getOperand(" << SecondOpIdx
+ << ").getReg()))\n";
+ OS.indent(2) << " return false;\n";
+ } else
+ PrintFatalError(Predicate->getLoc(),
+ "Unsupported predicate for both instruction: " +
+ Predicate->getType()->getAsString());
+}
+
+void MacroFusionPredicatorEmitter::run(raw_ostream &OS) {
+ // Emit file header.
+ emitSourceFileHeader("Macro Fusion Predicators", OS);
+
+ PredicateExpander PE(Target.getName());
+ PE.setByRef(false);
+ PE.setExpandForMC(false);
+
+ std::vector<Record *> Fusions = Records.getAllDerivedDefinitions("Fusion");
+ // Sort macro fusions by name.
+ sort(Fusions, LessRecord());
+ emitMacroFusionDecl(Fusions, PE, OS);
+ OS << "\n";
+ emitMacroFusionImpl(Fusions, PE, OS);
+}
+
+static TableGen::Emitter::OptClass<MacroFusionPredicatorEmitter>
+ X("gen-macro-fusion-pred", "Generate macro fusion predicators.");
diff --git a/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.cpp b/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.cpp
index 8f96d3307ded..d3a73e02cd91 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.cpp
@@ -194,6 +194,11 @@ void PredicateExpander::expandCheckIsRegOperand(raw_ostream &OS, int OpIndex) {
<< "getOperand(" << OpIndex << ").isReg() ";
}
+void PredicateExpander::expandCheckIsVRegOperand(raw_ostream &OS, int OpIndex) {
+ OS << (shouldNegate() ? "!" : "") << "MI" << (isByRef() ? "." : "->")
+ << "getOperand(" << OpIndex << ").getReg().isVirtual()";
+}
+
void PredicateExpander::expandCheckIsImmOperand(raw_ostream &OS, int OpIndex) {
OS << (shouldNegate() ? "!" : "") << "MI" << (isByRef() ? "." : "->")
<< "getOperand(" << OpIndex << ").isImm() ";
@@ -319,6 +324,9 @@ void PredicateExpander::expandPredicate(raw_ostream &OS, const Record *Rec) {
if (Rec->isSubClassOf("CheckIsRegOperand"))
return expandCheckIsRegOperand(OS, Rec->getValueAsInt("OpIndex"));
+ if (Rec->isSubClassOf("CheckIsVRegOperand"))
+ return expandCheckIsVRegOperand(OS, Rec->getValueAsInt("OpIndex"));
+
if (Rec->isSubClassOf("CheckIsImmOperand"))
return expandCheckIsImmOperand(OS, Rec->getValueAsInt("OpIndex"));
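The new CheckIsVRegOperand expansion simply prints a Register::isVirtual() check on the selected operand. Below is a rough, hedged sketch of what ends up in a generated predicator when the negate flag is set (as MacroFusionPredicatorEmitter configures it); the operand index 0 and the function name are made up, and the if/return-false wrapper comes from the fusion emitter, not from the expander itself.

```
#include "llvm/CodeGen/MachineInstr.h"

// Illustration only: emitSecondPredicate() wraps the expanded condition in an
// early-exit check. expandCheckIsVRegOperand(OS, /*OpIndex=*/0) contributes
// just the "!MI->getOperand(0).getReg().isVirtual()" expression.
static bool exampleSecondPredicate(const llvm::MachineInstr &SecondMI) {
  const llvm::MachineInstr *MI = &SecondMI;
  if (!MI->getOperand(0).getReg().isVirtual())
    return false;
  return true;
}
```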
diff --git a/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h b/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h
index 27f049a715aa..cfb0a3d51e67 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h
@@ -75,6 +75,7 @@ public:
bool IsCheckAll);
void expandTIIFunctionCall(raw_ostream &OS, StringRef MethodName);
void expandCheckIsRegOperand(raw_ostream &OS, int OpIndex);
+ void expandCheckIsVRegOperand(raw_ostream &OS, int OpIndex);
void expandCheckIsImmOperand(raw_ostream &OS, int OpIndex);
void expandCheckInvalidRegOperand(raw_ostream &OS, int OpIndex);
void expandCheckFunctionPredicate(raw_ostream &OS, StringRef MCInstFn,
diff --git a/contrib/llvm-project/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
new file mode 100644
index 000000000000..aa8527e75380
--- /dev/null
+++ b/contrib/llvm-project/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
@@ -0,0 +1,206 @@
+//==- utils/TableGen/X86CompressEVEXTablesEmitter.cpp - X86 backend-*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This tablegen backend is responsible for emitting the X86 backend EVEX
+/// compression tables.
+///
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "X86RecognizableInstr.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <map>
+#include <set>
+
+using namespace llvm;
+using namespace X86Disassembler;
+
+namespace {
+
+const std::map<StringRef, StringRef> ManualMap = {
+#define ENTRY(OLD, NEW) {#OLD, #NEW},
+#include "X86ManualCompressEVEXTables.def"
+};
+const std::set<StringRef> NoCompressSet = {
+#define NOCOMP(INSN) #INSN,
+#include "X86ManualCompressEVEXTables.def"
+};
+
+class X86CompressEVEXTablesEmitter {
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+
+ // Hold all potentially compressible EVEX instructions
+ std::vector<const CodeGenInstruction *> PreCompressionInsts;
+ // Hold all compressed instructions, divided into groups with the same
+ // opcode to make the search more efficient
+ std::map<uint64_t, std::vector<const CodeGenInstruction *>> CompressedInsts;
+
+ typedef std::pair<const CodeGenInstruction *, const CodeGenInstruction *>
+ Entry;
+
+ std::vector<Entry> Table;
+
+public:
+ X86CompressEVEXTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+ // run - Output X86 EVEX compression tables.
+ void run(raw_ostream &OS);
+
+private:
+ // Prints the given table as a C++ array of type X86CompressEVEXTableEntry
+ void printTable(const std::vector<Entry> &Table, raw_ostream &OS);
+};
+
+void X86CompressEVEXTablesEmitter::printTable(const std::vector<Entry> &Table,
+ raw_ostream &OS) {
+
+ OS << "static const X86CompressEVEXTableEntry X86CompressEVEXTable[] = { \n";
+
+ // Print all entries added to the table
+ for (const auto &Pair : Table)
+ OS << " { X86::" << Pair.first->TheDef->getName()
+ << ", X86::" << Pair.second->TheDef->getName() << " },\n";
+
+ OS << "};\n\n";
+}
+
+static uint8_t byteFromBitsInit(const BitsInit *B) {
+ unsigned N = B->getNumBits();
+ assert(N <= 8 && "Field is too large for uint8_t!");
+
+ uint8_t Value = 0;
+ for (unsigned I = 0; I != N; ++I) {
+ BitInit *Bit = cast<BitInit>(B->getBit(I));
+ Value |= Bit->getValue() << I;
+ }
+ return Value;
+}
+
+class IsMatch {
+ const CodeGenInstruction *OldInst;
+
+public:
+ IsMatch(const CodeGenInstruction *OldInst) : OldInst(OldInst) {}
+
+ bool operator()(const CodeGenInstruction *NewInst) {
+ RecognizableInstrBase NewRI(*NewInst);
+ RecognizableInstrBase OldRI(*OldInst);
+
+ // Return false if any of the following fields does not match.
+ if (std::make_tuple(OldRI.IsCodeGenOnly, OldRI.OpMap, NewRI.OpPrefix,
+ OldRI.HasVEX_4V, OldRI.HasVEX_L, OldRI.HasREX_W,
+ OldRI.Form) !=
+ std::make_tuple(NewRI.IsCodeGenOnly, NewRI.OpMap, OldRI.OpPrefix,
+ NewRI.HasVEX_4V, NewRI.HasVEX_L, NewRI.HasREX_W,
+ NewRI.Form))
+ return false;
+
+ for (unsigned I = 0, E = OldInst->Operands.size(); I < E; ++I) {
+ Record *OldOpRec = OldInst->Operands[I].Rec;
+ Record *NewOpRec = NewInst->Operands[I].Rec;
+
+ if (OldOpRec == NewOpRec)
+ continue;
+
+ if (isRegisterOperand(OldOpRec) && isRegisterOperand(NewOpRec)) {
+ if (getRegOperandSize(OldOpRec) != getRegOperandSize(NewOpRec))
+ return false;
+ } else if (isMemoryOperand(OldOpRec) && isMemoryOperand(NewOpRec)) {
+ if (getMemOperandSize(OldOpRec) != getMemOperandSize(NewOpRec))
+ return false;
+ } else if (isImmediateOperand(OldOpRec) && isImmediateOperand(NewOpRec)) {
+ if (OldOpRec->getValueAsDef("Type") != NewOpRec->getValueAsDef("Type"))
+ return false;
+ }
+ }
+
+ return true;
+ }
+};
+
+void X86CompressEVEXTablesEmitter::run(raw_ostream &OS) {
+ emitSourceFileHeader("X86 EVEX compression tables", OS);
+
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions =
+ Target.getInstructionsByEnumValue();
+
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ const Record *Rec = Inst->TheDef;
+ StringRef Name = Rec->getName();
+ // _REV instructions should not appear before encoding optimization
+ if (!Rec->isSubClassOf("X86Inst") ||
+ Rec->getValueAsBit("isAsmParserOnly") || Name.ends_with("_REV"))
+ continue;
+
+ // Promoted legacy instructions are in the EVEX space and have a REX2-encoding
+ // alternative. They are added due to HW design and never emitted by the compiler.
+ if (byteFromBitsInit(Rec->getValueAsBitsInit("OpMapBits")) ==
+ X86Local::T_MAP4 &&
+ byteFromBitsInit(Rec->getValueAsBitsInit("explicitOpPrefixBits")) ==
+ X86Local::ExplicitEVEX)
+ continue;
+
+ if (NoCompressSet.find(Name) != NoCompressSet.end())
+ continue;
+
+ RecognizableInstrBase RI(*Inst);
+
+ bool IsND = RI.OpMap == X86Local::T_MAP4 && RI.HasEVEX_B && RI.HasVEX_4V;
+ // Add VEX-encoded instructions to one of the CompressedInsts vectors
+ // according to their opcode.
+ if (RI.Encoding == X86Local::VEX)
+ CompressedInsts[RI.Opcode].push_back(Inst);
+ // Add relevant EVEX encoded instructions to PreCompressionInsts
+ else if (RI.Encoding == X86Local::EVEX && !RI.HasEVEX_K && !RI.HasEVEX_L2 &&
+ (!RI.HasEVEX_B || IsND))
+ PreCompressionInsts.push_back(Inst);
+ }
+
+ for (const CodeGenInstruction *Inst : PreCompressionInsts) {
+ const Record *Rec = Inst->TheDef;
+ uint8_t Opcode = byteFromBitsInit(Rec->getValueAsBitsInit("Opcode"));
+ StringRef Name = Rec->getName();
+ const CodeGenInstruction *NewInst = nullptr;
+ if (ManualMap.find(Name) != ManualMap.end()) {
+ Record *NewRec = Records.getDef(ManualMap.at(Rec->getName()));
+ assert(NewRec && "Instruction not found!");
+ NewInst = &Target.getInstruction(NewRec);
+ } else if (Name.ends_with("_EVEX")) {
+ if (auto *NewRec = Records.getDef(Name.drop_back(5)))
+ NewInst = &Target.getInstruction(NewRec);
+ } else if (Name.ends_with("_ND")) {
+ if (auto *NewRec = Records.getDef(Name.drop_back(3))) {
+ auto &TempInst = Target.getInstruction(NewRec);
+ if (isRegisterOperand(TempInst.Operands[0].Rec))
+ NewInst = &TempInst;
+ }
+ } else {
+ // For each pre-compression instruction look for a match in the appropriate
+ // vector (instructions with the same opcode) using function object
+ // IsMatch.
+ auto Match = llvm::find_if(CompressedInsts[Opcode], IsMatch(Inst));
+ if (Match != CompressedInsts[Opcode].end())
+ NewInst = *Match;
+ }
+
+ if (!NewInst)
+ continue;
+
+ Table.push_back(std::make_pair(Inst, NewInst));
+ }
+
+ printTable(Table, OS);
+}
+} // namespace
+
+static TableGen::Emitter::OptClass<X86CompressEVEXTablesEmitter>
+ X("gen-x86-compress-evex-tables", "Generate X86 EVEX compression tables");
diff --git a/contrib/llvm-project/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
deleted file mode 100644
index c80d9a199fa3..000000000000
--- a/contrib/llvm-project/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
+++ /dev/null
@@ -1,210 +0,0 @@
-//===- utils/TableGen/X86EVEX2VEXTablesEmitter.cpp - X86 backend-*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// This tablegen backend is responsible for emitting the X86 backend EVEX2VEX
-/// compression tables.
-///
-//===----------------------------------------------------------------------===//
-
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
-#include "X86RecognizableInstr.h"
-#include "llvm/TableGen/Error.h"
-#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/TableGenBackend.h"
-
-using namespace llvm;
-using namespace X86Disassembler;
-
-namespace {
-
-class X86EVEX2VEXTablesEmitter {
- RecordKeeper &Records;
- CodeGenTarget Target;
-
- // Hold all non-masked & non-broadcasted EVEX encoded instructions
- std::vector<const CodeGenInstruction *> EVEXInsts;
- // Hold all VEX encoded instructions. Divided into groups with same opcodes
- // to make the search more efficient
- std::map<uint64_t, std::vector<const CodeGenInstruction *>> VEXInsts;
-
- typedef std::pair<const CodeGenInstruction *, const CodeGenInstruction *>
- Entry;
-
- // Represent both compress tables
- std::vector<Entry> EVEX2VEX128;
- std::vector<Entry> EVEX2VEX256;
-
-public:
- X86EVEX2VEXTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}
-
- // run - Output X86 EVEX2VEX tables.
- void run(raw_ostream &OS);
-
-private:
- // Prints the given table as a C++ array of type
- // X86EvexToVexCompressTableEntry
- void printTable(const std::vector<Entry> &Table, raw_ostream &OS);
-};
-
-void X86EVEX2VEXTablesEmitter::printTable(const std::vector<Entry> &Table,
- raw_ostream &OS) {
- StringRef Size = (Table == EVEX2VEX128) ? "128" : "256";
-
- OS << "// X86 EVEX encoded instructions that have a VEX " << Size
- << " encoding\n"
- << "// (table format: <EVEX opcode, VEX-" << Size << " opcode>).\n"
- << "static const X86EvexToVexCompressTableEntry X86EvexToVex" << Size
- << "CompressTable[] = {\n"
- << " // EVEX scalar with corresponding VEX.\n";
-
- // Print all entries added to the table
- for (const auto &Pair : Table) {
- OS << " { X86::" << Pair.first->TheDef->getName()
- << ", X86::" << Pair.second->TheDef->getName() << " },\n";
- }
-
- OS << "};\n\n";
-}
-
-// Return true if the 2 BitsInits are equal
-// Calculates the integer value residing BitsInit object
-static inline uint64_t getValueFromBitsInit(const BitsInit *B) {
- uint64_t Value = 0;
- for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) {
- if (BitInit *Bit = dyn_cast<BitInit>(B->getBit(i)))
- Value |= uint64_t(Bit->getValue()) << i;
- else
- PrintFatalError("Invalid VectSize bit");
- }
- return Value;
-}
-
-// Function object - Operator() returns true if the given VEX instruction
-// matches the EVEX instruction of this object.
-class IsMatch {
- const CodeGenInstruction *EVEXInst;
-
-public:
- IsMatch(const CodeGenInstruction *EVEXInst) : EVEXInst(EVEXInst) {}
-
- bool operator()(const CodeGenInstruction *VEXInst) {
- RecognizableInstrBase VEXRI(*VEXInst);
- RecognizableInstrBase EVEXRI(*EVEXInst);
- bool VEX_W = VEXRI.HasREX_W;
- bool EVEX_W = EVEXRI.HasREX_W;
- bool VEX_WIG = VEXRI.IgnoresW;
- bool EVEX_WIG = EVEXRI.IgnoresW;
- bool EVEX_W1_VEX_W0 = EVEXInst->TheDef->getValueAsBit("EVEX_W1_VEX_W0");
-
- if (VEXRI.IsCodeGenOnly != EVEXRI.IsCodeGenOnly ||
- // VEX/EVEX fields
- VEXRI.OpPrefix != EVEXRI.OpPrefix || VEXRI.OpMap != EVEXRI.OpMap ||
- VEXRI.HasVEX_4V != EVEXRI.HasVEX_4V ||
- VEXRI.HasVEX_L != EVEXRI.HasVEX_L ||
- // Match is allowed if either is VEX_WIG, or they match, or EVEX
- // is VEX_W1X and VEX is VEX_W0.
- (!(VEX_WIG || (!EVEX_WIG && EVEX_W == VEX_W) ||
- (EVEX_W1_VEX_W0 && EVEX_W && !VEX_W))) ||
- // Instruction's format
- VEXRI.Form != EVEXRI.Form)
- return false;
-
- // This is needed for instructions with intrinsic version (_Int).
- // Where the only difference is the size of the operands.
- // For example: VUCOMISDZrm and Int_VUCOMISDrm
- // Also for instructions that their EVEX version was upgraded to work with
- // k-registers. For example VPCMPEQBrm (xmm output register) and
- // VPCMPEQBZ128rm (k register output register).
- for (unsigned i = 0, e = EVEXInst->Operands.size(); i < e; i++) {
- Record *OpRec1 = EVEXInst->Operands[i].Rec;
- Record *OpRec2 = VEXInst->Operands[i].Rec;
-
- if (OpRec1 == OpRec2)
- continue;
-
- if (isRegisterOperand(OpRec1) && isRegisterOperand(OpRec2)) {
- if (getRegOperandSize(OpRec1) != getRegOperandSize(OpRec2))
- return false;
- } else if (isMemoryOperand(OpRec1) && isMemoryOperand(OpRec2)) {
- return false;
- } else if (isImmediateOperand(OpRec1) && isImmediateOperand(OpRec2)) {
- if (OpRec1->getValueAsDef("Type") != OpRec2->getValueAsDef("Type")) {
- return false;
- }
- } else
- return false;
- }
-
- return true;
- }
-};
-
-void X86EVEX2VEXTablesEmitter::run(raw_ostream &OS) {
- emitSourceFileHeader("X86 EVEX2VEX tables", OS);
-
- ArrayRef<const CodeGenInstruction *> NumberedInstructions =
- Target.getInstructionsByEnumValue();
-
- for (const CodeGenInstruction *Inst : NumberedInstructions) {
- const Record *Def = Inst->TheDef;
- // Filter non-X86 instructions.
- if (!Def->isSubClassOf("X86Inst"))
- continue;
- // _REV instruction should not appear before encoding optimization
- if (Def->getName().ends_with("_REV"))
- continue;
- RecognizableInstrBase RI(*Inst);
-
- // Add VEX encoded instructions to one of VEXInsts vectors according to
- // it's opcode.
- if (RI.Encoding == X86Local::VEX)
- VEXInsts[RI.Opcode].push_back(Inst);
- // Add relevant EVEX encoded instructions to EVEXInsts
- else if (RI.Encoding == X86Local::EVEX && !RI.HasEVEX_K && !RI.HasEVEX_B &&
- !RI.HasEVEX_L2 && !Def->getValueAsBit("notEVEX2VEXConvertible"))
- EVEXInsts.push_back(Inst);
- }
-
- for (const CodeGenInstruction *EVEXInst : EVEXInsts) {
- uint64_t Opcode = getValueFromBitsInit(EVEXInst->TheDef->
- getValueAsBitsInit("Opcode"));
- // For each EVEX instruction look for a VEX match in the appropriate vector
- // (instructions with the same opcode) using function object IsMatch.
- // Allow EVEX2VEXOverride to explicitly specify a match.
- const CodeGenInstruction *VEXInst = nullptr;
- if (!EVEXInst->TheDef->isValueUnset("EVEX2VEXOverride")) {
- StringRef AltInstStr =
- EVEXInst->TheDef->getValueAsString("EVEX2VEXOverride");
- Record *AltInstRec = Records.getDef(AltInstStr);
- assert(AltInstRec && "EVEX2VEXOverride instruction not found!");
- VEXInst = &Target.getInstruction(AltInstRec);
- } else {
- auto Match = llvm::find_if(VEXInsts[Opcode], IsMatch(EVEXInst));
- if (Match != VEXInsts[Opcode].end())
- VEXInst = *Match;
- }
-
- if (!VEXInst)
- continue;
-
- // In case a match is found add new entry to the appropriate table
- if (EVEXInst->TheDef->getValueAsBit("hasVEX_L"))
- EVEX2VEX256.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,1}
- else
- EVEX2VEX128.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,0}
- }
-
- // Print both tables
- printTable(EVEX2VEX128, OS);
- printTable(EVEX2VEX256, OS);
-}
-} // namespace
-
-static TableGen::Emitter::OptClass<X86EVEX2VEXTablesEmitter>
- X("gen-x86-EVEX2VEX-tables", "Generate X86 EVEX to VEX compress tables");
diff --git a/contrib/llvm-project/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 101b75e2f087..8a860d0945bb 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -374,8 +374,7 @@ public:
RegRI.HasEVEX_L2, RegRI.HasEVEX_NF,
RegRec->getValueAsBit("hasEVEX_RC"),
RegRec->getValueAsBit("hasLockPrefix"),
- RegRec->getValueAsBit("hasNoTrackPrefix"),
- RegRec->getValueAsBit("EVEX_W1_VEX_W0")) !=
+ RegRec->getValueAsBit("hasNoTrackPrefix")) !=
std::make_tuple(MemRI.Encoding, MemRI.Opcode, MemRI.OpPrefix,
MemRI.OpMap, MemRI.OpSize, MemRI.AdSize, MemRI.HasREX_W,
MemRI.HasVEX_4V, MemRI.HasVEX_L, MemRI.IgnoresVEX_L,
@@ -383,8 +382,7 @@ public:
MemRI.HasEVEX_L2, MemRI.HasEVEX_NF,
MemRec->getValueAsBit("hasEVEX_RC"),
MemRec->getValueAsBit("hasLockPrefix"),
- MemRec->getValueAsBit("hasNoTrackPrefix"),
- MemRec->getValueAsBit("EVEX_W1_VEX_W0")))
+ MemRec->getValueAsBit("hasNoTrackPrefix")))
return false;
// Make sure the sizes of the operands of both instructions suit each other.
diff --git a/contrib/llvm-project/llvm/utils/TableGen/X86ManualCompressEVEXTables.def b/contrib/llvm-project/llvm/utils/TableGen/X86ManualCompressEVEXTables.def
new file mode 100644
index 000000000000..58ca10e9e10f
--- /dev/null
+++ b/contrib/llvm-project/llvm/utils/TableGen/X86ManualCompressEVEXTables.def
@@ -0,0 +1,331 @@
+//===- X86ManualCompressEVEXTables.def ---------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// \file
+// This file defines all the entries in X86 EVEX compression tables that need
+// special handling.
+//===----------------------------------------------------------------------===//
+
+#ifndef NOCOMP
+#define NOCOMP(INSN)
+#endif
+NOCOMP(VCVTQQ2PDZ128rr)
+NOCOMP(VCVTQQ2PSZ128rm)
+NOCOMP(VCVTQQ2PSZ128rr)
+NOCOMP(VDBPSADBWZ128rmi)
+NOCOMP(VDBPSADBWZ128rri)
+NOCOMP(VPMAXSQZ128rm)
+NOCOMP(VPMAXSQZ128rr)
+NOCOMP(VPMAXUQZ128rm)
+NOCOMP(VPMAXUQZ128rr)
+NOCOMP(VPMINSQZ128rm)
+NOCOMP(VPMINSQZ128rr)
+NOCOMP(VPMINUQZ128rm)
+NOCOMP(VPMINUQZ128rr)
+NOCOMP(VPMULLQZ128rm)
+NOCOMP(VPMULLQZ128rr)
+NOCOMP(VPSRAQZ128ri)
+NOCOMP(VPSRAQZ128rm)
+NOCOMP(VPSRAQZ128rr)
+NOCOMP(VSCALEFPSZ128rm)
+NOCOMP(VDBPSADBWZ256rmi)
+NOCOMP(VDBPSADBWZ256rri)
+NOCOMP(VPMAXSQZ256rm)
+NOCOMP(VPMAXSQZ256rr)
+NOCOMP(VPMAXUQZ256rm)
+NOCOMP(VPMAXUQZ256rr)
+NOCOMP(VPMINSQZ256rm)
+NOCOMP(VPMINSQZ256rr)
+NOCOMP(VPMINUQZ256rm)
+NOCOMP(VPMINUQZ256rr)
+NOCOMP(VPMULLQZ256rm)
+NOCOMP(VPMULLQZ256rr)
+NOCOMP(VPSRAQZ256ri)
+NOCOMP(VPSRAQZ256rm)
+NOCOMP(VPSRAQZ256rr)
+NOCOMP(VSCALEFPSZ256rm)
+#undef NOCOMP
+
+#ifndef ENTRY
+#define ENTRY(OLD, NEW)
+#endif
+ENTRY(VALIGNDZ128rmi, VPALIGNRrmi)
+ENTRY(VALIGNDZ128rri, VPALIGNRrri)
+ENTRY(VALIGNQZ128rmi, VPALIGNRrmi)
+ENTRY(VALIGNQZ128rri, VPALIGNRrri)
+ENTRY(VMAXSDZrm, VMAXSDrm)
+ENTRY(VMAXSDZrr, VMAXSDrr)
+ENTRY(VMAXSSZrm, VMAXSSrm)
+ENTRY(VMAXSSZrr, VMAXSSrr)
+ENTRY(VMINSDZrm, VMINSDrm)
+ENTRY(VMINSDZrr, VMINSDrr)
+ENTRY(VMINSSZrm, VMINSSrm)
+ENTRY(VMINSSZrr, VMINSSrr)
+ENTRY(VMOVDQU16Z128mr, VMOVDQUmr)
+ENTRY(VMOVDQU16Z128rm, VMOVDQUrm)
+ENTRY(VMOVDQU16Z128rr, VMOVDQUrr)
+ENTRY(VMOVDQU8Z128mr, VMOVDQUmr)
+ENTRY(VMOVDQU8Z128rm, VMOVDQUrm)
+ENTRY(VMOVDQU8Z128rr, VMOVDQUrr)
+ENTRY(VMOVDQU16Z256mr, VMOVDQUYmr)
+ENTRY(VMOVDQU16Z256rm, VMOVDQUYrm)
+ENTRY(VMOVDQU16Z256rr, VMOVDQUYrr)
+ENTRY(VMOVDQU8Z256mr, VMOVDQUYmr)
+ENTRY(VMOVDQU8Z256rm, VMOVDQUYrm)
+ENTRY(VMOVDQU8Z256rr, VMOVDQUYrr)
+ENTRY(VSHUFF32X4Z256rmi, VPERM2F128rm)
+ENTRY(VSHUFF32X4Z256rri, VPERM2F128rr)
+ENTRY(VSHUFF64X2Z256rmi, VPERM2F128rm)
+ENTRY(VSHUFF64X2Z256rri, VPERM2F128rr)
+ENTRY(VSHUFI32X4Z256rmi, VPERM2I128rm)
+ENTRY(VSHUFI32X4Z256rri, VPERM2I128rr)
+ENTRY(VSHUFI64X2Z256rmi, VPERM2I128rm)
+ENTRY(VSHUFI64X2Z256rri, VPERM2I128rr)
+// W bit does not match
+ENTRY(VADDPDZ128rm, VADDPDrm)
+ENTRY(VADDPDZ128rr, VADDPDrr)
+ENTRY(VADDSDZrm, VADDSDrm)
+ENTRY(VADDSDZrm_Int, VADDSDrm_Int)
+ENTRY(VADDSDZrr, VADDSDrr)
+ENTRY(VADDSDZrr_Int, VADDSDrr_Int)
+ENTRY(VANDNPDZ128rm, VANDNPDrm)
+ENTRY(VANDNPDZ128rr, VANDNPDrr)
+ENTRY(VANDPDZ128rm, VANDPDrm)
+ENTRY(VANDPDZ128rr, VANDPDrr)
+ENTRY(VCOMISDZrm, VCOMISDrm)
+ENTRY(VCOMISDZrm_Int, VCOMISDrm_Int)
+ENTRY(VCOMISDZrr, VCOMISDrr)
+ENTRY(VCOMISDZrr_Int, VCOMISDrr_Int)
+ENTRY(VCVTPD2DQZ128rm, VCVTPD2DQrm)
+ENTRY(VCVTPD2DQZ128rr, VCVTPD2DQrr)
+ENTRY(VCVTPD2PSZ128rm, VCVTPD2PSrm)
+ENTRY(VCVTPD2PSZ128rr, VCVTPD2PSrr)
+ENTRY(VCVTSD2SSZrm, VCVTSD2SSrm)
+ENTRY(VCVTSD2SSZrm_Int, VCVTSD2SSrm_Int)
+ENTRY(VCVTSD2SSZrr, VCVTSD2SSrr)
+ENTRY(VCVTSD2SSZrr_Int, VCVTSD2SSrr_Int)
+ENTRY(VCVTTPD2DQZ128rm, VCVTTPD2DQrm)
+ENTRY(VCVTTPD2DQZ128rr, VCVTTPD2DQrr)
+ENTRY(VDIVPDZ128rm, VDIVPDrm)
+ENTRY(VDIVPDZ128rr, VDIVPDrr)
+ENTRY(VDIVSDZrm, VDIVSDrm)
+ENTRY(VDIVSDZrm_Int, VDIVSDrm_Int)
+ENTRY(VDIVSDZrr, VDIVSDrr)
+ENTRY(VDIVSDZrr_Int, VDIVSDrr_Int)
+ENTRY(VMAXCPDZ128rm, VMAXCPDrm)
+ENTRY(VMAXCPDZ128rr, VMAXCPDrr)
+ENTRY(VMAXCSDZrm, VMAXCSDrm)
+ENTRY(VMAXCSDZrr, VMAXCSDrr)
+ENTRY(VMAXPDZ128rm, VMAXPDrm)
+ENTRY(VMAXPDZ128rr, VMAXPDrr)
+ENTRY(VMAXSDZrm_Int, VMAXSDrm_Int)
+ENTRY(VMAXSDZrr_Int, VMAXSDrr_Int)
+ENTRY(VMINCPDZ128rm, VMINCPDrm)
+ENTRY(VMINCPDZ128rr, VMINCPDrr)
+ENTRY(VMINCSDZrm, VMINCSDrm)
+ENTRY(VMINCSDZrr, VMINCSDrr)
+ENTRY(VMINPDZ128rm, VMINPDrm)
+ENTRY(VMINPDZ128rr, VMINPDrr)
+ENTRY(VMINSDZrm_Int, VMINSDrm_Int)
+ENTRY(VMINSDZrr_Int, VMINSDrr_Int)
+ENTRY(VMOVAPDZ128mr, VMOVAPDmr)
+ENTRY(VMOVAPDZ128rm, VMOVAPDrm)
+ENTRY(VMOVAPDZ128rr, VMOVAPDrr)
+ENTRY(VMOVDDUPZ128rm, VMOVDDUPrm)
+ENTRY(VMOVDDUPZ128rr, VMOVDDUPrr)
+ENTRY(VMOVDQA64Z128mr, VMOVDQAmr)
+ENTRY(VMOVDQA64Z128rm, VMOVDQArm)
+ENTRY(VMOVDQA64Z128rr, VMOVDQArr)
+ENTRY(VMOVDQU64Z128mr, VMOVDQUmr)
+ENTRY(VMOVDQU64Z128rm, VMOVDQUrm)
+ENTRY(VMOVDQU64Z128rr, VMOVDQUrr)
+ENTRY(VMOVHPDZ128mr, VMOVHPDmr)
+ENTRY(VMOVHPDZ128rm, VMOVHPDrm)
+ENTRY(VMOVLPDZ128mr, VMOVLPDmr)
+ENTRY(VMOVLPDZ128rm, VMOVLPDrm)
+ENTRY(VMOVNTPDZ128mr, VMOVNTPDmr)
+ENTRY(VMOVPQI2QIZmr, VMOVPQI2QImr)
+ENTRY(VMOVPQI2QIZrr, VMOVPQI2QIrr)
+ENTRY(VMOVQI2PQIZrm, VMOVQI2PQIrm)
+ENTRY(VMOVSDZmr, VMOVSDmr)
+ENTRY(VMOVSDZrm, VMOVSDrm)
+ENTRY(VMOVSDZrm_alt, VMOVSDrm_alt)
+ENTRY(VMOVSDZrr, VMOVSDrr)
+ENTRY(VMOVUPDZ128mr, VMOVUPDmr)
+ENTRY(VMOVUPDZ128rm, VMOVUPDrm)
+ENTRY(VMOVUPDZ128rr, VMOVUPDrr)
+ENTRY(VMOVZPQILo2PQIZrr, VMOVZPQILo2PQIrr)
+ENTRY(VMULPDZ128rm, VMULPDrm)
+ENTRY(VMULPDZ128rr, VMULPDrr)
+ENTRY(VMULSDZrm, VMULSDrm)
+ENTRY(VMULSDZrm_Int, VMULSDrm_Int)
+ENTRY(VMULSDZrr, VMULSDrr)
+ENTRY(VMULSDZrr_Int, VMULSDrr_Int)
+ENTRY(VORPDZ128rm, VORPDrm)
+ENTRY(VORPDZ128rr, VORPDrr)
+ENTRY(VPADDQZ128rm, VPADDQrm)
+ENTRY(VPADDQZ128rr, VPADDQrr)
+ENTRY(VPANDNQZ128rm, VPANDNrm)
+ENTRY(VPANDNQZ128rr, VPANDNrr)
+ENTRY(VPANDQZ128rm, VPANDrm)
+ENTRY(VPANDQZ128rr, VPANDrr)
+ENTRY(VPERMILPDZ128mi, VPERMILPDmi)
+ENTRY(VPERMILPDZ128ri, VPERMILPDri)
+ENTRY(VPERMILPDZ128rm, VPERMILPDrm)
+ENTRY(VPERMILPDZ128rr, VPERMILPDrr)
+ENTRY(VPMULDQZ128rm, VPMULDQrm)
+ENTRY(VPMULDQZ128rr, VPMULDQrr)
+ENTRY(VPMULUDQZ128rm, VPMULUDQrm)
+ENTRY(VPMULUDQZ128rr, VPMULUDQrr)
+ENTRY(VPORQZ128rm, VPORrm)
+ENTRY(VPORQZ128rr, VPORrr)
+ENTRY(VPSLLQZ128ri, VPSLLQri)
+ENTRY(VPSLLQZ128rm, VPSLLQrm)
+ENTRY(VPSLLQZ128rr, VPSLLQrr)
+ENTRY(VPSRLQZ128ri, VPSRLQri)
+ENTRY(VPSRLQZ128rm, VPSRLQrm)
+ENTRY(VPSRLQZ128rr, VPSRLQrr)
+ENTRY(VPSUBQZ128rm, VPSUBQrm)
+ENTRY(VPSUBQZ128rr, VPSUBQrr)
+ENTRY(VPUNPCKHQDQZ128rm, VPUNPCKHQDQrm)
+ENTRY(VPUNPCKHQDQZ128rr, VPUNPCKHQDQrr)
+ENTRY(VPUNPCKLQDQZ128rm, VPUNPCKLQDQrm)
+ENTRY(VPUNPCKLQDQZ128rr, VPUNPCKLQDQrr)
+ENTRY(VPXORQZ128rm, VPXORrm)
+ENTRY(VPXORQZ128rr, VPXORrr)
+ENTRY(VRNDSCALEPDZ128rmi, VROUNDPDm)
+ENTRY(VRNDSCALEPDZ128rri, VROUNDPDr)
+ENTRY(VRNDSCALESDZm, VROUNDSDm)
+ENTRY(VRNDSCALESDZm_Int, VROUNDSDm_Int)
+ENTRY(VRNDSCALESDZr, VROUNDSDr)
+ENTRY(VRNDSCALESDZr_Int, VROUNDSDr_Int)
+ENTRY(VSHUFPDZ128rmi, VSHUFPDrmi)
+ENTRY(VSHUFPDZ128rri, VSHUFPDrri)
+ENTRY(VSQRTPDZ128m, VSQRTPDm)
+ENTRY(VSQRTPDZ128r, VSQRTPDr)
+ENTRY(VSQRTSDZm, VSQRTSDm)
+ENTRY(VSQRTSDZm_Int, VSQRTSDm_Int)
+ENTRY(VSQRTSDZr, VSQRTSDr)
+ENTRY(VSQRTSDZr_Int, VSQRTSDr_Int)
+ENTRY(VSUBPDZ128rm, VSUBPDrm)
+ENTRY(VSUBPDZ128rr, VSUBPDrr)
+ENTRY(VSUBSDZrm, VSUBSDrm)
+ENTRY(VSUBSDZrm_Int, VSUBSDrm_Int)
+ENTRY(VSUBSDZrr, VSUBSDrr)
+ENTRY(VSUBSDZrr_Int, VSUBSDrr_Int)
+ENTRY(VUCOMISDZrm, VUCOMISDrm)
+ENTRY(VUCOMISDZrm_Int, VUCOMISDrm_Int)
+ENTRY(VUCOMISDZrr, VUCOMISDrr)
+ENTRY(VUCOMISDZrr_Int, VUCOMISDrr_Int)
+ENTRY(VUNPCKHPDZ128rm, VUNPCKHPDrm)
+ENTRY(VUNPCKHPDZ128rr, VUNPCKHPDrr)
+ENTRY(VUNPCKLPDZ128rm, VUNPCKLPDrm)
+ENTRY(VUNPCKLPDZ128rr, VUNPCKLPDrr)
+ENTRY(VXORPDZ128rm, VXORPDrm)
+ENTRY(VXORPDZ128rr, VXORPDrr)
+ENTRY(VADDPDZ256rm, VADDPDYrm)
+ENTRY(VADDPDZ256rr, VADDPDYrr)
+ENTRY(VANDNPDZ256rm, VANDNPDYrm)
+ENTRY(VANDNPDZ256rr, VANDNPDYrr)
+ENTRY(VANDPDZ256rm, VANDPDYrm)
+ENTRY(VANDPDZ256rr, VANDPDYrr)
+ENTRY(VCVTPD2DQZ256rm, VCVTPD2DQYrm)
+ENTRY(VCVTPD2DQZ256rr, VCVTPD2DQYrr)
+ENTRY(VCVTPD2PSZ256rm, VCVTPD2PSYrm)
+ENTRY(VCVTPD2PSZ256rr, VCVTPD2PSYrr)
+ENTRY(VCVTTPD2DQZ256rm, VCVTTPD2DQYrm)
+ENTRY(VCVTTPD2DQZ256rr, VCVTTPD2DQYrr)
+ENTRY(VDIVPDZ256rm, VDIVPDYrm)
+ENTRY(VDIVPDZ256rr, VDIVPDYrr)
+ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF128mr)
+ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF128rr)
+ENTRY(VEXTRACTI64x2Z256mr, VEXTRACTI128mr)
+ENTRY(VEXTRACTI64x2Z256rr, VEXTRACTI128rr)
+ENTRY(VINSERTF64x2Z256rm, VINSERTF128rm)
+ENTRY(VINSERTF64x2Z256rr, VINSERTF128rr)
+ENTRY(VINSERTI64x2Z256rm, VINSERTI128rm)
+ENTRY(VINSERTI64x2Z256rr, VINSERTI128rr)
+ENTRY(VMAXCPDZ256rm, VMAXCPDYrm)
+ENTRY(VMAXCPDZ256rr, VMAXCPDYrr)
+ENTRY(VMAXPDZ256rm, VMAXPDYrm)
+ENTRY(VMAXPDZ256rr, VMAXPDYrr)
+ENTRY(VMINCPDZ256rm, VMINCPDYrm)
+ENTRY(VMINCPDZ256rr, VMINCPDYrr)
+ENTRY(VMINPDZ256rm, VMINPDYrm)
+ENTRY(VMINPDZ256rr, VMINPDYrr)
+ENTRY(VMOVAPDZ256mr, VMOVAPDYmr)
+ENTRY(VMOVAPDZ256rm, VMOVAPDYrm)
+ENTRY(VMOVAPDZ256rr, VMOVAPDYrr)
+ENTRY(VMOVDDUPZ256rm, VMOVDDUPYrm)
+ENTRY(VMOVDDUPZ256rr, VMOVDDUPYrr)
+ENTRY(VMOVDQA64Z256mr, VMOVDQAYmr)
+ENTRY(VMOVDQA64Z256rm, VMOVDQAYrm)
+ENTRY(VMOVDQA64Z256rr, VMOVDQAYrr)
+ENTRY(VMOVDQU64Z256mr, VMOVDQUYmr)
+ENTRY(VMOVDQU64Z256rm, VMOVDQUYrm)
+ENTRY(VMOVDQU64Z256rr, VMOVDQUYrr)
+ENTRY(VMOVNTPDZ256mr, VMOVNTPDYmr)
+ENTRY(VMOVUPDZ256mr, VMOVUPDYmr)
+ENTRY(VMOVUPDZ256rm, VMOVUPDYrm)
+ENTRY(VMOVUPDZ256rr, VMOVUPDYrr)
+ENTRY(VMULPDZ256rm, VMULPDYrm)
+ENTRY(VMULPDZ256rr, VMULPDYrr)
+ENTRY(VORPDZ256rm, VORPDYrm)
+ENTRY(VORPDZ256rr, VORPDYrr)
+ENTRY(VPADDQZ256rm, VPADDQYrm)
+ENTRY(VPADDQZ256rr, VPADDQYrr)
+ENTRY(VPANDNQZ256rm, VPANDNYrm)
+ENTRY(VPANDNQZ256rr, VPANDNYrr)
+ENTRY(VPANDQZ256rm, VPANDYrm)
+ENTRY(VPANDQZ256rr, VPANDYrr)
+ENTRY(VPERMILPDZ256mi, VPERMILPDYmi)
+ENTRY(VPERMILPDZ256ri, VPERMILPDYri)
+ENTRY(VPERMILPDZ256rm, VPERMILPDYrm)
+ENTRY(VPERMILPDZ256rr, VPERMILPDYrr)
+ENTRY(VPMULDQZ256rm, VPMULDQYrm)
+ENTRY(VPMULDQZ256rr, VPMULDQYrr)
+ENTRY(VPMULUDQZ256rm, VPMULUDQYrm)
+ENTRY(VPMULUDQZ256rr, VPMULUDQYrr)
+ENTRY(VPORQZ256rm, VPORYrm)
+ENTRY(VPORQZ256rr, VPORYrr)
+ENTRY(VPSLLQZ256ri, VPSLLQYri)
+ENTRY(VPSLLQZ256rm, VPSLLQYrm)
+ENTRY(VPSLLQZ256rr, VPSLLQYrr)
+ENTRY(VPSRLQZ256ri, VPSRLQYri)
+ENTRY(VPSRLQZ256rm, VPSRLQYrm)
+ENTRY(VPSRLQZ256rr, VPSRLQYrr)
+ENTRY(VPSUBQZ256rm, VPSUBQYrm)
+ENTRY(VPSUBQZ256rr, VPSUBQYrr)
+ENTRY(VPUNPCKHQDQZ256rm, VPUNPCKHQDQYrm)
+ENTRY(VPUNPCKHQDQZ256rr, VPUNPCKHQDQYrr)
+ENTRY(VPUNPCKLQDQZ256rm, VPUNPCKLQDQYrm)
+ENTRY(VPUNPCKLQDQZ256rr, VPUNPCKLQDQYrr)
+ENTRY(VPXORQZ256rm, VPXORYrm)
+ENTRY(VPXORQZ256rr, VPXORYrr)
+ENTRY(VRNDSCALEPDZ256rmi, VROUNDPDYm)
+ENTRY(VRNDSCALEPDZ256rri, VROUNDPDYr)
+ENTRY(VSHUFPDZ256rmi, VSHUFPDYrmi)
+ENTRY(VSHUFPDZ256rri, VSHUFPDYrri)
+ENTRY(VSQRTPDZ256m, VSQRTPDYm)
+ENTRY(VSQRTPDZ256r, VSQRTPDYr)
+ENTRY(VSUBPDZ256rm, VSUBPDYrm)
+ENTRY(VSUBPDZ256rr, VSUBPDYrr)
+ENTRY(VUNPCKHPDZ256rm, VUNPCKHPDYrm)
+ENTRY(VUNPCKHPDZ256rr, VUNPCKHPDYrr)
+ENTRY(VUNPCKLPDZ256rm, VUNPCKLPDYrm)
+ENTRY(VUNPCKLPDZ256rr, VUNPCKLPDYrr)
+ENTRY(VXORPDZ256rm, VXORPDYrm)
+ENTRY(VXORPDZ256rr, VXORPDYrr)
+ENTRY(VPBROADCASTQZ128rm, VPBROADCASTQrm)
+ENTRY(VPBROADCASTQZ128rr, VPBROADCASTQrr)
+ENTRY(VBROADCASTF64X2Z128rm, VBROADCASTF128rm)
+ENTRY(VBROADCASTI64X2Z128rm, VBROADCASTI128rm)
+ENTRY(VBROADCASTSDZ256rm, VBROADCASTSDYrm)
+ENTRY(VBROADCASTSDZ256rr, VBROADCASTSDYrr)
+ENTRY(VPBROADCASTQZ256rm, VPBROADCASTQYrm)
+ENTRY(VPBROADCASTQZ256rr, VPBROADCASTQYrr)
+#undef ENTRY
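The .def file above is a plain X-macro list: the emitter defines ENTRY/NOCOMP before including it (see the ManualMap/NoCompressSet initializers at the top of X86CompressEVEXTablesEmitter.cpp), and the guards at the top of the .def let either macro be left undefined. Below is a minimal, self-contained sketch of the same pattern, using std::string instead of StringRef and inlining a couple of entries where the real code would #include the .def file.

```
#include <map>
#include <set>
#include <string>

// In the real emitter these expansions come from
//   #include "X86ManualCompressEVEXTables.def"
// here the entries are written inline so the sketch is self-contained.
static const std::map<std::string, std::string> ManualMap = {
#define ENTRY(OLD, NEW) {#OLD, #NEW},
    ENTRY(VMOVSDZrr, VMOVSDrr)
    ENTRY(VALIGNDZ128rri, VPALIGNRrri)
#undef ENTRY
};

static const std::set<std::string> NoCompressSet = {
#define NOCOMP(INSN) #INSN,
    NOCOMP(VPMULLQZ128rr)
#undef NOCOMP
};
```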