summaryrefslogtreecommitdiff
path: root/include/llvm/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'include/llvm/CodeGen')
-rw-r--r--include/llvm/CodeGen/LiveIntervalAnalysis.h2
-rw-r--r--include/llvm/CodeGen/MachineValueType.h234
-rw-r--r--include/llvm/CodeGen/Passes.h51
-rw-r--r--include/llvm/CodeGen/StackProtector.h12
-rw-r--r--include/llvm/CodeGen/ValueTypes.td220
5 files changed, 247 insertions, 272 deletions
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index f5b1f87720ad3..181cb375de866 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -189,7 +189,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
void pruneValue(LiveRange &LR, SlotIndex Kill,
SmallVectorImpl<SlotIndex> *EndPoints);
- /// This function should be used. Its intend is to tell you that
+ /// This function should not be used. Its intent is to tell you that
/// you are doing something wrong if you call pruveValue directly on a
/// LiveInterval. Indeed, you are supposed to call pruneValue on the main
/// LiveRange and all the LiveRange of the subranges if any.
diff --git a/include/llvm/CodeGen/MachineValueType.h b/include/llvm/CodeGen/MachineValueType.h
index a90fe96227b99..e92bb7f749672 100644
--- a/include/llvm/CodeGen/MachineValueType.h
+++ b/include/llvm/CodeGen/MachineValueType.h
@@ -56,117 +56,119 @@ class MVT {
FIRST_FP_VALUETYPE = f16,
LAST_FP_VALUETYPE = ppcf128,
- v2i1 = 14, // 2 x i1
- v4i1 = 15, // 4 x i1
- v8i1 = 16, // 8 x i1
- v16i1 = 17, // 16 x i1
- v32i1 = 18, // 32 x i1
- v64i1 = 19, // 64 x i1
- v512i1 = 20, // 512 x i1
- v1024i1 = 21, // 1024 x i1
-
- v1i8 = 22, // 1 x i8
- v2i8 = 23, // 2 x i8
- v4i8 = 24, // 4 x i8
- v8i8 = 25, // 8 x i8
- v16i8 = 26, // 16 x i8
- v32i8 = 27, // 32 x i8
- v64i8 = 28, // 64 x i8
- v128i8 = 29, //128 x i8
- v256i8 = 30, //256 x i8
-
- v1i16 = 31, // 1 x i16
- v2i16 = 32, // 2 x i16
- v4i16 = 33, // 4 x i16
- v8i16 = 34, // 8 x i16
- v16i16 = 35, // 16 x i16
- v32i16 = 36, // 32 x i16
- v64i16 = 37, // 64 x i16
- v128i16 = 38, //128 x i16
-
- v1i32 = 39, // 1 x i32
- v2i32 = 40, // 2 x i32
- v4i32 = 41, // 4 x i32
- v8i32 = 42, // 8 x i32
- v16i32 = 43, // 16 x i32
- v32i32 = 44, // 32 x i32
- v64i32 = 45, // 64 x i32
-
- v1i64 = 46, // 1 x i64
- v2i64 = 47, // 2 x i64
- v4i64 = 48, // 4 x i64
- v8i64 = 49, // 8 x i64
- v16i64 = 50, // 16 x i64
- v32i64 = 51, // 32 x i64
-
- v1i128 = 52, // 1 x i128
+ v1i1 = 14, // 1 x i1
+ v2i1 = 15, // 2 x i1
+ v4i1 = 16, // 4 x i1
+ v8i1 = 17, // 8 x i1
+ v16i1 = 18, // 16 x i1
+ v32i1 = 19, // 32 x i1
+ v64i1 = 20, // 64 x i1
+ v512i1 = 21, // 512 x i1
+ v1024i1 = 22, // 1024 x i1
+
+ v1i8 = 23, // 1 x i8
+ v2i8 = 24, // 2 x i8
+ v4i8 = 25, // 4 x i8
+ v8i8 = 26, // 8 x i8
+ v16i8 = 27, // 16 x i8
+ v32i8 = 28, // 32 x i8
+ v64i8 = 29, // 64 x i8
+ v128i8 = 30, //128 x i8
+ v256i8 = 31, //256 x i8
+
+ v1i16 = 32, // 1 x i16
+ v2i16 = 33, // 2 x i16
+ v4i16 = 34, // 4 x i16
+ v8i16 = 35, // 8 x i16
+ v16i16 = 36, // 16 x i16
+ v32i16 = 37, // 32 x i16
+ v64i16 = 38, // 64 x i16
+ v128i16 = 39, //128 x i16
+
+ v1i32 = 40, // 1 x i32
+ v2i32 = 41, // 2 x i32
+ v4i32 = 42, // 4 x i32
+ v8i32 = 43, // 8 x i32
+ v16i32 = 44, // 16 x i32
+ v32i32 = 45, // 32 x i32
+ v64i32 = 46, // 64 x i32
+
+ v1i64 = 47, // 1 x i64
+ v2i64 = 48, // 2 x i64
+ v4i64 = 49, // 4 x i64
+ v8i64 = 50, // 8 x i64
+ v16i64 = 51, // 16 x i64
+ v32i64 = 52, // 32 x i64
+
+ v1i128 = 53, // 1 x i128
// Scalable integer types
- nxv2i1 = 53, // n x 2 x i1
- nxv4i1 = 54, // n x 4 x i1
- nxv8i1 = 55, // n x 8 x i1
- nxv16i1 = 56, // n x 16 x i1
- nxv32i1 = 57, // n x 32 x i1
-
- nxv1i8 = 58, // n x 1 x i8
- nxv2i8 = 59, // n x 2 x i8
- nxv4i8 = 60, // n x 4 x i8
- nxv8i8 = 61, // n x 8 x i8
- nxv16i8 = 62, // n x 16 x i8
- nxv32i8 = 63, // n x 32 x i8
-
- nxv1i16 = 64, // n x 1 x i16
- nxv2i16 = 65, // n x 2 x i16
- nxv4i16 = 66, // n x 4 x i16
- nxv8i16 = 67, // n x 8 x i16
- nxv16i16 = 68, // n x 16 x i16
- nxv32i16 = 69, // n x 32 x i16
-
- nxv1i32 = 70, // n x 1 x i32
- nxv2i32 = 71, // n x 2 x i32
- nxv4i32 = 72, // n x 4 x i32
- nxv8i32 = 73, // n x 8 x i32
- nxv16i32 = 74, // n x 16 x i32
- nxv32i32 = 75, // n x 32 x i32
-
- nxv1i64 = 76, // n x 1 x i64
- nxv2i64 = 77, // n x 2 x i64
- nxv4i64 = 78, // n x 4 x i64
- nxv8i64 = 79, // n x 8 x i64
- nxv16i64 = 80, // n x 16 x i64
- nxv32i64 = 81, // n x 32 x i64
-
- FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
+ nxv1i1 = 54, // n x 1 x i1
+ nxv2i1 = 55, // n x 2 x i1
+ nxv4i1 = 56, // n x 4 x i1
+ nxv8i1 = 57, // n x 8 x i1
+ nxv16i1 = 58, // n x 16 x i1
+ nxv32i1 = 59, // n x 32 x i1
+
+ nxv1i8 = 60, // n x 1 x i8
+ nxv2i8 = 61, // n x 2 x i8
+ nxv4i8 = 62, // n x 4 x i8
+ nxv8i8 = 63, // n x 8 x i8
+ nxv16i8 = 64, // n x 16 x i8
+ nxv32i8 = 65, // n x 32 x i8
+
+ nxv1i16 = 66, // n x 1 x i16
+ nxv2i16 = 67, // n x 2 x i16
+ nxv4i16 = 68, // n x 4 x i16
+ nxv8i16 = 69, // n x 8 x i16
+ nxv16i16 = 70, // n x 16 x i16
+ nxv32i16 = 71, // n x 32 x i16
+
+ nxv1i32 = 72, // n x 1 x i32
+ nxv2i32 = 73, // n x 2 x i32
+ nxv4i32 = 74, // n x 4 x i32
+ nxv8i32 = 75, // n x 8 x i32
+ nxv16i32 = 76, // n x 16 x i32
+ nxv32i32 = 77, // n x 32 x i32
+
+ nxv1i64 = 78, // n x 1 x i64
+ nxv2i64 = 79, // n x 2 x i64
+ nxv4i64 = 80, // n x 4 x i64
+ nxv8i64 = 81, // n x 8 x i64
+ nxv16i64 = 82, // n x 16 x i64
+ nxv32i64 = 83, // n x 32 x i64
+
+ FIRST_INTEGER_VECTOR_VALUETYPE = v1i1,
LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64,
- FIRST_INTEGER_SCALABLE_VALUETYPE = nxv2i1,
+ FIRST_INTEGER_SCALABLE_VALUETYPE = nxv1i1,
LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64,
- v2f16 = 82, // 2 x f16
- v4f16 = 83, // 4 x f16
- v8f16 = 84, // 8 x f16
- v1f32 = 85, // 1 x f32
- v2f32 = 86, // 2 x f32
- v4f32 = 87, // 4 x f32
- v8f32 = 88, // 8 x f32
- v16f32 = 89, // 16 x f32
- v1f64 = 90, // 1 x f64
- v2f64 = 91, // 2 x f64
- v4f64 = 92, // 4 x f64
- v8f64 = 93, // 8 x f64
-
- nxv2f16 = 94, // n x 2 x f16
- nxv4f16 = 95, // n x 4 x f16
- nxv8f16 = 96, // n x 8 x f16
- nxv1f32 = 97, // n x 1 x f32
- nxv2f32 = 98, // n x 2 x f32
- nxv4f32 = 99, // n x 4 x f32
- nxv8f32 = 100, // n x 8 x f32
- nxv16f32 = 101, // n x 16 x f32
- nxv1f64 = 102, // n x 1 x f64
- nxv2f64 = 103, // n x 2 x f64
- nxv4f64 = 104, // n x 4 x f64
- nxv8f64 = 105, // n x 8 x f64
+ v2f16 = 84, // 2 x f16
+ v4f16 = 85, // 4 x f16
+ v8f16 = 86, // 8 x f16
+ v1f32 = 87, // 1 x f32
+ v2f32 = 88, // 2 x f32
+ v4f32 = 89, // 4 x f32
+ v8f32 = 90, // 8 x f32
+ v16f32 = 91, // 16 x f32
+ v1f64 = 92, // 1 x f64
+ v2f64 = 93, // 2 x f64
+ v4f64 = 94, // 4 x f64
+ v8f64 = 95, // 8 x f64
+
+ nxv2f16 = 96, // n x 2 x f16
+ nxv4f16 = 97, // n x 4 x f16
+ nxv8f16 = 98, // n x 8 x f16
+ nxv1f32 = 99, // n x 1 x f32
+ nxv2f32 = 100, // n x 2 x f32
+ nxv4f32 = 101, // n x 4 x f32
+ nxv8f32 = 102, // n x 8 x f32
+ nxv16f32 = 103, // n x 16 x f32
+ nxv1f64 = 104, // n x 1 x f64
+ nxv2f64 = 105, // n x 2 x f64
+ nxv4f64 = 106, // n x 4 x f64
+ nxv8f64 = 107, // n x 8 x f64
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = nxv8f64,
@@ -174,21 +176,21 @@ class MVT {
FIRST_FP_SCALABLE_VALUETYPE = nxv2f16,
LAST_FP_SCALABLE_VALUETYPE = nxv8f64,
- FIRST_VECTOR_VALUETYPE = v2i1,
+ FIRST_VECTOR_VALUETYPE = v1i1,
LAST_VECTOR_VALUETYPE = nxv8f64,
- x86mmx = 106, // This is an X86 MMX value
+ x86mmx = 108, // This is an X86 MMX value
- Glue = 107, // This glues nodes together during pre-RA sched
+ Glue = 109, // This glues nodes together during pre-RA sched
- isVoid = 108, // This has no value
+ isVoid = 110, // This has no value
- Untyped = 109, // This value takes a register, but has
+ Untyped = 111, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
FIRST_VALUETYPE = 1, // This is always the beginning of the list.
- LAST_VALUETYPE = 110, // This always remains at the end of the list.
+ LAST_VALUETYPE = 112, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@@ -411,6 +413,7 @@ class MVT {
switch (SimpleTy) {
default:
llvm_unreachable("Not a vector MVT!");
+ case v1i1:
case v2i1:
case v4i1:
case v8i1:
@@ -419,6 +422,7 @@ class MVT {
case v64i1:
case v512i1:
case v1024i1:
+ case nxv1i1:
case nxv2i1:
case nxv4i1:
case nxv8i1:
@@ -589,6 +593,7 @@ class MVT {
case nxv2f16:
case nxv2f32:
case nxv2f64: return 2;
+ case v1i1:
case v1i8:
case v1i16:
case v1i32:
@@ -596,6 +601,7 @@ class MVT {
case v1i128:
case v1f32:
case v1f64:
+ case nxv1i1:
case nxv1i8:
case nxv1i16:
case nxv1i32:
@@ -628,7 +634,9 @@ class MVT {
"in codegen and has no size");
case Metadata:
llvm_unreachable("Value type is metadata.");
- case i1 : return 1;
+ case i1:
+ case v1i1:
+ case nxv1i1: return 1;
case v2i1:
case nxv2i1: return 2;
case v4i1:
@@ -814,6 +822,7 @@ class MVT {
default:
break;
case MVT::i1:
+ if (NumElements == 1) return MVT::v1i1;
if (NumElements == 2) return MVT::v2i1;
if (NumElements == 4) return MVT::v4i1;
if (NumElements == 8) return MVT::v8i1;
@@ -891,6 +900,7 @@ class MVT {
default:
break;
case MVT::i1:
+ if (NumElements == 1) return MVT::nxv1i1;
if (NumElements == 2) return MVT::nxv2i1;
if (NumElements == 4) return MVT::nxv4i1;
if (NumElements == 8) return MVT::nxv8i1;
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index 8a5a1997386f2..f3e04cffcda69 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -33,7 +33,7 @@ class raw_ostream;
/// List of target independent CodeGen pass IDs.
namespace llvm {
- FunctionPass *createAtomicExpandPass(const TargetMachine *TM);
+ FunctionPass *createAtomicExpandPass();
/// createUnreachableBlockEliminationPass - The LLVM code generator does not
/// work well with unreachable basic blocks (what live ranges make sense for a
@@ -66,7 +66,7 @@ namespace llvm {
/// createCodeGenPreparePass - Transform the code to expose more pattern
/// matching during instruction selection.
- FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
+ FunctionPass *createCodeGenPreparePass();
/// createScalarizeMaskedMemIntrinPass - Replace masked load, store, gather
/// and scatter intrinsics with scalar code when target doesn't support them.
@@ -133,10 +133,6 @@ namespace llvm {
// instruction and update the MachineFunctionInfo with that information.
extern char &ShrinkWrapID;
- /// LiveRangeShrink pass. Move instruction close to its definition to shrink
- /// the definition's live range.
- extern char &LiveRangeShrinkID;
-
/// Greedy register allocator.
extern char &RAGreedyID;
@@ -177,7 +173,7 @@ namespace llvm {
/// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
/// and eliminates abstract frame references.
extern char &PrologEpilogCodeInserterID;
- MachineFunctionPass *createPrologEpilogInserterPass(const TargetMachine *TM);
+ MachineFunctionPass *createPrologEpilogInserterPass();
/// ExpandPostRAPseudos - This pass expands pseudo instructions after
/// register allocation.
@@ -305,7 +301,7 @@ namespace llvm {
/// createStackProtectorPass - This pass adds stack protectors to functions.
///
- FunctionPass *createStackProtectorPass(const TargetMachine *TM);
+ FunctionPass *createStackProtectorPass();
/// createMachineVerifierPass - This pass verifies cenerated machine code
/// instructions for correctness.
@@ -314,11 +310,11 @@ namespace llvm {
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
- FunctionPass *createDwarfEHPass(const TargetMachine *TM);
+ FunctionPass *createDwarfEHPass();
/// createWinEHPass - Prepares personality functions used by MSVC on Windows,
/// in addition to the Itanium LSDA based personalities.
- FunctionPass *createWinEHPass(const TargetMachine *TM);
+ FunctionPass *createWinEHPass();
/// createSjLjEHPreparePass - This pass adapts exception handling code to use
/// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
@@ -362,12 +358,12 @@ namespace llvm {
/// InterleavedAccess Pass - This pass identifies and matches interleaved
/// memory accesses to target specific intrinsics.
///
- FunctionPass *createInterleavedAccessPass(const TargetMachine *TM);
+ FunctionPass *createInterleavedAccessPass();
/// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
/// TLS variables for the emulated TLS model.
///
- ModulePass *createLowerEmuTLSPass(const TargetMachine *TM);
+ ModulePass *createLowerEmuTLSPass();
/// This pass lowers the @llvm.load.relative intrinsic to instructions.
/// This is unsafe to do earlier because a pass may combine the constant
@@ -384,7 +380,7 @@ namespace llvm {
/// This pass splits the stack into a safe stack and an unsafe stack to
/// protect against stack-based overflow vulnerabilities.
- FunctionPass *createSafeStackPass(const TargetMachine *TM = nullptr);
+ FunctionPass *createSafeStackPass();
/// This pass detects subregister lanes in a virtual register that are used
/// independently of other lanes and splits them into separate virtual
@@ -419,33 +415,4 @@ namespace llvm {
} // End llvm namespace
-/// Target machine pass initializer for passes with dependencies. Use with
-/// INITIALIZE_TM_PASS_END.
-#define INITIALIZE_TM_PASS_BEGIN INITIALIZE_PASS_BEGIN
-
-/// Target machine pass initializer for passes with dependencies. Use with
-/// INITIALIZE_TM_PASS_BEGIN.
-#define INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis) \
- PassInfo *PI = new PassInfo( \
- name, arg, &passName::ID, \
- PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis, \
- PassInfo::TargetMachineCtor_t(callTargetMachineCtor<passName>)); \
- Registry.registerPass(*PI, true); \
- return PI; \
- } \
- static llvm::once_flag Initialize##passName##PassFlag; \
- void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
- llvm::call_once(Initialize##passName##PassFlag, \
- initialize##passName##PassOnce, std::ref(Registry)); \
- }
-
-/// This initializer registers TargetMachine constructor, so the pass being
-/// initialized can use target dependent interfaces. Please do not move this
-/// macro to be together with INITIALIZE_PASS, which is a complete target
-/// independent initializer, and we don't want to make libScalarOpts depend
-/// on libCodeGen.
-#define INITIALIZE_TM_PASS(passName, arg, name, cfg, analysis) \
- INITIALIZE_TM_PASS_BEGIN(passName, arg, name, cfg, analysis) \
- INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis)
-
#endif
diff --git a/include/llvm/CodeGen/StackProtector.h b/include/llvm/CodeGen/StackProtector.h
index 0655f19a323e4..b970de71f8628 100644
--- a/include/llvm/CodeGen/StackProtector.h
+++ b/include/llvm/CodeGen/StackProtector.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
@@ -55,7 +56,7 @@ private:
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// target type sizes.
const TargetLoweringBase *TLI = nullptr;
- const Triple Trip;
+ Triple Trip;
Function *F;
Module *M;
@@ -114,17 +115,12 @@ private:
public:
static char ID; // Pass identification, replacement for typeid.
- StackProtector() : FunctionPass(ID) {
- initializeStackProtectorPass(*PassRegistry::getPassRegistry());
- }
-
- StackProtector(const TargetMachine *TM)
- : FunctionPass(ID), TM(TM), Trip(TM->getTargetTriple()),
- SSPBufferSize(8) {
+ StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetPassConfig>();
AU.addPreserved<DominatorTreeWrapperPass>();
}
diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td
index b87a5e56699eb..b1e62daa5aaeb 100644
--- a/include/llvm/CodeGen/ValueTypes.td
+++ b/include/llvm/CodeGen/ValueTypes.td
@@ -33,115 +33,117 @@ def f80 : ValueType<80 , 11>; // 80-bit floating point value
def f128 : ValueType<128, 12>; // 128-bit floating point value
def ppcf128: ValueType<128, 13>; // PPC 128-bit floating point value
-def v2i1 : ValueType<2 , 14>; // 2 x i1 vector value
-def v4i1 : ValueType<4 , 15>; // 4 x i1 vector value
-def v8i1 : ValueType<8 , 16>; // 8 x i1 vector value
-def v16i1 : ValueType<16, 17>; // 16 x i1 vector value
-def v32i1 : ValueType<32 , 18>; // 32 x i1 vector value
-def v64i1 : ValueType<64 , 19>; // 64 x i1 vector value
-def v512i1 : ValueType<512, 20>; // 512 x i1 vector value
-def v1024i1: ValueType<1024,21>; //1024 x i1 vector value
-
-def v1i8 : ValueType<8, 22>; // 1 x i8 vector value
-def v2i8 : ValueType<16 , 23>; // 2 x i8 vector value
-def v4i8 : ValueType<32 , 24>; // 4 x i8 vector value
-def v8i8 : ValueType<64 , 25>; // 8 x i8 vector value
-def v16i8 : ValueType<128, 26>; // 16 x i8 vector value
-def v32i8 : ValueType<256, 27>; // 32 x i8 vector value
-def v64i8 : ValueType<512, 28>; // 64 x i8 vector value
-def v128i8 : ValueType<1024,29>; //128 x i8 vector value
-def v256i8 : ValueType<2048,30>; //256 x i8 vector value
-
-def v1i16 : ValueType<16 , 31>; // 1 x i16 vector value
-def v2i16 : ValueType<32 , 32>; // 2 x i16 vector value
-def v4i16 : ValueType<64 , 33>; // 4 x i16 vector value
-def v8i16 : ValueType<128, 34>; // 8 x i16 vector value
-def v16i16 : ValueType<256, 35>; // 16 x i16 vector value
-def v32i16 : ValueType<512, 36>; // 32 x i16 vector value
-def v64i16 : ValueType<1024,37>; // 64 x i16 vector value
-def v128i16: ValueType<2048,38>; //128 x i16 vector value
-
-def v1i32 : ValueType<32 , 39>; // 1 x i32 vector value
-def v2i32 : ValueType<64 , 40>; // 2 x i32 vector value
-def v4i32 : ValueType<128, 41>; // 4 x i32 vector value
-def v8i32 : ValueType<256, 42>; // 8 x i32 vector value
-def v16i32 : ValueType<512, 43>; // 16 x i32 vector value
-def v32i32 : ValueType<1024,44>; // 32 x i32 vector value
-def v64i32 : ValueType<2048,45>; // 32 x i32 vector value
-
-def v1i64 : ValueType<64 , 46>; // 1 x i64 vector value
-def v2i64 : ValueType<128, 47>; // 2 x i64 vector value
-def v4i64 : ValueType<256, 48>; // 4 x i64 vector value
-def v8i64 : ValueType<512, 49>; // 8 x i64 vector value
-def v16i64 : ValueType<1024,50>; // 16 x i64 vector value
-def v32i64 : ValueType<2048,51>; // 32 x i64 vector value
-
-def v1i128 : ValueType<128, 52>; // 1 x i128 vector value
-
-def nxv2i1 : ValueType<2, 53>; // n x 2 x i1 vector value
-def nxv4i1 : ValueType<4, 54>; // n x 4 x i1 vector value
-def nxv8i1 : ValueType<8, 55>; // n x 8 x i1 vector value
-def nxv16i1 : ValueType<16, 56>; // n x 16 x i1 vector value
-def nxv32i1 : ValueType<32, 57>; // n x 32 x i1 vector value
-
-def nxv1i8 : ValueType<8, 58>; // n x 1 x i8 vector value
-def nxv2i8 : ValueType<16, 59>; // n x 2 x i8 vector value
-def nxv4i8 : ValueType<32, 60>; // n x 4 x i8 vector value
-def nxv8i8 : ValueType<64, 61>; // n x 8 x i8 vector value
-def nxv16i8 : ValueType<128, 62>; // n x 16 x i8 vector value
-def nxv32i8 : ValueType<256, 63>; // n x 32 x i8 vector value
-
-def nxv1i16 : ValueType<16, 64>; // n x 1 x i16 vector value
-def nxv2i16 : ValueType<32, 65>; // n x 2 x i16 vector value
-def nxv4i16 : ValueType<64, 66>; // n x 4 x i16 vector value
-def nxv8i16 : ValueType<128, 67>; // n x 8 x i16 vector value
-def nxv16i16: ValueType<256, 68>; // n x 16 x i16 vector value
-def nxv32i16: ValueType<512, 69>; // n x 32 x i16 vector value
-
-def nxv1i32 : ValueType<32, 70>; // n x 1 x i32 vector value
-def nxv2i32 : ValueType<64, 71>; // n x 2 x i32 vector value
-def nxv4i32 : ValueType<128, 72>; // n x 4 x i32 vector value
-def nxv8i32 : ValueType<256, 73>; // n x 8 x i32 vector value
-def nxv16i32: ValueType<512, 74>; // n x 16 x i32 vector value
-def nxv32i32: ValueType<1024,75>; // n x 32 x i32 vector value
-
-def nxv1i64 : ValueType<64, 76>; // n x 1 x i64 vector value
-def nxv2i64 : ValueType<128, 77>; // n x 2 x i64 vector value
-def nxv4i64 : ValueType<256, 78>; // n x 4 x i64 vector value
-def nxv8i64 : ValueType<512, 79>; // n x 8 x i64 vector value
-def nxv16i64: ValueType<1024,80>; // n x 16 x i64 vector value
-def nxv32i64: ValueType<2048,81>; // n x 32 x i64 vector value
-
-def v2f16 : ValueType<32 , 82>; // 2 x f16 vector value
-def v4f16 : ValueType<64 , 83>; // 4 x f16 vector value
-def v8f16 : ValueType<128, 84>; // 8 x f16 vector value
-def v1f32 : ValueType<32 , 85>; // 1 x f32 vector value
-def v2f32 : ValueType<64 , 86>; // 2 x f32 vector value
-def v4f32 : ValueType<128, 87>; // 4 x f32 vector value
-def v8f32 : ValueType<256, 88>; // 8 x f32 vector value
-def v16f32 : ValueType<512, 89>; // 16 x f32 vector value
-def v1f64 : ValueType<64, 90>; // 1 x f64 vector value
-def v2f64 : ValueType<128, 91>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 92>; // 4 x f64 vector value
-def v8f64 : ValueType<512, 93>; // 8 x f64 vector value
-
-def nxv2f16 : ValueType<32 , 94>; // n x 2 x f16 vector value
-def nxv4f16 : ValueType<64 , 95>; // n x 4 x f16 vector value
-def nxv8f16 : ValueType<128, 96>; // n x 8 x f16 vector value
-def nxv1f32 : ValueType<32 , 97>; // n x 1 x f32 vector value
-def nxv2f32 : ValueType<64 , 98>; // n x 2 x f32 vector value
-def nxv4f32 : ValueType<128, 99>; // n x 4 x f32 vector value
-def nxv8f32 : ValueType<256, 100>; // n x 8 x f32 vector value
-def nxv16f32 : ValueType<512, 101>; // n x 16 x f32 vector value
-def nxv1f64 : ValueType<64, 102>; // n x 1 x f64 vector value
-def nxv2f64 : ValueType<128, 103>; // n x 2 x f64 vector value
-def nxv4f64 : ValueType<256, 104>; // n x 4 x f64 vector value
-def nxv8f64 : ValueType<512, 105>; // n x 8 x f64 vector value
-
-def x86mmx : ValueType<64 , 106>; // X86 MMX value
-def FlagVT : ValueType<0 , 107>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 108>; // Produces no value
-def untyped: ValueType<8 , 109>; // Produces an untyped value
+def v1i1 : ValueType<1 , 14>; // 1 x i1 vector value
+def v2i1 : ValueType<2 , 15>; // 2 x i1 vector value
+def v4i1 : ValueType<4 , 16>; // 4 x i1 vector value
+def v8i1 : ValueType<8 , 17>; // 8 x i1 vector value
+def v16i1 : ValueType<16, 18>; // 16 x i1 vector value
+def v32i1 : ValueType<32 , 19>; // 32 x i1 vector value
+def v64i1 : ValueType<64 , 20>; // 64 x i1 vector value
+def v512i1 : ValueType<512, 21>; // 512 x i1 vector value
+def v1024i1: ValueType<1024,22>; //1024 x i1 vector value
+
+def v1i8 : ValueType<8, 23>; // 1 x i8 vector value
+def v2i8 : ValueType<16 , 24>; // 2 x i8 vector value
+def v4i8 : ValueType<32 , 25>; // 4 x i8 vector value
+def v8i8 : ValueType<64 , 26>; // 8 x i8 vector value
+def v16i8 : ValueType<128, 27>; // 16 x i8 vector value
+def v32i8 : ValueType<256, 28>; // 32 x i8 vector value
+def v64i8 : ValueType<512, 29>; // 64 x i8 vector value
+def v128i8 : ValueType<1024,30>; //128 x i8 vector value
+def v256i8 : ValueType<2048,31>; //256 x i8 vector value
+
+def v1i16 : ValueType<16 , 32>; // 1 x i16 vector value
+def v2i16 : ValueType<32 , 33>; // 2 x i16 vector value
+def v4i16 : ValueType<64 , 34>; // 4 x i16 vector value
+def v8i16 : ValueType<128, 35>; // 8 x i16 vector value
+def v16i16 : ValueType<256, 36>; // 16 x i16 vector value
+def v32i16 : ValueType<512, 37>; // 32 x i16 vector value
+def v64i16 : ValueType<1024,38>; // 64 x i16 vector value
+def v128i16: ValueType<2048,39>; //128 x i16 vector value
+
+def v1i32 : ValueType<32 , 40>; // 1 x i32 vector value
+def v2i32 : ValueType<64 , 41>; // 2 x i32 vector value
+def v4i32 : ValueType<128, 42>; // 4 x i32 vector value
+def v8i32 : ValueType<256, 43>; // 8 x i32 vector value
+def v16i32 : ValueType<512, 44>; // 16 x i32 vector value
+def v32i32 : ValueType<1024,45>; // 32 x i32 vector value
+def v64i32 : ValueType<2048,46>; // 64 x i32 vector value
+
+def v1i64 : ValueType<64 , 47>; // 1 x i64 vector value
+def v2i64 : ValueType<128, 48>; // 2 x i64 vector value
+def v4i64 : ValueType<256, 49>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 50>; // 8 x i64 vector value
+def v16i64 : ValueType<1024,51>; // 16 x i64 vector value
+def v32i64 : ValueType<2048,52>; // 32 x i64 vector value
+
+def v1i128 : ValueType<128, 53>; // 1 x i128 vector value
+
+def nxv1i1 : ValueType<1, 54>; // n x 1 x i1 vector value
+def nxv2i1 : ValueType<2, 55>; // n x 2 x i1 vector value
+def nxv4i1 : ValueType<4, 56>; // n x 4 x i1 vector value
+def nxv8i1 : ValueType<8, 57>; // n x 8 x i1 vector value
+def nxv16i1 : ValueType<16, 58>; // n x 16 x i1 vector value
+def nxv32i1 : ValueType<32, 59>; // n x 32 x i1 vector value
+
+def nxv1i8 : ValueType<8, 60>; // n x 1 x i8 vector value
+def nxv2i8 : ValueType<16, 61>; // n x 2 x i8 vector value
+def nxv4i8 : ValueType<32, 62>; // n x 4 x i8 vector value
+def nxv8i8 : ValueType<64, 63>; // n x 8 x i8 vector value
+def nxv16i8 : ValueType<128, 64>; // n x 16 x i8 vector value
+def nxv32i8 : ValueType<256, 65>; // n x 32 x i8 vector value
+
+def nxv1i16 : ValueType<16, 66>; // n x 1 x i16 vector value
+def nxv2i16 : ValueType<32, 67>; // n x 2 x i16 vector value
+def nxv4i16 : ValueType<64, 68>; // n x 4 x i16 vector value
+def nxv8i16 : ValueType<128, 69>; // n x 8 x i16 vector value
+def nxv16i16: ValueType<256, 70>; // n x 16 x i16 vector value
+def nxv32i16: ValueType<512, 71>; // n x 32 x i16 vector value
+
+def nxv1i32 : ValueType<32, 72>; // n x 1 x i32 vector value
+def nxv2i32 : ValueType<64, 73>; // n x 2 x i32 vector value
+def nxv4i32 : ValueType<128, 74>; // n x 4 x i32 vector value
+def nxv8i32 : ValueType<256, 75>; // n x 8 x i32 vector value
+def nxv16i32: ValueType<512, 76>; // n x 16 x i32 vector value
+def nxv32i32: ValueType<1024,77>; // n x 32 x i32 vector value
+
+def nxv1i64 : ValueType<64, 78>; // n x 1 x i64 vector value
+def nxv2i64 : ValueType<128, 79>; // n x 2 x i64 vector value
+def nxv4i64 : ValueType<256, 80>; // n x 4 x i64 vector value
+def nxv8i64 : ValueType<512, 81>; // n x 8 x i64 vector value
+def nxv16i64: ValueType<1024,82>; // n x 16 x i64 vector value
+def nxv32i64: ValueType<2048,83>; // n x 32 x i64 vector value
+
+def v2f16 : ValueType<32 , 84>; // 2 x f16 vector value
+def v4f16 : ValueType<64 , 85>; // 4 x f16 vector value
+def v8f16 : ValueType<128, 86>; // 8 x f16 vector value
+def v1f32 : ValueType<32 , 87>; // 1 x f32 vector value
+def v2f32 : ValueType<64 , 88>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 89>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 90>; // 8 x f32 vector value
+def v16f32 : ValueType<512, 91>; // 16 x f32 vector value
+def v1f64 : ValueType<64, 92>; // 1 x f64 vector value
+def v2f64 : ValueType<128, 93>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 94>; // 4 x f64 vector value
+def v8f64 : ValueType<512, 95>; // 8 x f64 vector value
+
+def nxv2f16 : ValueType<32 , 96>; // n x 2 x f16 vector value
+def nxv4f16 : ValueType<64 , 97>; // n x 4 x f16 vector value
+def nxv8f16 : ValueType<128, 98>; // n x 8 x f16 vector value
+def nxv1f32 : ValueType<32 , 99>; // n x 1 x f32 vector value
+def nxv2f32 : ValueType<64 , 100>; // n x 2 x f32 vector value
+def nxv4f32 : ValueType<128, 101>; // n x 4 x f32 vector value
+def nxv8f32 : ValueType<256, 102>; // n x 8 x f32 vector value
+def nxv16f32 : ValueType<512, 103>; // n x 16 x f32 vector value
+def nxv1f64 : ValueType<64, 104>; // n x 1 x f64 vector value
+def nxv2f64 : ValueType<128, 105>; // n x 2 x f64 vector value
+def nxv4f64 : ValueType<256, 106>; // n x 4 x f64 vector value
+def nxv8f64 : ValueType<512, 107>; // n x 8 x f64 vector value
+
+def x86mmx : ValueType<64 , 108>; // X86 MMX value
+def FlagVT : ValueType<0 , 109>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 110>; // Produces no value
+def untyped: ValueType<8 , 111>; // Produces an untyped value
def token : ValueType<0 , 248>; // TokenTy
def MetadataVT: ValueType<0, 249>; // Metadata