path: root/llvm/include/llvm/IR
Diffstat (limited to 'llvm/include/llvm/IR')
-rw-r--r--  llvm/include/llvm/IR/AbstractCallSite.h | 247
-rw-r--r--  llvm/include/llvm/IR/Argument.h | 11
-rw-r--r--  llvm/include/llvm/IR/Attributes.h | 46
-rw-r--r--  llvm/include/llvm/IR/Attributes.td | 34
-rw-r--r--  llvm/include/llvm/IR/AutoUpgrade.h | 8
-rw-r--r--  llvm/include/llvm/IR/BasicBlock.h | 107
-rw-r--r--  llvm/include/llvm/IR/CFG.h | 43
-rw-r--r--  llvm/include/llvm/IR/CFGDiff.h | 284
-rw-r--r--  llvm/include/llvm/IR/CallSite.h | 926
-rw-r--r--  llvm/include/llvm/IR/Constant.h | 2
-rw-r--r--  llvm/include/llvm/IR/ConstantFolder.h | 107
-rw-r--r--  llvm/include/llvm/IR/ConstantRange.h | 4
-rw-r--r--  llvm/include/llvm/IR/Constants.h | 103
-rw-r--r--  llvm/include/llvm/IR/ConstrainedOps.def | 105
-rw-r--r--  llvm/include/llvm/IR/DIBuilder.h | 35
-rw-r--r--  llvm/include/llvm/IR/DataLayout.h | 42
-rw-r--r--  llvm/include/llvm/IR/DebugInfo.h | 18
-rw-r--r--  llvm/include/llvm/IR/DebugInfoMetadata.h | 345
-rw-r--r--  llvm/include/llvm/IR/DebugLoc.h | 2
-rw-r--r--  llvm/include/llvm/IR/DerivedTypes.h | 275
-rw-r--r--  llvm/include/llvm/IR/DiagnosticInfo.h | 46
-rw-r--r--  llvm/include/llvm/IR/Dominators.h | 7
-rw-r--r--  llvm/include/llvm/IR/FPEnv.h | 22
-rw-r--r--  llvm/include/llvm/IR/Function.h | 28
-rw-r--r--  llvm/include/llvm/IR/GetElementPtrTypeIterator.h | 12
-rw-r--r--  llvm/include/llvm/IR/GlobalObject.h | 25
-rw-r--r--  llvm/include/llvm/IR/GlobalValue.h | 31
-rw-r--r--  llvm/include/llvm/IR/GlobalVariable.h | 1
-rw-r--r--  llvm/include/llvm/IR/IRBuilder.h | 1044
-rw-r--r--  llvm/include/llvm/IR/IRBuilderFolder.h | 141
-rw-r--r--  llvm/include/llvm/IR/IRPrintingPasses.h | 17
-rw-r--r--  llvm/include/llvm/IR/InlineAsm.h | 91
-rw-r--r--  llvm/include/llvm/IR/InstVisitor.h | 26
-rw-r--r--  llvm/include/llvm/IR/InstrTypes.h | 170
-rw-r--r--  llvm/include/llvm/IR/Instruction.h | 87
-rw-r--r--  llvm/include/llvm/IR/Instructions.h | 655
-rw-r--r--  llvm/include/llvm/IR/IntrinsicInst.h | 1685
-rw-r--r--  llvm/include/llvm/IR/Intrinsics.h | 50
-rw-r--r--  llvm/include/llvm/IR/Intrinsics.td | 436
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td | 923
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 696
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsARM.td | 361
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsBPF.td | 5
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsHexagon.td | 6198
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsHexagonDep.td | 6144
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsMips.td | 268
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsNVVM.td | 26
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsPowerPC.td | 226
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsRISCV.td | 4
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsSystemZ.td | 126
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsWebAssembly.td | 63
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsX86.td | 801
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsXCore.td | 72
-rw-r--r--  llvm/include/llvm/IR/LLVMContext.h | 50
-rw-r--r--  llvm/include/llvm/IR/LLVMRemarkStreamer.h | 95
-rw-r--r--  llvm/include/llvm/IR/LegacyPassManagers.h | 3
-rw-r--r--  llvm/include/llvm/IR/LegacyPassNameParser.h | 41
-rw-r--r--  llvm/include/llvm/IR/Mangler.h | 2
-rw-r--r--  llvm/include/llvm/IR/MatrixBuilder.h | 221
-rw-r--r--  llvm/include/llvm/IR/Metadata.h | 32
-rw-r--r--  llvm/include/llvm/IR/Module.h | 42
-rw-r--r--  llvm/include/llvm/IR/ModuleSummaryIndex.h | 150
-rw-r--r--  llvm/include/llvm/IR/ModuleSummaryIndexYAML.h | 9
-rw-r--r--  llvm/include/llvm/IR/NoFolder.h | 155
-rw-r--r--  llvm/include/llvm/IR/Operator.h | 47
-rw-r--r--  llvm/include/llvm/IR/PassInstrumentation.h | 2
-rw-r--r--  llvm/include/llvm/IR/PassManager.h | 170
-rw-r--r--  llvm/include/llvm/IR/PassManagerImpl.h | 157
-rw-r--r--  llvm/include/llvm/IR/PassTimingInfo.h | 9
-rw-r--r--  llvm/include/llvm/IR/PatternMatch.h | 325
-rw-r--r--  llvm/include/llvm/IR/ProfileSummary.h | 31
-rw-r--r--  llvm/include/llvm/IR/RemarkStreamer.h | 108
-rw-r--r--  llvm/include/llvm/IR/RuntimeLibcalls.def | 5
-rw-r--r--  llvm/include/llvm/IR/Statepoint.h | 309
-rw-r--r--  llvm/include/llvm/IR/Type.h | 83
-rw-r--r--  llvm/include/llvm/IR/Use.h | 69
-rw-r--r--  llvm/include/llvm/IR/User.h | 5
-rw-r--r--  llvm/include/llvm/IR/VPIntrinsics.def | 84
-rw-r--r--  llvm/include/llvm/IR/Value.h | 62
-rw-r--r--  llvm/include/llvm/IR/ValueHandle.h | 44
-rw-r--r--  llvm/include/llvm/IR/ValueMap.h | 2
81 files changed, 13632 insertions, 11911 deletions
diff --git a/llvm/include/llvm/IR/AbstractCallSite.h b/llvm/include/llvm/IR/AbstractCallSite.h
new file mode 100644
index 000000000000..e8cf05001542
--- /dev/null
+++ b/llvm/include/llvm/IR/AbstractCallSite.h
@@ -0,0 +1,247 @@
+//===- AbstractCallSite.h - Abstract call sites -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AbstractCallSite class, which is a wrapper that
+// allows treating direct, indirect, and callback calls the same.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ABSTRACTCALLSITE_H
+#define LLVM_IR_ABSTRACTCALLSITE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+
+namespace llvm {
+
+/// AbstractCallSite
+///
+/// An abstract call site is a wrapper that allows treating direct,
+/// indirect, and callback calls the same. If an abstract call site
+/// represents a direct or indirect call site it behaves like a stripped
+/// down version of a normal call site object. The abstract call site can
+/// also represent a callback call, that is, a call in which the initially
+/// called function (=broker) may invoke a third one (=callback callee).
+/// In this case, the abstract call site hides the middle man, namely the
+/// broker function. The result is a representation of the callback call,
+/// inside the broker, but in the context of the original call to the broker.
+///
+/// There are up to three functions involved when we talk about callback call
+/// sites. The caller (1), which invokes the broker function. The broker
+/// function (2), that will invoke the callee zero or more times. And finally
+/// the callee (3), which is the target of the callback call.
+///
+/// The abstract call site will handle the mapping from parameters to arguments
+/// depending on the semantics of the broker function. However, it is important
+/// to note that the mapping is often partial. Thus, some arguments of the
+/// call/invoke instruction are mapped to parameters of the callee while others
+/// are not.
+class AbstractCallSite {
+public:
+
+ /// The encoding of a callback with regards to the underlying instruction.
+ struct CallbackInfo {
+
+ /// For direct/indirect calls the parameter encoding is empty. If it is not,
+ /// the abstract call site represents a callback. In that case, the first
+ /// element of the encoding vector represents which argument of the call
+ /// site CB is the callback callee. The remaining elements map parameters
+ /// (identified by their position) to the arguments that will be passed
+ /// through (also identified by position but in the call site instruction).
+ ///
+ /// NOTE that we use LLVM argument numbers (starting at 0) and not
+ /// clang/source argument numbers (starting at 1). The -1 entries represent
+ /// unknown values that are passed to the callee.
+ using ParameterEncodingTy = SmallVector<int, 0>;
+ ParameterEncodingTy ParameterEncoding;
+
+ };
+
+private:
+
+ /// The underlying call site:
+ /// caller -> callee, if this is a direct or indirect call site
+ /// caller -> broker function, if this is a callback call site
+ CallBase *CB;
+
+ /// The encoding of a callback with regards to the underlying instruction.
+ CallbackInfo CI;
+
+public:
+ /// Sole constructor for abstract call sites (ACS).
+ ///
+ /// An abstract call site can only be constructed through a llvm::Use because
+ /// each operand (=use) of an instruction could potentially be a different
+ /// abstract call site. Furthermore, even if the value of the llvm::Use is the
+ /// same, and the user is as well, the abstract call sites might not be.
+ ///
+ /// If a use is not associated with an abstract call site the constructed ACS
+ /// will evaluate to false if converted to a boolean.
+ ///
+ /// If the use is the callee use of a call or invoke instruction, the
+ /// constructed abstract call site will behave as a llvm::CallSite would.
+ ///
+ /// If the use is not a callee use of a call or invoke instruction, the
+ /// callback metadata is used to determine the argument <-> parameter mapping
+ /// as well as the callee of the abstract call site.
+ AbstractCallSite(const Use *U);
+
+ /// Add operand uses of \p CB that represent callback uses into
+ /// \p CallbackUses.
+ ///
+ /// All uses added to \p CallbackUses can be used to create abstract call
+ /// sites for which AbstractCallSite::isCallbackCall() will return true.
+ static void getCallbackUses(const CallBase &CB,
+ SmallVectorImpl<const Use *> &CallbackUses);
+
+ /// Conversion operator to conveniently check for a valid/initialized ACS.
+ explicit operator bool() const { return CB != nullptr; }
+
+ /// Return the underlying instruction.
+ CallBase *getInstruction() const { return CB; }
+
+ /// Return true if this ACS represents a direct call.
+ bool isDirectCall() const {
+ return !isCallbackCall() && !CB->isIndirectCall();
+ }
+
+ /// Return true if this ACS represents an indirect call.
+ bool isIndirectCall() const {
+ return !isCallbackCall() && CB->isIndirectCall();
+ }
+
+ /// Return true if this ACS represents a callback call.
+ bool isCallbackCall() const {
+ // For a callback call site the callee is ALWAYS stored first in the
+ // transitive values vector. Thus, a non-empty vector indicates a callback.
+ return !CI.ParameterEncoding.empty();
+ }
+
+ /// Return true if @p UI is the use that defines the callee of this ACS.
+ bool isCallee(Value::const_user_iterator UI) const {
+ return isCallee(&UI.getUse());
+ }
+
+ /// Return true if @p U is the use that defines the callee of this ACS.
+ bool isCallee(const Use *U) const {
+ if (isDirectCall())
+ return CB->isCallee(U);
+
+ assert(!CI.ParameterEncoding.empty() &&
+ "Callback without parameter encoding!");
+
+ // If the use is actually in a constant cast expression which itself
+ // has only one use, we look through the constant cast expression.
+ if (auto *CE = dyn_cast<ConstantExpr>(U->getUser()))
+ if (CE->hasOneUse() && CE->isCast())
+ U = &*CE->use_begin();
+
+ return (int)CB->getArgOperandNo(U) == CI.ParameterEncoding[0];
+ }
+
+ /// Return the number of parameters of the callee.
+ unsigned getNumArgOperands() const {
+ if (isDirectCall())
+ return CB->getNumArgOperands();
+ // Subtract 1 for the callee encoding.
+ return CI.ParameterEncoding.size() - 1;
+ }
+
+ /// Return the operand index of the underlying instruction associated with @p
+ /// Arg.
+ int getCallArgOperandNo(Argument &Arg) const {
+ return getCallArgOperandNo(Arg.getArgNo());
+ }
+
+ /// Return the operand index of the underlying instruction associated with
+ /// the function parameter number @p ArgNo or -1 if there is none.
+ int getCallArgOperandNo(unsigned ArgNo) const {
+ if (isDirectCall())
+ return ArgNo;
+ // Add 1 for the callee encoding.
+ return CI.ParameterEncoding[ArgNo + 1];
+ }
+
+ /// Return the operand of the underlying instruction associated with @p Arg.
+ Value *getCallArgOperand(Argument &Arg) const {
+ return getCallArgOperand(Arg.getArgNo());
+ }
+
+ /// Return the operand of the underlying instruction associated with the
+ /// function parameter number @p ArgNo or nullptr if there is none.
+ Value *getCallArgOperand(unsigned ArgNo) const {
+ if (isDirectCall())
+ return CB->getArgOperand(ArgNo);
+ // Add 1 for the callee encoding.
+ return CI.ParameterEncoding[ArgNo + 1] >= 0
+ ? CB->getArgOperand(CI.ParameterEncoding[ArgNo + 1])
+ : nullptr;
+ }
+
+ /// Return the operand index of the underlying instruction associated with the
+ /// callee of this ACS. Only valid for callback calls!
+ int getCallArgOperandNoForCallee() const {
+ assert(isCallbackCall());
+ assert(CI.ParameterEncoding.size() && CI.ParameterEncoding[0] >= 0);
+ return CI.ParameterEncoding[0];
+ }
+
+ /// Return the use of the callee value in the underlying instruction. Only
+ /// valid for callback calls!
+ const Use &getCalleeUseForCallback() const {
+ int CalleeArgIdx = getCallArgOperandNoForCallee();
+ assert(CalleeArgIdx >= 0 &&
+ unsigned(CalleeArgIdx) < getInstruction()->getNumOperands());
+ return getInstruction()->getOperandUse(CalleeArgIdx);
+ }
+
+ /// Return the pointer to function that is being called.
+ Value *getCalledOperand() const {
+ if (isDirectCall())
+ return CB->getCalledOperand();
+ return CB->getArgOperand(getCallArgOperandNoForCallee());
+ }
+
+ /// Return the function being called if this is a direct call, otherwise
+ /// return null (if it's an indirect call).
+ Function *getCalledFunction() const {
+ Value *V = getCalledOperand();
+ return V ? dyn_cast<Function>(V->stripPointerCasts()) : nullptr;
+ }
+};
+
+/// Apply function Func to each CB's callback call site.
+template <typename UnaryFunction>
+void forEachCallbackCallSite(const CallBase &CB, UnaryFunction Func) {
+ SmallVector<const Use *, 4u> CallbackUses;
+ AbstractCallSite::getCallbackUses(CB, CallbackUses);
+ for (const Use *U : CallbackUses) {
+ AbstractCallSite ACS(U);
+ assert(ACS && ACS.isCallbackCall() && "must be a callback call");
+ Func(ACS);
+ }
+}
+
+/// Apply function Func to each CB's callback function.
+template <typename UnaryFunction>
+void forEachCallbackFunction(const CallBase &CB, UnaryFunction Func) {
+ forEachCallbackCallSite(CB, [&Func](AbstractCallSite &ACS) {
+ if (Function *Callback = ACS.getCalledFunction())
+ Func(Callback);
+ });
+}
+
+} // end namespace llvm
+
+#endif // LLVM_IR_ABSTRACTCALLSITE_H
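
A minimal usage sketch for the API above: it assumes a CallBase whose callee
carries callback metadata, and visitCallbackOperands is an invented helper
name, not part of this patch.

    #include "llvm/IR/AbstractCallSite.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print every operand of CB that is forwarded to a callback callee,
    // using the parameter <-> argument mapping described above.
    static void visitCallbackOperands(const CallBase &CB) {
      forEachCallbackCallSite(CB, [](AbstractCallSite &ACS) {
        for (unsigned ArgNo = 0; ArgNo < ACS.getNumArgOperands(); ++ArgNo)
          if (Value *Op = ACS.getCallArgOperand(ArgNo)) // nullptr if unmapped
            errs() << "callback arg " << ArgNo << ": " << *Op << "\n";
      });
    }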
diff --git a/llvm/include/llvm/IR/Argument.h b/llvm/include/llvm/IR/Argument.h
index 244878bd3155..af469e8a5d1a 100644
--- a/llvm/include/llvm/IR/Argument.h
+++ b/llvm/include/llvm/IR/Argument.h
@@ -71,9 +71,13 @@ public:
/// Return true if this argument has the swifterror attribute.
bool hasSwiftErrorAttr() const;
- /// Return true if this argument has the byval attribute or inalloca
+ /// Return true if this argument has the byval, inalloca, or preallocated
/// attribute. These attributes represent arguments being passed by value.
- bool hasByValOrInAllocaAttr() const;
+ bool hasPassPointeeByValueAttr() const;
+
+ /// If this argument satisfies hasPassPointeeByValueAttr, return the
+ /// in-memory ABI size copied to the stack for the call. Otherwise, return 0.
+ uint64_t getPassPointeeByValueCopySize(const DataLayout &DL) const;
/// If this is a byval or inalloca argument, return its alignment.
/// FIXME: Remove this function once transition to Align is over.
@@ -110,6 +114,9 @@ public:
/// Return true if this argument has the inalloca attribute.
bool hasInAllocaAttr() const;
+ /// Return true if this argument has the preallocated attribute.
+ bool hasPreallocatedAttr() const;
+
/// Return true if this argument has the zext attribute.
bool hasZExtAttr() const;
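
A minimal sketch of the renamed predicate and the new size query, assuming a
Function and its module's DataLayout are in hand; stackCopyBytes is an
invented helper name.

    #include "llvm/IR/Argument.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Sum the bytes copied to the stack for all byval/inalloca/preallocated
    // arguments of F.
    static uint64_t stackCopyBytes(const Function &F, const DataLayout &DL) {
      uint64_t Bytes = 0;
      for (const Argument &Arg : F.args())
        if (Arg.hasPassPointeeByValueAttr())
          Bytes += Arg.getPassPointeeByValueCopySize(DL);
      return Bytes;
    }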
diff --git a/llvm/include/llvm/IR/Attributes.h b/llvm/include/llvm/IR/Attributes.h
index e6b280465f72..58365aa2b764 100644
--- a/llvm/include/llvm/IR/Attributes.h
+++ b/llvm/include/llvm/IR/Attributes.h
@@ -17,7 +17,6 @@
#include "llvm-c/Types.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
@@ -38,6 +37,7 @@ class AttributeImpl;
class AttributeListImpl;
class AttributeSetNode;
template<typename T> struct DenseMapInfo;
+class FoldingSetNodeID;
class Function;
class LLVMContext;
class Type;
@@ -70,9 +70,12 @@ public:
enum AttrKind {
// IR-Level Attributes
None, ///< No attributes have been set
- #define GET_ATTR_ENUM
+ #define GET_ATTR_NAMES
+ #define ATTRIBUTE_ENUM(ENUM_NAME, OTHER) ENUM_NAME,
#include "llvm/IR/Attributes.inc"
- EndAttrKinds ///< Sentinel value useful for loops
+ EndAttrKinds, ///< Sentinel value useful for loops
+ EmptyKey, ///< Use as Empty key for DenseMap of AttrKind
+ TombstoneKey, ///< Use as Tombstone key for DenseMap of AttrKind
};
private:
@@ -105,6 +108,18 @@ public:
unsigned ElemSizeArg,
const Optional<unsigned> &NumElemsArg);
static Attribute getWithByValType(LLVMContext &Context, Type *Ty);
+ static Attribute getWithPreallocatedType(LLVMContext &Context, Type *Ty);
+
+ static Attribute::AttrKind getAttrKindFromName(StringRef AttrName);
+
+ static StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind);
+
+ /// Return true if and only if the attribute has an Argument.
+ static bool doesAttrKindHaveArgument(Attribute::AttrKind AttrKind);
+
+ /// Return true if the provided string matches the IR name of an attribute.
+ /// example: "noalias" returns true but "NoAlias" does not.
+ static bool isExistingAttribute(StringRef Name);
//===--------------------------------------------------------------------===//
// Attribute Accessors
@@ -180,9 +195,7 @@ public:
/// Less-than operator. Useful for sorting the attributes list.
bool operator<(Attribute A) const;
- void Profile(FoldingSetNodeID &ID) const {
- ID.AddPointer(pImpl);
- }
+ void Profile(FoldingSetNodeID &ID) const;
/// Return a raw pointer that uniquely identifies this attribute.
void *getRawPointer() const {
@@ -290,6 +303,7 @@ public:
uint64_t getDereferenceableBytes() const;
uint64_t getDereferenceableOrNullBytes() const;
Type *getByValType() const;
+ Type *getPreallocatedType() const;
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
std::string getAsString(bool InAttrGrp = false) const;
@@ -383,6 +397,9 @@ public:
static AttributeList get(LLVMContext &C, unsigned Index,
ArrayRef<Attribute::AttrKind> Kinds);
static AttributeList get(LLVMContext &C, unsigned Index,
+ ArrayRef<Attribute::AttrKind> Kinds,
+ ArrayRef<uint64_t> Values);
+ static AttributeList get(LLVMContext &C, unsigned Index,
ArrayRef<StringRef> Kind);
static AttributeList get(LLVMContext &C, unsigned Index,
const AttrBuilder &B);
@@ -530,9 +547,6 @@ public:
// AttributeList Accessors
//===--------------------------------------------------------------------===//
- /// Retrieve the LLVM context.
- LLVMContext &getContext() const;
-
/// The attributes for the specified index are returned.
AttributeSet getAttributes(unsigned Index) const;
@@ -612,6 +626,9 @@ public:
/// Return the byval type for the specified function parameter.
Type *getParamByValType(unsigned ArgNo) const;
+ /// Return the preallocated type for the specified function parameter.
+ Type *getParamPreallocatedType(unsigned ArgNo) const;
+
/// Get the stack alignment.
MaybeAlign getStackAlignment(unsigned Index) const;
@@ -712,6 +729,7 @@ class AttrBuilder {
uint64_t DerefOrNullBytes = 0;
uint64_t AllocSizeArgs = 0;
Type *ByValType = nullptr;
+ Type *PreallocatedType = nullptr;
public:
AttrBuilder() = default;
@@ -790,6 +808,9 @@ public:
/// Retrieve the byval type.
Type *getByValType() const { return ByValType; }
+ /// Retrieve the preallocated type.
+ Type *getPreallocatedType() const { return PreallocatedType; }
+
/// Retrieve the allocsize args, if the allocsize attribute exists. If it
/// doesn't exist, pair(0, 0) is returned.
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
@@ -833,6 +854,9 @@ public:
/// This turns a byval type into the form used internally in Attribute.
AttrBuilder &addByValAttr(Type *Ty);
+ /// This turns a preallocated type into the form used internally in Attribute.
+ AttrBuilder &addPreallocatedAttr(Type *Ty);
+
/// Add an allocsize attribute, using the representation returned by
/// Attribute.getIntValue().
AttrBuilder &addAllocSizeAttrFromRawRepr(uint64_t RawAllocSizeRepr);
@@ -843,8 +867,8 @@ public:
// Iterators for target-dependent attributes.
using td_type = std::pair<std::string, std::string>;
- using td_iterator = std::map<std::string, std::string>::iterator;
- using td_const_iterator = std::map<std::string, std::string>::const_iterator;
+ using td_iterator = decltype(TargetDepAttrs)::iterator;
+ using td_const_iterator = decltype(TargetDepAttrs)::const_iterator;
using td_range = iterator_range<td_iterator>;
using td_const_range = iterator_range<td_const_iterator>;
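
A minimal sketch of the new AttrBuilder entry points, assuming an existing
AttributeList to update; addPreallocatedParam and the align(8) choice are
illustrative only.

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Type.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Attach a preallocated(ElemTy) type attribute plus an alignment (an
    // int attribute) to parameter ArgNo, returning the updated list.
    static AttributeList addPreallocatedParam(LLVMContext &Ctx,
                                              AttributeList AL,
                                              unsigned ArgNo, Type *ElemTy) {
      AttrBuilder B;
      B.addPreallocatedAttr(ElemTy);
      B.addAlignmentAttr(Align(8));
      return AL.addParamAttributes(Ctx, ArgNo, B);
    }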
diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td
index 5d4a5f6743b7..395f9dbfb176 100644
--- a/llvm/include/llvm/IR/Attributes.td
+++ b/llvm/include/llvm/IR/Attributes.td
@@ -7,18 +7,24 @@ class Attr<string S> {
/// Enum attribute.
class EnumAttr<string S> : Attr<S>;
+/// Int attribute.
+class IntAttr<string S> : Attr<S>;
+
/// StringBool attribute.
class StrBoolAttr<string S> : Attr<S>;
+/// Type attribute.
+class TypeAttr<string S> : Attr<S>;
+
/// Target-independent enum attributes.
/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
/// 0 means unaligned (different from align(1)).
-def Alignment : EnumAttr<"align">;
+def Alignment : IntAttr<"align">;
/// The result of the function is guaranteed to point to a number of bytes that
/// we can determine if we know the value of the function's arguments.
-def AllocSize : EnumAttr<"allocsize">;
+def AllocSize : IntAttr<"allocsize">;
/// inline=always.
def AlwaysInline : EnumAttr<"alwaysinline">;
@@ -31,7 +37,10 @@ def ArgMemOnly : EnumAttr<"argmemonly">;
def Builtin : EnumAttr<"builtin">;
/// Pass structure by value.
-def ByVal : EnumAttr<"byval">;
+def ByVal : TypeAttr<"byval">;
+
+/// Parameter or return value may not contain uninitialized or poison bits.
+def NoUndef : EnumAttr<"noundef">;
/// Marks function as being in a cold path.
def Cold : EnumAttr<"cold">;
@@ -40,10 +49,10 @@ def Cold : EnumAttr<"cold">;
def Convergent : EnumAttr<"convergent">;
/// Pointer is known to be dereferenceable.
-def Dereferenceable : EnumAttr<"dereferenceable">;
+def Dereferenceable : IntAttr<"dereferenceable">;
/// Pointer is either null or dereferenceable.
-def DereferenceableOrNull : EnumAttr<"dereferenceable_or_null">;
+def DereferenceableOrNull : IntAttr<"dereferenceable_or_null">;
/// Function may only access memory that is inaccessible from IR.
def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly">;
@@ -97,6 +106,9 @@ def NoInline : EnumAttr<"noinline">;
/// Function is called early and/or often, so lazy binding isn't worthwhile.
def NonLazyBind : EnumAttr<"nonlazybind">;
+/// Disable merging for call sites.
+def NoMerge : EnumAttr<"nomerge">;
+
/// Pointer is known to be not null.
def NonNull : EnumAttr<"nonnull">;
@@ -118,6 +130,9 @@ def NoCfCheck : EnumAttr<"nocf_check">;
/// Function doesn't unwind stack.
def NoUnwind : EnumAttr<"nounwind">;
+/// Null pointer in address space zero is valid.
+def NullPointerIsValid : EnumAttr<"null_pointer_is_valid">;
+
/// Select optimizations for best fuzzing signal.
def OptForFuzzing : EnumAttr<"optforfuzzing">;
@@ -127,6 +142,9 @@ def OptimizeForSize : EnumAttr<"optsize">;
/// Function must not be optimized.
def OptimizeNone : EnumAttr<"optnone">;
+/// Similar to byval but without a copy.
+def Preallocated : TypeAttr<"preallocated">;
+
/// Function does not access memory.
def ReadNone : EnumAttr<"readnone">;
@@ -153,7 +171,7 @@ def SExt : EnumAttr<"signext">;
/// Alignment of stack for function (3 bits) stored as log2 of alignment with
/// +1 bias. 0 means unaligned (different from alignstack=(1)).
-def StackAlignment : EnumAttr<"alignstack">;
+def StackAlignment : IntAttr<"alignstack">;
/// Function can be speculated.
def Speculatable : EnumAttr<"speculatable">;
@@ -218,10 +236,12 @@ def ZExt : EnumAttr<"zeroext">;
def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
+def NoSignedZerosFPMath : StrBoolAttr<"no-signed-zeros-fp-math">;
def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
def NoJumpTables : StrBoolAttr<"no-jump-tables">;
def NoInlineLineTables : StrBoolAttr<"no-inline-line-tables">;
def ProfileSampleAccurate : StrBoolAttr<"profile-sample-accurate">;
+def UseSampleProfile : StrBoolAttr<"use-sample-profile">;
class CompatRule<string F> {
// The name of the function called to check the attribute of the caller and
@@ -240,6 +260,7 @@ def : CompatRule<"isEqual<SanitizeHWAddressAttr>">;
def : CompatRule<"isEqual<SanitizeMemTagAttr>">;
def : CompatRule<"isEqual<SafeStackAttr>">;
def : CompatRule<"isEqual<ShadowCallStackAttr>">;
+def : CompatRule<"isEqual<UseSampleProfileAttr>">;
class MergeRule<string F> {
// The name of the function called to merge the attributes of the caller and
@@ -253,6 +274,7 @@ class MergeRule<string F> {
def : MergeRule<"setAND<LessPreciseFPMADAttr>">;
def : MergeRule<"setAND<NoInfsFPMathAttr>">;
def : MergeRule<"setAND<NoNansFPMathAttr>">;
+def : MergeRule<"setAND<NoSignedZerosFPMathAttr>">;
def : MergeRule<"setAND<UnsafeFPMathAttr>">;
def : MergeRule<"setOR<NoImplicitFloatAttr>">;
def : MergeRule<"setOR<NoJumpTablesAttr>">;
diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h
index 42f50cc991de..f331fc3c413f 100644
--- a/llvm/include/llvm/IR/AutoUpgrade.h
+++ b/llvm/include/llvm/IR/AutoUpgrade.h
@@ -61,6 +61,9 @@ namespace llvm {
void UpgradeSectionAttributes(Module &M);
+ /// Correct any IR that is relying on old function attribute behavior.
+ void UpgradeFunctionAttributes(Function &F);
+
/// If the given TBAA tag uses the scalar TBAA format, create a new node
/// corresponding to the upgrade to the struct-path aware TBAA format.
/// Otherwise return the \p TBAANode itself.
@@ -92,9 +95,8 @@ namespace llvm {
/// pointers.
std::string UpgradeDataLayoutString(StringRef DL, StringRef Triple);
- /// Upgrade function attributes "no-frame-pointer-elim" and
- /// "no-frame-pointer-elim-non-leaf" to "frame-pointer".
- void UpgradeFramePointerAttributes(AttrBuilder &B);
+ /// Upgrade attributes that changed format or kind.
+ void UpgradeAttributes(AttrBuilder &B);
} // End llvm namespace
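
A minimal sketch of the generalized upgrade hook, assuming the legacy
frame-pointer strings remain among the forms it rewrites (as the function it
replaces did); readLegacyAttrs is an invented name.

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/AutoUpgrade.h"
    using namespace llvm;

    static void readLegacyAttrs(AttrBuilder &B) {
      B.addAttribute("no-frame-pointer-elim", "true"); // old spelling
      UpgradeAttributes(B); // rewrites to the current "frame-pointer" form
    }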
diff --git a/llvm/include/llvm/IR/BasicBlock.h b/llvm/include/llvm/IR/BasicBlock.h
index d594145f8636..24d568a728c6 100644
--- a/llvm/include/llvm/IR/BasicBlock.h
+++ b/llvm/include/llvm/IR/BasicBlock.h
@@ -31,6 +31,7 @@
namespace llvm {
+class AssemblyAnnotationWriter;
class CallInst;
class Function;
class LandingPadInst;
@@ -133,6 +134,15 @@ public:
static_cast<const BasicBlock *>(this)->getTerminatingDeoptimizeCall());
}
+ /// Returns the call instruction calling \@llvm.experimental.deoptimize
+ /// that is present either in the current basic block or in a block that is
+ /// a unique successor of the current block, if such a call exists.
+ /// Otherwise, returns null.
+ const CallInst *getPostdominatingDeoptimizeCall() const;
+ CallInst *getPostdominatingDeoptimizeCall() {
+ return const_cast<CallInst *>(
+ static_cast<const BasicBlock *>(this)->getPostdominatingDeoptimizeCall());
+ }
+
/// Returns the call instruction marked 'musttail' prior to the terminating
/// return instruction of this basic block, if such a call is present.
/// Otherwise, returns null.
@@ -267,6 +277,12 @@ public:
static_cast<const BasicBlock *>(this)->getUniqueSuccessor());
}
+ /// Print the basic block to an output stream with an optional
+ /// AssemblyAnnotationWriter.
+ void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr,
+ bool ShouldPreserveUseListOrder = false,
+ bool IsForDebug = false) const;
+
//===--------------------------------------------------------------------===//
/// Instruction iterator methods
///
@@ -361,12 +377,12 @@ public:
/// except operator delete.
void dropAllReferences();
- /// Notify the BasicBlock that the predecessor \p Pred is no longer able to
- /// reach it.
+ /// Update PHI nodes in this BasicBlock before removal of predecessor \p Pred.
+ /// Note that this function does not actually remove the predecessor.
///
- /// This is actually not used to update the Predecessor list, but is actually
- /// used to update the PHI nodes that reside in the block. Note that this
- /// should be called while the predecessor still refers to this block.
+ /// If \p KeepOneInputPHIs is true then don't remove PHIs that are left with
+ /// zero or one incoming values, and don't simplify PHIs with all incoming
+ /// values the same.
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs = false);
bool canSplitPredecessors() const;
@@ -393,7 +409,9 @@ public:
/// Returns true if there are any uses of this basic block other than
/// direct branches, switches, etc. to it.
- bool hasAddressTaken() const { return getSubclassDataFromValue() != 0; }
+ bool hasAddressTaken() const {
+ return getBasicBlockBits().BlockAddressRefCount != 0;
+ }
/// Update all phi nodes in this basic block to refer to basic block \p New
/// instead of basic block \p Old.
@@ -428,16 +446,81 @@ public:
Optional<uint64_t> getIrrLoopHeaderWeight() const;
+ /// Returns true if the Order field of child Instructions is valid.
+ bool isInstrOrderValid() const {
+ return getBasicBlockBits().InstrOrderValid;
+ }
+
+ /// Mark instruction ordering invalid. Done on every instruction insert.
+ void invalidateOrders() {
+ validateInstrOrdering();
+ BasicBlockBits Bits = getBasicBlockBits();
+ Bits.InstrOrderValid = false;
+ setBasicBlockBits(Bits);
+ }
+
+ /// Renumber instructions and mark the ordering as valid.
+ void renumberInstructions();
+
+ /// Asserts that instruction order numbers are marked invalid, or that they
+ /// are in ascending order. This is constant time if the ordering is invalid,
+ /// and linear in the number of instructions if the ordering is valid. Callers
+ /// should be careful not to call this in ways that make common operations
+ /// O(n^2). For example, it takes O(n) time to assign order numbers to
+ /// instructions, so the order should be validated no more than once after
+ /// each ordering to ensure that transforms have the same algorithmic
+ /// complexity when asserts are enabled as when they are disabled.
+ void validateInstrOrdering() const;
+
private:
+#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
+// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
+// and give the `pack` pragma push semantics.
+#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
+#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
+#else
+#define BEGIN_TWO_BYTE_PACK()
+#define END_TWO_BYTE_PACK()
+#endif
+
+ BEGIN_TWO_BYTE_PACK()
+ /// Bitfield to help interpret the bits in Value::SubclassData.
+ struct BasicBlockBits {
+ unsigned short BlockAddressRefCount : 15;
+ unsigned short InstrOrderValid : 1;
+ };
+ END_TWO_BYTE_PACK()
+
+#undef BEGIN_TWO_BYTE_PACK
+#undef END_TWO_BYTE_PACK
+
+ /// Safely reinterpret the subclass data bits to a more useful form.
+ BasicBlockBits getBasicBlockBits() const {
+ static_assert(sizeof(BasicBlockBits) == sizeof(unsigned short),
+ "too many bits for Value::SubclassData");
+ unsigned short ValueData = getSubclassDataFromValue();
+ BasicBlockBits AsBits;
+ memcpy(&AsBits, &ValueData, sizeof(AsBits));
+ return AsBits;
+ }
+
+ /// Reinterpret our subclass bits and store them back into Value.
+ void setBasicBlockBits(BasicBlockBits AsBits) {
+ unsigned short D;
+ memcpy(&D, &AsBits, sizeof(D));
+ Value::setValueSubclassData(D);
+ }
+
/// Increment the internal refcount of the number of BlockAddresses
/// referencing this BasicBlock by \p Amt.
///
/// This is almost always 0, sometimes one, almost never 2, and
/// inconceivably 3 or more.
void AdjustBlockAddressRefCount(int Amt) {
- setValueSubclassData(getSubclassDataFromValue()+Amt);
- assert((int)(signed char)getSubclassDataFromValue() >= 0 &&
- "Refcount wrap-around");
+ BasicBlockBits Bits = getBasicBlockBits();
+ Bits.BlockAddressRefCount += Amt;
+ setBasicBlockBits(Bits);
+ assert(Bits.BlockAddressRefCount < 255 && "Refcount wrap-around");
}
/// Shadow Value::setValueSubclassData with a private forwarding method so
@@ -454,6 +537,12 @@ DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)
/// This assumes that \p It is not at the end of a block.
BasicBlock::iterator skipDebugIntrinsics(BasicBlock::iterator It);
+#ifdef NDEBUG
+/// In release builds, this is a no-op. For !NDEBUG builds, the checks are
+/// implemented in the .cpp file to avoid circular header deps.
+inline void BasicBlock::validateInstrOrdering() const {}
+#endif
+
} // end namespace llvm
#endif // LLVM_IR_BASICBLOCK_H
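
A minimal sketch of the new ordering API, assuming Instruction::comesBefore
(part of the Instruction.h changes in this diffstat) consults these order
numbers; precedesInBlock is an invented helper.

    #include <cassert>
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Answer "does A precede B in their shared block?" in amortized O(1).
    static bool precedesInBlock(Instruction *A, Instruction *B) {
      BasicBlock *BB = A->getParent();
      assert(BB == B->getParent() && "same-block query only");
      if (!BB->isInstrOrderValid())
        BB->renumberInstructions(); // O(n) once, cached until mutation
      return A->comesBefore(B);     // O(1) via the cached order numbers
    }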
diff --git a/llvm/include/llvm/IR/CFG.h b/llvm/include/llvm/IR/CFG.h
index 55aff7137e86..f798b1af6c83 100644
--- a/llvm/include/llvm/IR/CFG.h
+++ b/llvm/include/llvm/IR/CFG.h
@@ -22,18 +22,19 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
-#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
#include <iterator>
namespace llvm {
+class BasicBlock;
+class Instruction;
+class Use;
+
//===----------------------------------------------------------------------===//
// BasicBlock pred_iterator definition
//===----------------------------------------------------------------------===//
@@ -103,7 +104,7 @@ using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
using const_pred_iterator =
PredIterator<const BasicBlock, Value::const_user_iterator>;
using pred_range = iterator_range<pred_iterator>;
-using pred_const_range = iterator_range<const_pred_iterator>;
+using const_pred_range = iterator_range<const_pred_iterator>;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
@@ -124,8 +125,8 @@ inline unsigned pred_size(const BasicBlock *BB) {
inline pred_range predecessors(BasicBlock *BB) {
return pred_range(pred_begin(BB), pred_end(BB));
}
-inline pred_const_range predecessors(const BasicBlock *BB) {
- return pred_const_range(pred_begin(BB), pred_end(BB));
+inline const_pred_range predecessors(const BasicBlock *BB) {
+ return const_pred_range(pred_begin(BB), pred_end(BB));
}
//===----------------------------------------------------------------------===//
@@ -238,17 +239,17 @@ public:
};
using succ_iterator = SuccIterator<Instruction, BasicBlock>;
-using succ_const_iterator = SuccIterator<const Instruction, const BasicBlock>;
+using const_succ_iterator = SuccIterator<const Instruction, const BasicBlock>;
using succ_range = iterator_range<succ_iterator>;
-using succ_const_range = iterator_range<succ_const_iterator>;
+using const_succ_range = iterator_range<const_succ_iterator>;
inline succ_iterator succ_begin(Instruction *I) { return succ_iterator(I); }
-inline succ_const_iterator succ_begin(const Instruction *I) {
- return succ_const_iterator(I);
+inline const_succ_iterator succ_begin(const Instruction *I) {
+ return const_succ_iterator(I);
}
inline succ_iterator succ_end(Instruction *I) { return succ_iterator(I, true); }
-inline succ_const_iterator succ_end(const Instruction *I) {
- return succ_const_iterator(I, true);
+inline const_succ_iterator succ_end(const Instruction *I) {
+ return const_succ_iterator(I, true);
}
inline bool succ_empty(const Instruction *I) {
return succ_begin(I) == succ_end(I);
@@ -259,21 +260,21 @@ inline unsigned succ_size(const Instruction *I) {
inline succ_range successors(Instruction *I) {
return succ_range(succ_begin(I), succ_end(I));
}
-inline succ_const_range successors(const Instruction *I) {
- return succ_const_range(succ_begin(I), succ_end(I));
+inline const_succ_range successors(const Instruction *I) {
+ return const_succ_range(succ_begin(I), succ_end(I));
}
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
}
-inline succ_const_iterator succ_begin(const BasicBlock *BB) {
- return succ_const_iterator(BB->getTerminator());
+inline const_succ_iterator succ_begin(const BasicBlock *BB) {
+ return const_succ_iterator(BB->getTerminator());
}
inline succ_iterator succ_end(BasicBlock *BB) {
return succ_iterator(BB->getTerminator(), true);
}
-inline succ_const_iterator succ_end(const BasicBlock *BB) {
- return succ_const_iterator(BB->getTerminator(), true);
+inline const_succ_iterator succ_end(const BasicBlock *BB) {
+ return const_succ_iterator(BB->getTerminator(), true);
}
inline bool succ_empty(const BasicBlock *BB) {
return succ_begin(BB) == succ_end(BB);
@@ -284,8 +285,8 @@ inline unsigned succ_size(const BasicBlock *BB) {
inline succ_range successors(BasicBlock *BB) {
return succ_range(succ_begin(BB), succ_end(BB));
}
-inline succ_const_range successors(const BasicBlock *BB) {
- return succ_const_range(succ_begin(BB), succ_end(BB));
+inline const_succ_range successors(const BasicBlock *BB) {
+ return const_succ_range(succ_begin(BB), succ_end(BB));
}
//===--------------------------------------------------------------------===//
@@ -306,7 +307,7 @@ template <> struct GraphTraits<BasicBlock*> {
template <> struct GraphTraits<const BasicBlock*> {
using NodeRef = const BasicBlock *;
- using ChildIteratorType = succ_const_iterator;
+ using ChildIteratorType = const_succ_iterator;
static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
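
A minimal sketch using the renamed const-correct ranges; countCFGEdges is an
invented helper.

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/CFG.h"
    using namespace llvm;

    // Count in- and out-edges of BB through the const_pred_range and
    // const_succ_range aliases introduced above.
    static unsigned countCFGEdges(const BasicBlock *BB) {
      unsigned N = 0;
      for (const BasicBlock *Pred : predecessors(BB)) { (void)Pred; ++N; }
      for (const BasicBlock *Succ : successors(BB))   { (void)Succ; ++N; }
      return N; // equals pred_size(BB) + succ_size(BB)
    }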
diff --git a/llvm/include/llvm/IR/CFGDiff.h b/llvm/include/llvm/IR/CFGDiff.h
deleted file mode 100644
index 57b62dd66a47..000000000000
--- a/llvm/include/llvm/IR/CFGDiff.h
+++ /dev/null
@@ -1,284 +0,0 @@
-//===- CFGDiff.h - Define a CFG snapshot. -----------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines specializations of GraphTraits that allows generic
-// algorithms to see a different snapshot of a CFG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_IR_CFGDIFF_H
-#define LLVM_IR_CFGDIFF_H
-
-#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/iterator.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CFG.h"
-#include "llvm/Support/CFGUpdate.h"
-#include "llvm/Support/type_traits.h"
-#include <cassert>
-#include <cstddef>
-#include <iterator>
-
-// Two booleans are used to define orders in graphs:
-// InverseGraph defines when we need to reverse the whole graph and is as such
-// also equivalent to applying updates in reverse.
-// InverseEdge defines whether we want to change the edges direction. E.g., for
-// a non-inversed graph, the children are naturally the successors when
-// InverseEdge is false and the predecessors when InverseEdge is true.
-
-// We define two base classes that call into GraphDiff, one for successors
-// (CFGSuccessors), where InverseEdge is false, and one for predecessors
-// (CFGPredecessors), where InverseEdge is true.
-// FIXME: Further refactoring may merge the two base classes into a single one
-// templated / parametrized on using succ_iterator/pred_iterator and false/true
-// for the InverseEdge.
-
-// CFGViewSuccessors and CFGViewPredecessors can both be parametrized to
-// consider the graph inverted or not (i.e. InverseGraph). Successors
-// implicitly has InverseEdge = false and Predecessors implicitly has
-// InverseEdge = true (see calls to GraphDiff methods in there). The GraphTraits
-// instantiations that follow define the value of InverseGraph.
-
-// GraphTraits instantiations:
-// - GraphDiff<BasicBlock *> is equivalent to InverseGraph = false
-// - GraphDiff<Inverse<BasicBlock *>> is equivalent to InverseGraph = true
-// - second pair item is BasicBlock *, then InverseEdge = false (so it inherits
-// from CFGViewSuccessors).
-// - second pair item is Inverse<BasicBlock *>, then InverseEdge = true (so it
-// inherits from CFGViewPredecessors).
-
-// The 4 GraphTraits are as follows:
-// 1. std::pair<const GraphDiff<BasicBlock *> *, BasicBlock *>> :
-// CFGViewSuccessors<false>
-// Regular CFG, children means successors, InverseGraph = false,
-// InverseEdge = false.
-// 2. std::pair<const GraphDiff<Inverse<BasicBlock *>> *, BasicBlock *>> :
-// CFGViewSuccessors<true>
-// Reverse the graph, get successors but reverse-apply updates,
-// InverseGraph = true, InverseEdge = false.
-// 3. std::pair<const GraphDiff<BasicBlock *> *, Inverse<BasicBlock *>>> :
-// CFGViewPredecessors<false>
-// Regular CFG, reverse edges, so children mean predecessors,
-// InverseGraph = false, InverseEdge = true.
-// 4. std::pair<const GraphDiff<Inverse<BasicBlock *>> *, Inverse<BasicBlock *>>
-// : CFGViewPredecessors<true>
-// Reverse the graph and the edges, InverseGraph = true, InverseEdge = true.
-
-namespace llvm {
-
-// GraphDiff defines a CFG snapshot: given a set of Update<NodePtr>, provide
-// utilities to skip edges marked as deleted and return a set of edges marked as
-// newly inserted. The current diff treats the CFG as a graph rather than a
-// multigraph. Added edges are pruned to be unique, and deleted edges will
-// remove all existing edges between two blocks.
-template <typename NodePtr, bool InverseGraph = false> class GraphDiff {
- using UpdateMapType = SmallDenseMap<NodePtr, SmallVector<NodePtr, 2>>;
- UpdateMapType SuccInsert;
- UpdateMapType SuccDelete;
- UpdateMapType PredInsert;
- UpdateMapType PredDelete;
- // Using a singleton empty vector for all BasicBlock requests with no
- // children.
- SmallVector<NodePtr, 1> Empty;
-
- void printMap(raw_ostream &OS, const UpdateMapType &M) const {
- for (auto Pair : M)
- for (auto Child : Pair.second) {
- OS << "(";
- Pair.first->printAsOperand(OS, false);
- OS << ", ";
- Child->printAsOperand(OS, false);
- OS << ") ";
- }
- OS << "\n";
- }
-
-public:
- GraphDiff() {}
- GraphDiff(ArrayRef<cfg::Update<NodePtr>> Updates) {
- SmallVector<cfg::Update<NodePtr>, 4> LegalizedUpdates;
- cfg::LegalizeUpdates<NodePtr>(Updates, LegalizedUpdates, InverseGraph);
- for (auto U : LegalizedUpdates) {
- if (U.getKind() == cfg::UpdateKind::Insert) {
- SuccInsert[U.getFrom()].push_back(U.getTo());
- PredInsert[U.getTo()].push_back(U.getFrom());
- } else {
- SuccDelete[U.getFrom()].push_back(U.getTo());
- PredDelete[U.getTo()].push_back(U.getFrom());
- }
- }
- }
-
- bool ignoreChild(const NodePtr BB, NodePtr EdgeEnd, bool InverseEdge) const {
- auto &DeleteChildren =
- (InverseEdge != InverseGraph) ? PredDelete : SuccDelete;
- auto It = DeleteChildren.find(BB);
- if (It == DeleteChildren.end())
- return false;
- auto &EdgesForBB = It->second;
- return llvm::find(EdgesForBB, EdgeEnd) != EdgesForBB.end();
- }
-
- iterator_range<typename SmallVectorImpl<NodePtr>::const_iterator>
- getAddedChildren(const NodePtr BB, bool InverseEdge) const {
- auto &InsertChildren =
- (InverseEdge != InverseGraph) ? PredInsert : SuccInsert;
- auto It = InsertChildren.find(BB);
- if (It == InsertChildren.end())
- return make_range(Empty.begin(), Empty.end());
- return make_range(It->second.begin(), It->second.end());
- }
-
- void print(raw_ostream &OS) const {
- OS << "===== GraphDiff: CFG edge changes to create a CFG snapshot. \n"
- "===== (Note: notion of children/inverse_children depends on "
- "the direction of edges and the graph.)\n";
- OS << "Children to insert:\n\t";
- printMap(OS, SuccInsert);
- OS << "Children to delete:\n\t";
- printMap(OS, SuccDelete);
- OS << "Inverse_children to insert:\n\t";
- printMap(OS, PredInsert);
- OS << "Inverse_children to delete:\n\t";
- printMap(OS, PredDelete);
- OS << "\n";
- }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
-#endif
-};
-
-template <bool InverseGraph = false> struct CFGViewSuccessors {
- using DataRef = const GraphDiff<BasicBlock *, InverseGraph> *;
- using NodeRef = std::pair<DataRef, BasicBlock *>;
-
- using ExistingChildIterator =
- WrappedPairNodeDataIterator<succ_iterator, NodeRef, DataRef>;
- struct DeletedEdgesFilter {
- BasicBlock *BB;
- DeletedEdgesFilter(BasicBlock *BB) : BB(BB){};
- bool operator()(NodeRef N) const {
- return !N.first->ignoreChild(BB, N.second, false);
- }
- };
- using FilterExistingChildrenIterator =
- filter_iterator<ExistingChildIterator, DeletedEdgesFilter>;
-
- using vec_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;
- using AddNewChildrenIterator =
- WrappedPairNodeDataIterator<vec_iterator, NodeRef, DataRef>;
- using ChildIteratorType =
- concat_iterator<NodeRef, FilterExistingChildrenIterator,
- AddNewChildrenIterator>;
-
- static ChildIteratorType child_begin(NodeRef N) {
- auto InsertVec = N.first->getAddedChildren(N.second, false);
- // filter iterator init:
- auto firstit = make_filter_range(
- make_range<ExistingChildIterator>({succ_begin(N.second), N.first},
- {succ_end(N.second), N.first}),
- DeletedEdgesFilter(N.second));
- // new inserts iterator init:
- auto secondit = make_range<AddNewChildrenIterator>(
- {InsertVec.begin(), N.first}, {InsertVec.end(), N.first});
-
- return concat_iterator<NodeRef, FilterExistingChildrenIterator,
- AddNewChildrenIterator>(firstit, secondit);
- }
-
- static ChildIteratorType child_end(NodeRef N) {
- auto InsertVec = N.first->getAddedChildren(N.second, false);
- // filter iterator init:
- auto firstit = make_filter_range(
- make_range<ExistingChildIterator>({succ_end(N.second), N.first},
- {succ_end(N.second), N.first}),
- DeletedEdgesFilter(N.second));
- // new inserts iterator init:
- auto secondit = make_range<AddNewChildrenIterator>(
- {InsertVec.end(), N.first}, {InsertVec.end(), N.first});
-
- return concat_iterator<NodeRef, FilterExistingChildrenIterator,
- AddNewChildrenIterator>(firstit, secondit);
- }
-};
-
-template <bool InverseGraph = false> struct CFGViewPredecessors {
- using DataRef = const GraphDiff<BasicBlock *, InverseGraph> *;
- using NodeRef = std::pair<DataRef, BasicBlock *>;
-
- using ExistingChildIterator =
- WrappedPairNodeDataIterator<pred_iterator, NodeRef, DataRef>;
- struct DeletedEdgesFilter {
- BasicBlock *BB;
- DeletedEdgesFilter(BasicBlock *BB) : BB(BB){};
- bool operator()(NodeRef N) const {
- return !N.first->ignoreChild(BB, N.second, true);
- }
- };
- using FilterExistingChildrenIterator =
- filter_iterator<ExistingChildIterator, DeletedEdgesFilter>;
-
- using vec_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;
- using AddNewChildrenIterator =
- WrappedPairNodeDataIterator<vec_iterator, NodeRef, DataRef>;
- using ChildIteratorType =
- concat_iterator<NodeRef, FilterExistingChildrenIterator,
- AddNewChildrenIterator>;
-
- static ChildIteratorType child_begin(NodeRef N) {
- auto InsertVec = N.first->getAddedChildren(N.second, true);
- // filter iterator init:
- auto firstit = make_filter_range(
- make_range<ExistingChildIterator>({pred_begin(N.second), N.first},
- {pred_end(N.second), N.first}),
- DeletedEdgesFilter(N.second));
- // new inserts iterator init:
- auto secondit = make_range<AddNewChildrenIterator>(
- {InsertVec.begin(), N.first}, {InsertVec.end(), N.first});
-
- return concat_iterator<NodeRef, FilterExistingChildrenIterator,
- AddNewChildrenIterator>(firstit, secondit);
- }
-
- static ChildIteratorType child_end(NodeRef N) {
- auto InsertVec = N.first->getAddedChildren(N.second, true);
- // filter iterator init:
- auto firstit = make_filter_range(
- make_range<ExistingChildIterator>({pred_end(N.second), N.first},
- {pred_end(N.second), N.first}),
- DeletedEdgesFilter(N.second));
- // new inserts iterator init:
- auto secondit = make_range<AddNewChildrenIterator>(
- {InsertVec.end(), N.first}, {InsertVec.end(), N.first});
-
- return concat_iterator<NodeRef, FilterExistingChildrenIterator,
- AddNewChildrenIterator>(firstit, secondit);
- }
-};
-
-template <>
-struct GraphTraits<
- std::pair<const GraphDiff<BasicBlock *, false> *, BasicBlock *>>
- : CFGViewSuccessors<false> {};
-template <>
-struct GraphTraits<
- std::pair<const GraphDiff<BasicBlock *, true> *, BasicBlock *>>
- : CFGViewSuccessors<true> {};
-template <>
-struct GraphTraits<
- std::pair<const GraphDiff<BasicBlock *, false> *, Inverse<BasicBlock *>>>
- : CFGViewPredecessors<false> {};
-template <>
-struct GraphTraits<
- std::pair<const GraphDiff<BasicBlock *, true> *, Inverse<BasicBlock *>>>
- : CFGViewPredecessors<true> {};
-} // end namespace llvm
-
-#endif // LLVM_IR_CFGDIFF_H
diff --git a/llvm/include/llvm/IR/CallSite.h b/llvm/include/llvm/IR/CallSite.h
deleted file mode 100644
index 0e957c4797e8..000000000000
--- a/llvm/include/llvm/IR/CallSite.h
+++ /dev/null
@@ -1,926 +0,0 @@
-//===- CallSite.h - Abstract Call & Invoke instrs ---------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the CallSite class, which is a handy wrapper for code that
-// wants to treat Call, Invoke and CallBr instructions in a generic way. When
-// in non-mutation context (e.g. an analysis) ImmutableCallSite should be used.
-// Finally, when some degree of customization is necessary between these two
-// extremes, CallSiteBase<> can be supplied with fine-tuned parameters.
-//
-// NOTE: These classes are supposed to have "value semantics". So they should be
-// passed by value, not by reference; they should not be "new"ed or "delete"d.
-// They are efficiently copyable, assignable and constructable, with cost
-// equivalent to copying a pointer (notice that they have only a single data
-// member). The internal representation carries a flag which indicates which of
-// the three variants is enclosed. This allows for cheaper checks when various
-// accessors of CallSite are employed.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_IR_CALLSITE_H
-#define LLVM_IR_CALLSITE_H
-
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/InstrTypes.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Support/Casting.h"
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-
-namespace llvm {
-
-namespace Intrinsic {
-typedef unsigned ID;
-}
-
-template <typename FunTy = const Function, typename BBTy = const BasicBlock,
- typename ValTy = const Value, typename UserTy = const User,
- typename UseTy = const Use, typename InstrTy = const Instruction,
- typename CallTy = const CallInst,
- typename InvokeTy = const InvokeInst,
- typename CallBrTy = const CallBrInst,
- typename IterTy = User::const_op_iterator>
-class CallSiteBase {
-protected:
- PointerIntPair<InstrTy *, 2, int> I;
-
- CallSiteBase() = default;
- CallSiteBase(CallTy *CI) : I(CI, 1) { assert(CI); }
- CallSiteBase(InvokeTy *II) : I(II, 0) { assert(II); }
- CallSiteBase(CallBrTy *CBI) : I(CBI, 2) { assert(CBI); }
- explicit CallSiteBase(ValTy *II) { *this = get(II); }
-
-private:
- /// This static method is like a constructor. It will create an appropriate
- /// call site for a Call, Invoke or CallBr instruction, but it can also create
- /// a null initialized CallSiteBase object for something which is NOT a call
- /// site.
- static CallSiteBase get(ValTy *V) {
- if (InstrTy *II = dyn_cast<InstrTy>(V)) {
- if (II->getOpcode() == Instruction::Call)
- return CallSiteBase(static_cast<CallTy*>(II));
- if (II->getOpcode() == Instruction::Invoke)
- return CallSiteBase(static_cast<InvokeTy*>(II));
- if (II->getOpcode() == Instruction::CallBr)
- return CallSiteBase(static_cast<CallBrTy *>(II));
- }
- return CallSiteBase();
- }
-
-public:
- /// Return true if a CallInst is enclosed.
- bool isCall() const { return I.getInt() == 1; }
-
- /// Return true if an InvokeInst is enclosed. !I.getInt() may also signify a
- /// NULL instruction pointer, so check that.
- bool isInvoke() const { return getInstruction() && I.getInt() == 0; }
-
- /// Return true if a CallBrInst is enclosed.
- bool isCallBr() const { return I.getInt() == 2; }
-
- InstrTy *getInstruction() const { return I.getPointer(); }
- InstrTy *operator->() const { return I.getPointer(); }
- explicit operator bool() const { return I.getPointer(); }
-
- /// Get the basic block containing the call site.
- BBTy* getParent() const { return getInstruction()->getParent(); }
-
- /// Return the pointer to function that is being called.
- ValTy *getCalledValue() const {
- assert(getInstruction() && "Not a call, invoke or callbr instruction!");
- return *getCallee();
- }
-
- /// Return the function being called if this is a direct call, otherwise
- /// return null (if it's an indirect call).
- FunTy *getCalledFunction() const {
- return dyn_cast<FunTy>(getCalledValue());
- }
-
- /// Return true if the callsite is an indirect call.
- bool isIndirectCall() const {
- const Value *V = getCalledValue();
- if (!V)
- return false;
- if (isa<FunTy>(V) || isa<Constant>(V))
- return false;
- if (const CallBase *CB = dyn_cast<CallBase>(getInstruction()))
- if (CB->isInlineAsm())
- return false;
- return true;
- }
-
- /// Set the callee to the specified value. Unlike the function of the same
- /// name on CallBase, does not modify the type!
- void setCalledFunction(Value *V) {
- assert(getInstruction() && "Not a call, callbr, or invoke instruction!");
- assert(cast<PointerType>(V->getType())->getElementType() ==
- cast<CallBase>(getInstruction())->getFunctionType() &&
- "New callee type does not match FunctionType on call");
- *getCallee() = V;
- }
-
- /// Return the intrinsic ID of the intrinsic called by this CallSite,
- /// or Intrinsic::not_intrinsic if the called function is not an
- /// intrinsic, or if this CallSite is an indirect call.
- Intrinsic::ID getIntrinsicID() const {
- if (auto *F = getCalledFunction())
- return F->getIntrinsicID();
- // Don't use Intrinsic::not_intrinsic, as it will require pulling
- // Intrinsics.h into every header that uses CallSite.
- return static_cast<Intrinsic::ID>(0);
- }
-
- /// Determine whether the passed iterator points to the callee operand's Use.
- bool isCallee(Value::const_user_iterator UI) const {
- return isCallee(&UI.getUse());
- }
-
- /// Determine whether this Use is the callee operand's Use.
- bool isCallee(const Use *U) const { return getCallee() == U; }
-
- /// Determine whether the passed iterator points to an argument operand.
- bool isArgOperand(Value::const_user_iterator UI) const {
- return isArgOperand(&UI.getUse());
- }
-
- /// Determine whether the passed use points to an argument operand.
- bool isArgOperand(const Use *U) const {
- assert(getInstruction() == U->getUser());
- return arg_begin() <= U && U < arg_end();
- }
-
- /// Determine whether the passed iterator points to a bundle operand.
- bool isBundleOperand(Value::const_user_iterator UI) const {
- return isBundleOperand(&UI.getUse());
- }
-
- /// Determine whether the passed use points to a bundle operand.
- bool isBundleOperand(const Use *U) const {
- assert(getInstruction() == U->getUser());
- if (!hasOperandBundles())
- return false;
- unsigned OperandNo = U - (*this)->op_begin();
- return getBundleOperandsStartIndex() <= OperandNo &&
- OperandNo < getBundleOperandsEndIndex();
- }
-
- /// Determine whether the passed iterator points to a data operand.
- bool isDataOperand(Value::const_user_iterator UI) const {
- return isDataOperand(&UI.getUse());
- }
-
- /// Determine whether the passed use points to a data operand.
- bool isDataOperand(const Use *U) const {
- return data_operands_begin() <= U && U < data_operands_end();
- }
-
- ValTy *getArgument(unsigned ArgNo) const {
- assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
- return *(arg_begin() + ArgNo);
- }
-
- void setArgument(unsigned ArgNo, Value* newVal) {
- assert(getInstruction() && "Not a call, invoke or callbr instruction!");
- assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
- getInstruction()->setOperand(ArgNo, newVal);
- }
-
- /// Given a value use iterator, returns the argument that corresponds to it.
- /// Iterator must actually correspond to an argument.
- unsigned getArgumentNo(Value::const_user_iterator I) const {
- return getArgumentNo(&I.getUse());
- }
-
- /// Given a use for an argument, get the argument number that corresponds to
- /// it.
- unsigned getArgumentNo(const Use *U) const {
- assert(getInstruction() && "Not a call, invoke or callbr instruction!");
- assert(isArgOperand(U) && "Argument # out of range!");
- return U - arg_begin();
- }
-
- /// The type of iterator to use when looping over actual arguments at this
- /// call site.
- using arg_iterator = IterTy;
-
- iterator_range<IterTy> args() const {
- return make_range(arg_begin(), arg_end());
- }
- bool arg_empty() const { return arg_end() == arg_begin(); }
- unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); }
-
- /// Given a value use iterator, return the data operand corresponding to it.
- /// Iterator must actually correspond to a data operand.
- unsigned getDataOperandNo(Value::const_user_iterator UI) const {
- return getDataOperandNo(&UI.getUse());
- }
-
- /// Given a use for a data operand, get the data operand number that
- /// corresponds to it.
- unsigned getDataOperandNo(const Use *U) const {
- assert(getInstruction() && "Not a call, invoke or callbr instruction!");
- assert(isDataOperand(U) && "Data operand # out of range!");
- return U - data_operands_begin();
- }
-
- /// Type of iterator to use when looping over data operands at this call site
- /// (see below).
- using data_operand_iterator = IterTy;
-
- /// data_operands_begin/data_operands_end - Return iterators iterating over
- /// the call / invoke / callbr argument list and bundle operands. For invokes,
- /// this is the set of instruction operands except the invoke target and the
- /// two successor blocks; for calls this is the set of instruction operands
- /// except the call target; for callbrs the number of labels to skip must be
- /// determined first.
-
- IterTy data_operands_begin() const {
- assert(getInstruction() && "Not a call or invoke instruction!");
- return cast<CallBase>(getInstruction())->data_operands_begin();
- }
- IterTy data_operands_end() const {
- assert(getInstruction() && "Not a call or invoke instruction!");
- return cast<CallBase>(getInstruction())->data_operands_end();
- }
- iterator_range<IterTy> data_ops() const {
- return make_range(data_operands_begin(), data_operands_end());
- }
- bool data_operands_empty() const {
- return data_operands_end() == data_operands_begin();
- }
- unsigned data_operands_size() const {
- return std::distance(data_operands_begin(), data_operands_end());
- }
-
- /// Return the type of the instruction that generated this call site.
- Type *getType() const { return (*this)->getType(); }
-
- /// Return the caller function for this call site.
- FunTy *getCaller() const { return (*this)->getParent()->getParent(); }
-
- /// Tests if this call site must be tail call optimized. Only a CallInst can
- /// be tail call optimized.
- bool isMustTailCall() const {
- return isCall() && cast<CallInst>(getInstruction())->isMustTailCall();
- }
-
- /// Tests if this call site is marked as a tail call.
- bool isTailCall() const {
- return isCall() && cast<CallInst>(getInstruction())->isTailCall();
- }
-
-#define CALLSITE_DELEGATE_GETTER(METHOD) \
- InstrTy *II = getInstruction(); \
- return isCall() ? cast<CallInst>(II)->METHOD \
- : isCallBr() ? cast<CallBrInst>(II)->METHOD \
- : cast<InvokeInst>(II)->METHOD
-
-#define CALLSITE_DELEGATE_SETTER(METHOD) \
- InstrTy *II = getInstruction(); \
- if (isCall()) \
- cast<CallInst>(II)->METHOD; \
- else if (isCallBr()) \
- cast<CallBrInst>(II)->METHOD; \
- else \
- cast<InvokeInst>(II)->METHOD
-
- unsigned getNumArgOperands() const {
- CALLSITE_DELEGATE_GETTER(getNumArgOperands());
- }
-
- ValTy *getArgOperand(unsigned i) const {
- CALLSITE_DELEGATE_GETTER(getArgOperand(i));
- }
-
- ValTy *getReturnedArgOperand() const {
- CALLSITE_DELEGATE_GETTER(getReturnedArgOperand());
- }
-
- bool isInlineAsm() const {
- return cast<CallBase>(getInstruction())->isInlineAsm();
- }
-
- /// Get the calling convention of the call.
- CallingConv::ID getCallingConv() const {
- CALLSITE_DELEGATE_GETTER(getCallingConv());
- }
- /// Set the calling convention of the call.
- void setCallingConv(CallingConv::ID CC) {
- CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
- }
-
- FunctionType *getFunctionType() const {
- CALLSITE_DELEGATE_GETTER(getFunctionType());
- }
-
- void mutateFunctionType(FunctionType *Ty) const {
- CALLSITE_DELEGATE_SETTER(mutateFunctionType(Ty));
- }
-
- /// Get the parameter attributes of the call.
- AttributeList getAttributes() const {
- CALLSITE_DELEGATE_GETTER(getAttributes());
- }
- /// Set the parameter attributes of the call.
- void setAttributes(AttributeList PAL) {
- CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
- }
-
- void addAttribute(unsigned i, Attribute::AttrKind Kind) {
- CALLSITE_DELEGATE_SETTER(addAttribute(i, Kind));
- }
-
- void addAttribute(unsigned i, Attribute Attr) {
- CALLSITE_DELEGATE_SETTER(addAttribute(i, Attr));
- }
-
- void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
- CALLSITE_DELEGATE_SETTER(addParamAttr(ArgNo, Kind));
- }
-
- void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
- CALLSITE_DELEGATE_SETTER(removeAttribute(i, Kind));
- }
-
- void removeAttribute(unsigned i, StringRef Kind) {
- CALLSITE_DELEGATE_SETTER(removeAttribute(i, Kind));
- }
-
- void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
- CALLSITE_DELEGATE_SETTER(removeParamAttr(ArgNo, Kind));
- }
-
- /// Return true if this function has the given attribute.
- bool hasFnAttr(Attribute::AttrKind Kind) const {
- CALLSITE_DELEGATE_GETTER(hasFnAttr(Kind));
- }
-
- /// Return true if this function has the given attribute.
- bool hasFnAttr(StringRef Kind) const {
- CALLSITE_DELEGATE_GETTER(hasFnAttr(Kind));
- }
-
- /// Return true if this return value has the given attribute.
- bool hasRetAttr(Attribute::AttrKind Kind) const {
- CALLSITE_DELEGATE_GETTER(hasRetAttr(Kind));
- }
-
- /// Return true if the call or the callee has the given attribute.
- bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
- CALLSITE_DELEGATE_GETTER(paramHasAttr(ArgNo, Kind));
- }
-
- Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
- CALLSITE_DELEGATE_GETTER(getAttribute(i, Kind));
- }
-
- Attribute getAttribute(unsigned i, StringRef Kind) const {
- CALLSITE_DELEGATE_GETTER(getAttribute(i, Kind));
- }
-
- /// Return true if the data operand at index \p i directly or indirectly has
-  /// the attribute \p Kind.
- ///
- /// Normal call, invoke or callbr arguments have per operand attributes, as
- /// specified in the attribute set attached to this instruction, while operand
-  /// bundle operands may have some attributes implied by the type of their
- /// containing operand bundle.
- bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
- CALLSITE_DELEGATE_GETTER(dataOperandHasImpliedAttr(i, Kind));
- }
-
- /// Extract the alignment of the return value.
- unsigned getRetAlignment() const {
- CALLSITE_DELEGATE_GETTER(getRetAlignment());
- }
-
- /// Extract the alignment for a call or parameter (0=unknown).
- unsigned getParamAlignment(unsigned ArgNo) const {
- CALLSITE_DELEGATE_GETTER(getParamAlignment(ArgNo));
- }
-
- /// Extract the byval type for a call or parameter (nullptr=unknown).
- Type *getParamByValType(unsigned ArgNo) const {
- CALLSITE_DELEGATE_GETTER(getParamByValType(ArgNo));
- }
-
- /// Extract the number of dereferenceable bytes for a call or parameter
- /// (0=unknown).
- uint64_t getDereferenceableBytes(unsigned i) const {
- CALLSITE_DELEGATE_GETTER(getDereferenceableBytes(i));
- }
-
- /// Extract the number of dereferenceable_or_null bytes for a call or
- /// parameter (0=unknown).
- uint64_t getDereferenceableOrNullBytes(unsigned i) const {
- CALLSITE_DELEGATE_GETTER(getDereferenceableOrNullBytes(i));
- }
-
- /// Determine if the return value is marked with NoAlias attribute.
- bool returnDoesNotAlias() const {
- CALLSITE_DELEGATE_GETTER(returnDoesNotAlias());
- }
-
- /// Return true if the call should not be treated as a call to a builtin.
- bool isNoBuiltin() const {
- CALLSITE_DELEGATE_GETTER(isNoBuiltin());
- }
-
- /// Return true if the call requires strict floating point semantics.
- bool isStrictFP() const {
- CALLSITE_DELEGATE_GETTER(isStrictFP());
- }
-
- /// Return true if the call should not be inlined.
- bool isNoInline() const {
- CALLSITE_DELEGATE_GETTER(isNoInline());
- }
- void setIsNoInline(bool Value = true) {
- CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
- }
-
- /// Determine if the call does not access memory.
- bool doesNotAccessMemory() const {
- CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
- }
- void setDoesNotAccessMemory() {
- CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
- }
-
- /// Determine if the call does not access or only reads memory.
- bool onlyReadsMemory() const {
- CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
- }
- void setOnlyReadsMemory() {
- CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
- }
-
- /// Determine if the call does not access or only writes memory.
- bool doesNotReadMemory() const {
- CALLSITE_DELEGATE_GETTER(doesNotReadMemory());
- }
- void setDoesNotReadMemory() {
- CALLSITE_DELEGATE_SETTER(setDoesNotReadMemory());
- }
-
-  /// Determine if the call can access memory only using pointers based
- /// on its arguments.
- bool onlyAccessesArgMemory() const {
- CALLSITE_DELEGATE_GETTER(onlyAccessesArgMemory());
- }
- void setOnlyAccessesArgMemory() {
- CALLSITE_DELEGATE_SETTER(setOnlyAccessesArgMemory());
- }
-
- /// Determine if the function may only access memory that is
- /// inaccessible from the IR.
- bool onlyAccessesInaccessibleMemory() const {
- CALLSITE_DELEGATE_GETTER(onlyAccessesInaccessibleMemory());
- }
- void setOnlyAccessesInaccessibleMemory() {
- CALLSITE_DELEGATE_SETTER(setOnlyAccessesInaccessibleMemory());
- }
-
- /// Determine if the function may only access memory that is
- /// either inaccessible from the IR or pointed to by its arguments.
- bool onlyAccessesInaccessibleMemOrArgMem() const {
- CALLSITE_DELEGATE_GETTER(onlyAccessesInaccessibleMemOrArgMem());
- }
- void setOnlyAccessesInaccessibleMemOrArgMem() {
- CALLSITE_DELEGATE_SETTER(setOnlyAccessesInaccessibleMemOrArgMem());
- }
-
- /// Determine if the call cannot return.
- bool doesNotReturn() const {
- CALLSITE_DELEGATE_GETTER(doesNotReturn());
- }
- void setDoesNotReturn() {
- CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
- }
-
- /// Determine if the call cannot unwind.
- bool doesNotThrow() const {
- CALLSITE_DELEGATE_GETTER(doesNotThrow());
- }
- void setDoesNotThrow() {
- CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
- }
-
- /// Determine if the call can be duplicated.
- bool cannotDuplicate() const {
- CALLSITE_DELEGATE_GETTER(cannotDuplicate());
- }
- void setCannotDuplicate() {
- CALLSITE_DELEGATE_SETTER(setCannotDuplicate());
- }
-
- /// Determine if the call is convergent.
- bool isConvergent() const {
- CALLSITE_DELEGATE_GETTER(isConvergent());
- }
- void setConvergent() {
- CALLSITE_DELEGATE_SETTER(setConvergent());
- }
- void setNotConvergent() {
- CALLSITE_DELEGATE_SETTER(setNotConvergent());
- }
-
- unsigned getNumOperandBundles() const {
- CALLSITE_DELEGATE_GETTER(getNumOperandBundles());
- }
-
- bool hasOperandBundles() const {
- CALLSITE_DELEGATE_GETTER(hasOperandBundles());
- }
-
- unsigned getBundleOperandsStartIndex() const {
- CALLSITE_DELEGATE_GETTER(getBundleOperandsStartIndex());
- }
-
- unsigned getBundleOperandsEndIndex() const {
- CALLSITE_DELEGATE_GETTER(getBundleOperandsEndIndex());
- }
-
- unsigned getNumTotalBundleOperands() const {
- CALLSITE_DELEGATE_GETTER(getNumTotalBundleOperands());
- }
-
- OperandBundleUse getOperandBundleAt(unsigned Index) const {
- CALLSITE_DELEGATE_GETTER(getOperandBundleAt(Index));
- }
-
- Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
- CALLSITE_DELEGATE_GETTER(getOperandBundle(Name));
- }
-
- Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
- CALLSITE_DELEGATE_GETTER(getOperandBundle(ID));
- }
-
- unsigned countOperandBundlesOfType(uint32_t ID) const {
- CALLSITE_DELEGATE_GETTER(countOperandBundlesOfType(ID));
- }
-
- bool isBundleOperand(unsigned Idx) const {
- CALLSITE_DELEGATE_GETTER(isBundleOperand(Idx));
- }
-
- IterTy arg_begin() const {
- CALLSITE_DELEGATE_GETTER(arg_begin());
- }
-
- IterTy arg_end() const {
- CALLSITE_DELEGATE_GETTER(arg_end());
- }
-
-#undef CALLSITE_DELEGATE_GETTER
-#undef CALLSITE_DELEGATE_SETTER
-
- void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
- // Since this is actually a getter that "looks like" a setter, don't use the
- // above macros to avoid confusion.
- cast<CallBase>(getInstruction())->getOperandBundlesAsDefs(Defs);
- }
-
- /// Determine whether this data operand is not captured.
- bool doesNotCapture(unsigned OpNo) const {
- return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
- }
-
- /// Determine whether this argument is passed by value.
- bool isByValArgument(unsigned ArgNo) const {
- return paramHasAttr(ArgNo, Attribute::ByVal);
- }
-
- /// Determine whether this argument is passed in an alloca.
- bool isInAllocaArgument(unsigned ArgNo) const {
- return paramHasAttr(ArgNo, Attribute::InAlloca);
- }
-
- /// Determine whether this argument is passed by value or in an alloca.
- bool isByValOrInAllocaArgument(unsigned ArgNo) const {
- return paramHasAttr(ArgNo, Attribute::ByVal) ||
- paramHasAttr(ArgNo, Attribute::InAlloca);
- }
-
-  /// Determine if there is an inalloca argument. Only the last argument can
- /// have the inalloca attribute.
- bool hasInAllocaArgument() const {
- return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
- }
-
- bool doesNotAccessMemory(unsigned OpNo) const {
- return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
- }
-
- bool onlyReadsMemory(unsigned OpNo) const {
- return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
- dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
- }
-
- bool doesNotReadMemory(unsigned OpNo) const {
- return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
- dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
- }
-
- /// Return true if the return value is known to be not null.
- /// This may be because it has the nonnull attribute, or because at least
- /// one byte is dereferenceable and the pointer is in addrspace(0).
- bool isReturnNonNull() const {
- if (hasRetAttr(Attribute::NonNull))
- return true;
- else if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
- !NullPointerIsDefined(getCaller(),
- getType()->getPointerAddressSpace()))
- return true;
-
- return false;
- }
-
- /// Returns true if this CallSite passes the given Value* as an argument to
- /// the called function.
- bool hasArgument(const Value *Arg) const {
- for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E;
- ++AI)
- if (AI->get() == Arg)
- return true;
- return false;
- }
-
-private:
- IterTy getCallee() const {
- return cast<CallBase>(getInstruction())->op_end() - 1;
- }
-};
-
-class CallSite : public CallSiteBase<Function, BasicBlock, Value, User, Use,
- Instruction, CallInst, InvokeInst,
- CallBrInst, User::op_iterator> {
-public:
- CallSite() = default;
- CallSite(CallSiteBase B) : CallSiteBase(B) {}
- CallSite(CallInst *CI) : CallSiteBase(CI) {}
- CallSite(InvokeInst *II) : CallSiteBase(II) {}
- CallSite(CallBrInst *CBI) : CallSiteBase(CBI) {}
- explicit CallSite(Instruction *II) : CallSiteBase(II) {}
- explicit CallSite(Value *V) : CallSiteBase(V) {}
-
- bool operator==(const CallSite &CS) const { return I == CS.I; }
- bool operator!=(const CallSite &CS) const { return I != CS.I; }
- bool operator<(const CallSite &CS) const {
- return getInstruction() < CS.getInstruction();
- }
-
-private:
- friend struct DenseMapInfo<CallSite>;
-
- User::op_iterator getCallee() const;
-};
-
-/// Establish a view of a call site for examination.
-class ImmutableCallSite : public CallSiteBase<> {
-public:
- ImmutableCallSite() = default;
- ImmutableCallSite(const CallInst *CI) : CallSiteBase(CI) {}
- ImmutableCallSite(const InvokeInst *II) : CallSiteBase(II) {}
- ImmutableCallSite(const CallBrInst *CBI) : CallSiteBase(CBI) {}
- explicit ImmutableCallSite(const Instruction *II) : CallSiteBase(II) {}
- explicit ImmutableCallSite(const Value *V) : CallSiteBase(V) {}
- ImmutableCallSite(CallSite CS) : CallSiteBase(CS.getInstruction()) {}
-};
-
-/// AbstractCallSite
-///
-/// An abstract call site is a wrapper that allows treating direct,
-/// indirect, and callback calls the same. If an abstract call site
-/// represents a direct or indirect call site it behaves like a stripped
-/// down version of a normal call site object. The abstract call site can
-/// also represent a callback call, capturing the fact that the initially
-/// called function (=broker) may invoke a third one (=callback callee).
-/// In this case, the abstract call site hides the middle man, hence the
-/// broker function. The result is a representation of the callback call,
-/// inside the broker, but in the context of the original call to the broker.
-///
-/// There are up to three functions involved when we talk about callback call
-/// sites. The caller (1), which invokes the broker function. The broker
-/// function (2), that will invoke the callee zero or more times. And finally
-/// the callee (3), which is the target of the callback call.
-///
-/// The abstract call site will handle the mapping from parameters to arguments
-/// depending on the semantics of the broker function. However, it is important
-/// to note that the mapping is often partial. Thus, some arguments of the
-/// call/invoke instruction are mapped to parameters of the callee while others
-/// are not.
-class AbstractCallSite {
-public:
-
- /// The encoding of a callback with regards to the underlying instruction.
- struct CallbackInfo {
-
- /// For direct/indirect calls the parameter encoding is empty. If it is not,
- /// the abstract call site represents a callback. In that case, the first
- /// element of the encoding vector represents which argument of the call
- /// site CS is the callback callee. The remaining elements map parameters
- /// (identified by their position) to the arguments that will be passed
- /// through (also identified by position but in the call site instruction).
- ///
- /// NOTE that we use LLVM argument numbers (starting at 0) and not
- /// clang/source argument numbers (starting at 1). The -1 entries represent
- /// unknown values that are passed to the callee.
- using ParameterEncodingTy = SmallVector<int, 0>;
- ParameterEncodingTy ParameterEncoding;
-
- };
-
-private:
-
- /// The underlying call site:
- /// caller -> callee, if this is a direct or indirect call site
- /// caller -> broker function, if this is a callback call site
- CallSite CS;
-
- /// The encoding of a callback with regards to the underlying instruction.
- CallbackInfo CI;
-
-public:
- /// Sole constructor for abstract call sites (ACS).
- ///
- /// An abstract call site can only be constructed through a llvm::Use because
- /// each operand (=use) of an instruction could potentially be a different
- /// abstract call site. Furthermore, even if the value of the llvm::Use is the
- /// same, and the user is as well, the abstract call sites might not be.
- ///
- /// If a use is not associated with an abstract call site the constructed ACS
- /// will evaluate to false if converted to a boolean.
- ///
- /// If the use is the callee use of a call or invoke instruction, the
- /// constructed abstract call site will behave as a llvm::CallSite would.
- ///
- /// If the use is not a callee use of a call or invoke instruction, the
- /// callback metadata is used to determine the argument <-> parameter mapping
- /// as well as the callee of the abstract call site.
- AbstractCallSite(const Use *U);
-
- /// Add operand uses of \p ICS that represent callback uses into \p CBUses.
- ///
- /// All uses added to \p CBUses can be used to create abstract call sites for
- /// which AbstractCallSite::isCallbackCall() will return true.
- static void getCallbackUses(ImmutableCallSite ICS,
- SmallVectorImpl<const Use *> &CBUses);
-
- /// Conversion operator to conveniently check for a valid/initialized ACS.
- explicit operator bool() const { return (bool)CS; }
-
- /// Return the underlying instruction.
- Instruction *getInstruction() const { return CS.getInstruction(); }
-
- /// Return the call site abstraction for the underlying instruction.
- CallSite getCallSite() const { return CS; }
-
- /// Return true if this ACS represents a direct call.
- bool isDirectCall() const {
- return !isCallbackCall() && !CS.isIndirectCall();
- }
-
- /// Return true if this ACS represents an indirect call.
- bool isIndirectCall() const {
- return !isCallbackCall() && CS.isIndirectCall();
- }
-
- /// Return true if this ACS represents a callback call.
- bool isCallbackCall() const {
- // For a callback call site the callee is ALWAYS stored first in the
- // transitive values vector. Thus, a non-empty vector indicates a callback.
- return !CI.ParameterEncoding.empty();
- }
-
- /// Return true if @p UI is the use that defines the callee of this ACS.
- bool isCallee(Value::const_user_iterator UI) const {
- return isCallee(&UI.getUse());
- }
-
- /// Return true if @p U is the use that defines the callee of this ACS.
- bool isCallee(const Use *U) const {
- if (isDirectCall())
- return CS.isCallee(U);
-
- assert(!CI.ParameterEncoding.empty() &&
- "Callback without parameter encoding!");
-
- return (int)CS.getArgumentNo(U) == CI.ParameterEncoding[0];
- }
-
- /// Return the number of parameters of the callee.
- unsigned getNumArgOperands() const {
- if (isDirectCall())
- return CS.getNumArgOperands();
- // Subtract 1 for the callee encoding.
- return CI.ParameterEncoding.size() - 1;
- }
-
- /// Return the operand index of the underlying instruction associated with @p
- /// Arg.
- int getCallArgOperandNo(Argument &Arg) const {
- return getCallArgOperandNo(Arg.getArgNo());
- }
-
- /// Return the operand index of the underlying instruction associated with
- /// the function parameter number @p ArgNo or -1 if there is none.
- int getCallArgOperandNo(unsigned ArgNo) const {
- if (isDirectCall())
- return ArgNo;
- // Add 1 for the callee encoding.
- return CI.ParameterEncoding[ArgNo + 1];
- }
-
- /// Return the operand of the underlying instruction associated with @p Arg.
- Value *getCallArgOperand(Argument &Arg) const {
- return getCallArgOperand(Arg.getArgNo());
- }
-
- /// Return the operand of the underlying instruction associated with the
- /// function parameter number @p ArgNo or nullptr if there is none.
- Value *getCallArgOperand(unsigned ArgNo) const {
- if (isDirectCall())
- return CS.getArgOperand(ArgNo);
- // Add 1 for the callee encoding.
- return CI.ParameterEncoding[ArgNo + 1] >= 0
- ? CS.getArgOperand(CI.ParameterEncoding[ArgNo + 1])
- : nullptr;
- }
-
- /// Return the operand index of the underlying instruction associated with the
- /// callee of this ACS. Only valid for callback calls!
- int getCallArgOperandNoForCallee() const {
- assert(isCallbackCall());
- assert(CI.ParameterEncoding.size() && CI.ParameterEncoding[0] >= 0);
- return CI.ParameterEncoding[0];
- }
-
- /// Return the use of the callee value in the underlying instruction. Only
- /// valid for callback calls!
- const Use &getCalleeUseForCallback() const {
- int CalleeArgIdx = getCallArgOperandNoForCallee();
- assert(CalleeArgIdx >= 0 &&
- unsigned(CalleeArgIdx) < getInstruction()->getNumOperands());
- return getInstruction()->getOperandUse(CalleeArgIdx);
- }
-
- /// Return the pointer to function that is being called.
- Value *getCalledValue() const {
- if (isDirectCall())
- return CS.getCalledValue();
- return CS.getArgOperand(getCallArgOperandNoForCallee());
- }
-
- /// Return the function being called if this is a direct call, otherwise
- /// return null (if it's an indirect call).
- Function *getCalledFunction() const {
- Value *V = getCalledValue();
- return V ? dyn_cast<Function>(V->stripPointerCasts()) : nullptr;
- }
-};
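The CallbackInfo encoding documented in this class is easiest to see on a concrete broker. The sketch below is illustrative only and not part of the patch; it assumes a pthread_create-style broker whose callback metadata marks operand 2 as the callback callee and forwards operand 3 to the callback's only parameter, giving ParameterEncoding = {2, 3}.

  // Illustrative sketch; CalleeUse is assumed to be the Use of the callback
  // callee operand on the call to the broker function.
  AbstractCallSite ACS(&CalleeUse);
  if (ACS && ACS.isCallbackCall()) {
    // Resolved through ParameterEncoding[0] == 2.
    Function *Callback = ACS.getCalledFunction();
    // Parameter 0 of the callback maps to call-site operand 3.
    Value *Payload = ACS.getCallArgOperand(0);
  }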
-
-template <> struct DenseMapInfo<CallSite> {
- using BaseInfo = DenseMapInfo<decltype(CallSite::I)>;
-
- static CallSite getEmptyKey() {
- CallSite CS;
- CS.I = BaseInfo::getEmptyKey();
- return CS;
- }
-
- static CallSite getTombstoneKey() {
- CallSite CS;
- CS.I = BaseInfo::getTombstoneKey();
- return CS;
- }
-
- static unsigned getHashValue(const CallSite &CS) {
- return BaseInfo::getHashValue(CS.I);
- }
-
- static bool isEqual(const CallSite &LHS, const CallSite &RHS) {
- return LHS == RHS;
- }
-};
-
-} // end namespace llvm
-
-#endif // LLVM_IR_CALLSITE_H
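Since this patch removes CallSite.h wholesale, former users move to CallBase (llvm/IR/InstrTypes.h), which already models call, invoke, and callbr uniformly. A minimal migration sketch, illustrative rather than taken from the patch:

  static bool callsNamedFunction(Instruction *I, StringRef Name) {
    auto *CB = dyn_cast<CallBase>(I); // covers call, invoke, and callbr
    if (!CB || CB->isInlineAsm())
      return false;
    if (Function *F = CB->getCalledFunction()) // null for indirect calls
      return F->getName() == Name;
    return false;
  }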
diff --git a/llvm/include/llvm/IR/Constant.h b/llvm/include/llvm/IR/Constant.h
index 174e7364c524..9a1d2b80c48e 100644
--- a/llvm/include/llvm/IR/Constant.h
+++ b/llvm/include/llvm/IR/Constant.h
@@ -43,6 +43,8 @@ protected:
Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
: User(ty, vty, Ops, NumOps) {}
+ ~Constant() = default;
+
public:
void operator=(const Constant &) = delete;
Constant(const Constant &) = delete;
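The newly protected destructor stops clients from destroying constants through a Constant pointer; constants are uniqued and owned by their LLVMContext. A hedged sketch of the effect, with Ctx assumed to be an existing LLVMContext:

  Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
  // delete C;          // now ill-formed: ~Constant() is protected
  C->destroyConstant(); // the existing removal path for dead constants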
diff --git a/llvm/include/llvm/IR/ConstantFolder.h b/llvm/include/llvm/IR/ConstantFolder.h
index 5a5cabfd0206..da4a18e3c181 100644
--- a/llvm/include/llvm/IR/ConstantFolder.h
+++ b/llvm/include/llvm/IR/ConstantFolder.h
@@ -20,11 +20,14 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IRBuilderFolder.h"
namespace llvm {
/// ConstantFolder - Create constants with minimal, target-independent folding.
-class ConstantFolder {
+class ConstantFolder final : public IRBuilderFolder {
+ virtual void anchor();
+
public:
explicit ConstantFolder() = default;
@@ -33,87 +36,87 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateAdd(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
}
- Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ Constant *CreateFAdd(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFAdd(LHS, RHS);
}
Constant *CreateSub(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
}
- Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
+ Constant *CreateFSub(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFSub(LHS, RHS);
}
Constant *CreateMul(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
}
- Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
+ Constant *CreateFMul(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFMul(LHS, RHS);
}
Constant *CreateUDiv(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
return ConstantExpr::getUDiv(LHS, RHS, isExact);
}
Constant *CreateSDiv(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
return ConstantExpr::getSDiv(LHS, RHS, isExact);
}
- Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ Constant *CreateFDiv(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFDiv(LHS, RHS);
}
- Constant *CreateURem(Constant *LHS, Constant *RHS) const {
+ Constant *CreateURem(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getURem(LHS, RHS);
}
- Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
+ Constant *CreateSRem(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getSRem(LHS, RHS);
}
- Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
+ Constant *CreateFRem(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFRem(LHS, RHS);
}
Constant *CreateShl(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateLShr(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
return ConstantExpr::getLShr(LHS, RHS, isExact);
}
Constant *CreateAShr(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
return ConstantExpr::getAShr(LHS, RHS, isExact);
}
- Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
+ Constant *CreateAnd(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getAnd(LHS, RHS);
}
- Constant *CreateOr(Constant *LHS, Constant *RHS) const {
+ Constant *CreateOr(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getOr(LHS, RHS);
}
- Constant *CreateXor(Constant *LHS, Constant *RHS) const {
+ Constant *CreateXor(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getXor(LHS, RHS);
}
Constant *CreateBinOp(Instruction::BinaryOps Opc,
- Constant *LHS, Constant *RHS) const {
+ Constant *LHS, Constant *RHS) const override {
return ConstantExpr::get(Opc, LHS, RHS);
}
@@ -122,19 +125,19 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateNeg(Constant *C,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getNeg(C, HasNUW, HasNSW);
}
- Constant *CreateFNeg(Constant *C) const {
+ Constant *CreateFNeg(Constant *C) const override {
return ConstantExpr::getFNeg(C);
}
- Constant *CreateNot(Constant *C) const {
+ Constant *CreateNot(Constant *C) const override {
return ConstantExpr::getNot(C);
}
- Constant *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const {
+ Constant *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
return ConstantExpr::get(Opc, C);
}
@@ -143,11 +146,12 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const {
+ ArrayRef<Constant *> IdxList) const override {
return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
}
- Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const override {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
@@ -155,25 +159,25 @@ public:
}
Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const {
+ ArrayRef<Value *> IdxList) const override {
return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
}
- Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const {
+ Constant *CreateInBoundsGetElementPtr(
+ Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const override {
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
}
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const {
+ Constant *Idx) const override {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
}
- Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const {
+ Constant *CreateInBoundsGetElementPtr(
+ Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const override {
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
}
@@ -182,49 +186,49 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateCast(Instruction::CastOps Op, Constant *C,
- Type *DestTy) const {
+ Type *DestTy) const override {
return ConstantExpr::getCast(Op, C, DestTy);
}
- Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
+ Constant *CreatePointerCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getPointerCast(C, DestTy);
}
Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
- Type *DestTy) const {
+ Type *DestTy) const override {
return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
}
Constant *CreateIntCast(Constant *C, Type *DestTy,
- bool isSigned) const {
+ bool isSigned) const override {
return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
}
- Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getFPCast(C, DestTy);
}
- Constant *CreateBitCast(Constant *C, Type *DestTy) const {
+ Constant *CreateBitCast(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::BitCast, C, DestTy);
}
- Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ Constant *CreateIntToPtr(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
- Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ Constant *CreatePtrToInt(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
- Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getZExtOrBitCast(C, DestTy);
}
- Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getSExtOrBitCast(C, DestTy);
}
- Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getTruncOrBitCast(C, DestTy);
}
@@ -233,12 +237,12 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const {
+ Constant *RHS) const override {
return ConstantExpr::getCompare(P, LHS, RHS);
}
Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const {
+ Constant *RHS) const override {
return ConstantExpr::getCompare(P, LHS, RHS);
}
@@ -246,31 +250,32 @@ public:
// Other Instructions
//===--------------------------------------------------------------------===//
- Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+ Constant *CreateSelect(Constant *C, Constant *True,
+ Constant *False) const override {
return ConstantExpr::getSelect(C, True, False);
}
- Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const override {
return ConstantExpr::getExtractElement(Vec, Idx);
}
Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
- Constant *Idx) const {
+ Constant *Idx) const override {
return ConstantExpr::getInsertElement(Vec, NewElt, Idx);
}
Constant *CreateShuffleVector(Constant *V1, Constant *V2,
- Constant *Mask) const {
+ ArrayRef<int> Mask) const override {
return ConstantExpr::getShuffleVector(V1, V2, Mask);
}
Constant *CreateExtractValue(Constant *Agg,
- ArrayRef<unsigned> IdxList) const {
+ ArrayRef<unsigned> IdxList) const override {
return ConstantExpr::getExtractValue(Agg, IdxList);
}
Constant *CreateInsertValue(Constant *Agg, Constant *Val,
- ArrayRef<unsigned> IdxList) const {
+ ArrayRef<unsigned> IdxList) const override {
return ConstantExpr::getInsertValue(Agg, Val, IdxList);
}
};
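Because ConstantFolder now implements IRBuilderFolder, folders can be selected and invoked through the common base class. A minimal sketch, assuming the base interface declares these operations virtually (as the overrides above imply) and with Ctx as an LLVMContext:

  ConstantFolder CF;
  const IRBuilderFolder &Folder = CF; // virtual dispatch is now possible
  Constant *Six = ConstantInt::get(Type::getInt32Ty(Ctx), 6);
  Constant *Seven = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
  Value *Prod = Folder.CreateMul(Six, Seven); // folds to i32 42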
diff --git a/llvm/include/llvm/IR/ConstantRange.h b/llvm/include/llvm/IR/ConstantRange.h
index e6bac8a5f933..8ecb9aa0ce02 100644
--- a/llvm/include/llvm/IR/ConstantRange.h
+++ b/llvm/include/llvm/IR/ConstantRange.h
@@ -410,6 +410,10 @@ public:
ConstantRange binaryOr(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
+ /// from a binary-xor of a value in this range by a value in \p Other.
+ ConstantRange binaryXor(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
/// from a left shift of a value in this range by a value in \p Other.
/// TODO: This isn't fully implemented yet.
ConstantRange shl(const ConstantRange &Other) const;
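The new binaryXor rounds out the binaryAnd/binaryOr family declared alongside it. A small usage sketch; the comment states only what a conservatively correct result must contain:

  ConstantRange A(APInt(8, 0), APInt(8, 4)); // the 8-bit values {0,1,2,3}
  ConstantRange B(APInt(8, 8));              // the singleton {8}
  ConstantRange R = A.binaryXor(B);          // must contain {8,9,10,11}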
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index 262ab439df65..8e2dba9b2417 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -41,12 +41,6 @@
namespace llvm {
-class ArrayType;
-class IntegerType;
-class PointerType;
-class SequentialType;
-class StructType;
-class VectorType;
template <class ConstantClass> struct ConstantAggrKeyType;
/// Base class for constants with no operands.
@@ -157,6 +151,20 @@ public:
return Val.getSExtValue();
}
+ /// Return the constant as an llvm::MaybeAlign.
+ /// Note that this method can assert if the value does not fit in 64 bits or
+ /// is not a power of two.
+ inline MaybeAlign getMaybeAlignValue() const {
+ return MaybeAlign(getZExtValue());
+ }
+
+ /// Return the constant as an llvm::Align, interpreting `0` as `Align(1)`.
+ /// Note that this method can assert if the value does not fit in 64 bits or
+ /// is not a power of two.
+ inline Align getAlignValue() const {
+ return getMaybeAlignValue().valueOrOne();
+ }
+
/// A helper method that can be used to determine if the constant contained
/// within is equal to a constant. This only works for very small values,
/// because this is all that can be represented with all types.
@@ -300,6 +308,7 @@ public:
/// Return true if Ty is big enough to represent V.
static bool isValueValidForType(Type *Ty, const APFloat &V);
inline const APFloat &getValueAPF() const { return Val; }
+ inline const APFloat &getValue() const { return Val; }
/// Return true if the value is positive or negative zero.
bool isZero() const { return Val.isZero(); }
@@ -388,7 +397,7 @@ public:
/// use operands.
class ConstantAggregate : public Constant {
protected:
- ConstantAggregate(CompositeType *T, ValueTy VT, ArrayRef<Constant *> V);
+ ConstantAggregate(Type *T, ValueTy VT, ArrayRef<Constant *> V);
public:
/// Transparently provide more efficient getOperand methods.
@@ -456,8 +465,7 @@ public:
static Constant *get(StructType *T, ArrayRef<Constant*> V);
template <typename... Csts>
- static typename std::enable_if<are_base_of<Constant, Csts...>::value,
- Constant *>::type
+ static std::enable_if_t<are_base_of<Constant, Csts...>::value, Constant *>
get(StructType *T, Csts *... Vs) {
SmallVector<Constant *, 8> Values({Vs...});
return get(T, Values);
@@ -514,12 +522,13 @@ private:
public:
/// Return a ConstantVector with the specified constant in each element.
- static Constant *getSplat(unsigned NumElts, Constant *Elt);
+  /// Note that this might not return an instance of ConstantVector.
+ static Constant *getSplat(ElementCount EC, Constant *Elt);
- /// Specialize the getType() method to always return a VectorType,
+ /// Specialize the getType() method to always return a FixedVectorType,
/// which reduces the amount of casting needed in parts of the compiler.
- inline VectorType *getType() const {
- return cast<VectorType>(Value::getType());
+ inline FixedVectorType *getType() const {
+ return cast<FixedVectorType>(Value::getType());
}
/// If all elements of the vector constant have the same value, return that
@@ -628,12 +637,6 @@ public:
/// efficient as getElementAsInteger/Float/Double.
Constant *getElementAsConstant(unsigned i) const;
- /// Specialize the getType() method to always return a SequentialType, which
- /// reduces the amount of casting needed in parts of the compiler.
- inline SequentialType *getType() const {
- return cast<SequentialType>(Value::getType());
- }
-
/// Return the element type of the array/vector.
Type *getElementType() const;
@@ -724,14 +727,15 @@ public:
return getImpl(Data, Ty);
}
- /// getFP() constructors - Return a constant with array type with an element
- /// count and element type of float with precision matching the number of
- /// bits in the ArrayRef passed in. (i.e. half for 16bits, float for 32bits,
- /// double for 64bits) Note that this can return a ConstantAggregateZero
- /// object.
- static Constant *getFP(LLVMContext &Context, ArrayRef<uint16_t> Elts);
- static Constant *getFP(LLVMContext &Context, ArrayRef<uint32_t> Elts);
- static Constant *getFP(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+ /// getFP() constructors - Return a constant of array type with a float
+ /// element type taken from argument `ElementType', and count taken from
+  /// argument `Elts'. The bit width of the element type must match the bit
+  /// width of the integer type contained in the passed-in ArrayRef
+  /// (i.e. half or bfloat for 16 bits, float for 32 bits, double for 64 bits).
+  /// Note that this can return a ConstantAggregateZero object.
+ static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts);
+ static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts);
+ static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts);
/// This method constructs a CDS and initializes it with a text string.
/// The default behavior (AddNull==true) causes a null terminator to
@@ -763,7 +767,12 @@ class ConstantDataVector final : public ConstantDataSequential {
friend class ConstantDataSequential;
explicit ConstantDataVector(Type *ty, const char *Data)
- : ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
+ : ConstantDataSequential(ty, ConstantDataVectorVal, Data),
+ IsSplatSet(false) {}
+ // Cache whether or not the constant is a splat.
+ mutable bool IsSplatSet : 1;
+ mutable bool IsSplat : 1;
+ bool isSplatData() const;
public:
ConstantDataVector(const ConstantDataVector &) = delete;
@@ -778,14 +787,15 @@ public:
static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);
- /// getFP() constructors - Return a constant with vector type with an element
- /// count and element type of float with the precision matching the number of
- /// bits in the ArrayRef passed in. (i.e. half for 16bits, float for 32bits,
- /// double for 64bits) Note that this can return a ConstantAggregateZero
- /// object.
- static Constant *getFP(LLVMContext &Context, ArrayRef<uint16_t> Elts);
- static Constant *getFP(LLVMContext &Context, ArrayRef<uint32_t> Elts);
- static Constant *getFP(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+ /// getFP() constructors - Return a constant of vector type with a float
+ /// element type taken from argument `ElementType', and count taken from
+  /// argument `Elts'. The bit width of the element type must match the bit
+  /// width of the integer type contained in the passed-in ArrayRef
+  /// (i.e. half or bfloat for 16 bits, float for 32 bits, double for 64 bits).
+  /// Note that this can return a ConstantAggregateZero object.
+ static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts);
+ static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts);
+ static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts);
/// Return a ConstantVector with the specified constant in each element.
/// The specified constant has to be a of a compatible type (i8/i16/
@@ -800,10 +810,10 @@ public:
/// same value, return that value. Otherwise return NULL.
Constant *getSplatValue() const;
- /// Specialize the getType() method to always return a VectorType,
+ /// Specialize the getType() method to always return a FixedVectorType,
/// which reduces the amount of casting needed in parts of the compiler.
- inline VectorType *getType() const {
- return cast<VectorType>(Value::getType());
+ inline FixedVectorType *getType() const {
+ return cast<FixedVectorType>(Value::getType());
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -900,6 +910,8 @@ protected:
setValueSubclassData(Opcode);
}
+ ~ConstantExpr() = default;
+
public:
// Static methods to construct a ConstantExpr of different kinds. Note that
// these methods may return a object that is not an instance of the
@@ -1198,7 +1210,8 @@ public:
Type *OnlyIfReducedTy = nullptr);
static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx,
Type *OnlyIfReducedTy = nullptr);
- static Constant *getShuffleVector(Constant *V1, Constant *V2, Constant *Mask,
+ static Constant *getShuffleVector(Constant *V1, Constant *V2,
+ ArrayRef<int> Mask,
Type *OnlyIfReducedTy = nullptr);
static Constant *getExtractValue(Constant *Agg, ArrayRef<unsigned> Idxs,
Type *OnlyIfReducedTy = nullptr);
@@ -1217,6 +1230,16 @@ public:
/// expression and return the list of indices.
ArrayRef<unsigned> getIndices() const;
+ /// Assert that this is a shufflevector and return the mask. See class
+ /// ShuffleVectorInst for a description of the mask representation.
+ ArrayRef<int> getShuffleMask() const;
+
+ /// Assert that this is a shufflevector and return the mask.
+ ///
+ /// TODO: This is a temporary hack until we update the bitcode format for
+ /// shufflevector.
+ Constant *getShuffleMaskForBitcode() const;
+
/// Return a string representation for an opcode.
const char *getOpcodeName() const;
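Among the Constants.h changes above, shufflevector masks move from a constant vector operand to ArrayRef<int>. A sketch of the new call shape, with Ctx assumed to be an LLVMContext and illustrative element values:

  uint32_t Elts[] = {1, 2, 3, 4};
  Constant *V1 = ConstantDataVector::get(Ctx, Elts); // <4 x i32>
  Constant *V2 = ConstantDataVector::get(Ctx, Elts);
  int Mask[] = {0, 4, 1, 5}; // interleave the low elements of V1 and V2
  Constant *Shuf = ConstantExpr::getShuffleVector(V1, V2, Mask);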
diff --git a/llvm/include/llvm/IR/ConstrainedOps.def b/llvm/include/llvm/IR/ConstrainedOps.def
index 7e24684ca654..ecba68fe0c0e 100644
--- a/llvm/include/llvm/IR/ConstrainedOps.def
+++ b/llvm/include/llvm/IR/ConstrainedOps.def
@@ -11,18 +11,32 @@
//
//===----------------------------------------------------------------------===//
+// DAG_FUNCTION defers to DAG_INSTRUCTION if it's defined, otherwise to FUNCTION.
+#ifndef DAG_FUNCTION
+#ifdef DAG_INSTRUCTION
+#define DAG_FUNCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
+#else
+#define DAG_FUNCTION(N,A,R,I,D) FUNCTION(N,A,R,I)
+#endif
+#endif
+
#ifndef INSTRUCTION
-#define INSTRUCTION(N,A,R,I,D)
+#define INSTRUCTION(N,A,R,I)
+#endif
+
+// DAG_INSTRUCTION is treated like an INSTRUCTION if the DAG node isn't used.
+#ifndef DAG_INSTRUCTION
+#define DAG_INSTRUCTION(N,A,R,I,D) INSTRUCTION(N,A,R,I)
#endif
// In most cases an intrinsic function is handled like an instruction.
#ifndef FUNCTION
-#define FUNCTION INSTRUCTION
+#define FUNCTION(N,A,R,I) INSTRUCTION(N,A,R,I)
#endif
-// Likewise for compare instructions.
+// Compare instructions have a DAG node, so they are treated like DAG_INSTRUCTION.
#ifndef CMP_INSTRUCTION
-#define CMP_INSTRUCTION INSTRUCTION
+#define CMP_INSTRUCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
#endif
// Arguments of the entries are:
@@ -35,52 +49,59 @@
// These are definitions for instructions that are converted into constrained
// intrinsics.
//
-INSTRUCTION(FAdd, 2, 1, experimental_constrained_fadd, FADD)
-INSTRUCTION(FSub, 2, 1, experimental_constrained_fsub, FSUB)
-INSTRUCTION(FMul, 2, 1, experimental_constrained_fmul, FMUL)
-INSTRUCTION(FDiv, 2, 1, experimental_constrained_fdiv, FDIV)
-INSTRUCTION(FRem, 2, 1, experimental_constrained_frem, FREM)
-INSTRUCTION(FPExt, 1, 0, experimental_constrained_fpext, FP_EXTEND)
-INSTRUCTION(SIToFP, 1, 1, experimental_constrained_sitofp, SINT_TO_FP)
-INSTRUCTION(UIToFP, 1, 1, experimental_constrained_uitofp, UINT_TO_FP)
-INSTRUCTION(FPToSI, 1, 0, experimental_constrained_fptosi, FP_TO_SINT)
-INSTRUCTION(FPToUI, 1, 0, experimental_constrained_fptoui, FP_TO_UINT)
-INSTRUCTION(FPTrunc, 1, 1, experimental_constrained_fptrunc, FP_ROUND)
+DAG_INSTRUCTION(FAdd, 2, 1, experimental_constrained_fadd, FADD)
+DAG_INSTRUCTION(FSub, 2, 1, experimental_constrained_fsub, FSUB)
+DAG_INSTRUCTION(FMul, 2, 1, experimental_constrained_fmul, FMUL)
+DAG_INSTRUCTION(FDiv, 2, 1, experimental_constrained_fdiv, FDIV)
+DAG_INSTRUCTION(FRem, 2, 1, experimental_constrained_frem, FREM)
+DAG_INSTRUCTION(FPExt, 1, 0, experimental_constrained_fpext, FP_EXTEND)
+DAG_INSTRUCTION(SIToFP, 1, 1, experimental_constrained_sitofp, SINT_TO_FP)
+DAG_INSTRUCTION(UIToFP, 1, 1, experimental_constrained_uitofp, UINT_TO_FP)
+DAG_INSTRUCTION(FPToSI, 1, 0, experimental_constrained_fptosi, FP_TO_SINT)
+DAG_INSTRUCTION(FPToUI, 1, 0, experimental_constrained_fptoui, FP_TO_UINT)
+DAG_INSTRUCTION(FPTrunc, 1, 1, experimental_constrained_fptrunc, FP_ROUND)
// These are definitions for compare instructions (signaling and quiet versions).
// Both of these match to FCmp / SETCC.
-CMP_INSTRUCTION(FCmp, 2, 0, experimental_constrained_fcmp, FSETCC)
-CMP_INSTRUCTION(FCmp, 2, 0, experimental_constrained_fcmps, FSETCCS)
+CMP_INSTRUCTION(FCmp, 2, 0, experimental_constrained_fcmp, FSETCC)
+CMP_INSTRUCTION(FCmp, 2, 0, experimental_constrained_fcmps, FSETCCS)
// These are definitions for intrinsic functions that are converted into
// constrained intrinsics.
//
-FUNCTION(ceil, 1, 0, experimental_constrained_ceil, FCEIL)
-FUNCTION(cos, 1, 1, experimental_constrained_cos, FCOS)
-FUNCTION(exp, 1, 1, experimental_constrained_exp, FEXP)
-FUNCTION(exp2, 1, 1, experimental_constrained_exp2, FEXP2)
-FUNCTION(floor, 1, 0, experimental_constrained_floor, FFLOOR)
-FUNCTION(fma, 3, 1, experimental_constrained_fma, FMA)
-FUNCTION(log, 1, 1, experimental_constrained_log, FLOG)
-FUNCTION(log10, 1, 1, experimental_constrained_log10, FLOG10)
-FUNCTION(log2, 1, 1, experimental_constrained_log2, FLOG2)
-FUNCTION(lrint, 1, 1, experimental_constrained_lrint, LRINT)
-FUNCTION(llrint, 1, 1, experimental_constrained_llrint, LLRINT)
-FUNCTION(lround, 1, 0, experimental_constrained_lround, LROUND)
-FUNCTION(llround, 1, 0, experimental_constrained_llround, LLROUND)
-FUNCTION(maxnum, 2, 0, experimental_constrained_maxnum, FMAXNUM)
-FUNCTION(minnum, 2, 0, experimental_constrained_minnum, FMINNUM)
-FUNCTION(maximum, 2, 0, experimental_constrained_maximum, FMAXIMUM)
-FUNCTION(minimum, 2, 0, experimental_constrained_minimum, FMINIMUM)
-FUNCTION(nearbyint, 1, 1, experimental_constrained_nearbyint, FNEARBYINT)
-FUNCTION(pow, 2, 1, experimental_constrained_pow, FPOW)
-FUNCTION(powi, 2, 1, experimental_constrained_powi, FPOWI)
-FUNCTION(rint, 1, 1, experimental_constrained_rint, FRINT)
-FUNCTION(round, 1, 0, experimental_constrained_round, FROUND)
-FUNCTION(sin, 1, 1, experimental_constrained_sin, FSIN)
-FUNCTION(sqrt, 1, 1, experimental_constrained_sqrt, FSQRT)
-FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
+DAG_FUNCTION(ceil, 1, 0, experimental_constrained_ceil, FCEIL)
+DAG_FUNCTION(cos, 1, 1, experimental_constrained_cos, FCOS)
+DAG_FUNCTION(exp, 1, 1, experimental_constrained_exp, FEXP)
+DAG_FUNCTION(exp2, 1, 1, experimental_constrained_exp2, FEXP2)
+DAG_FUNCTION(floor, 1, 0, experimental_constrained_floor, FFLOOR)
+DAG_FUNCTION(fma, 3, 1, experimental_constrained_fma, FMA)
+DAG_FUNCTION(log, 1, 1, experimental_constrained_log, FLOG)
+DAG_FUNCTION(log10, 1, 1, experimental_constrained_log10, FLOG10)
+DAG_FUNCTION(log2, 1, 1, experimental_constrained_log2, FLOG2)
+DAG_FUNCTION(lrint, 1, 1, experimental_constrained_lrint, LRINT)
+DAG_FUNCTION(llrint, 1, 1, experimental_constrained_llrint, LLRINT)
+DAG_FUNCTION(lround, 1, 0, experimental_constrained_lround, LROUND)
+DAG_FUNCTION(llround, 1, 0, experimental_constrained_llround, LLROUND)
+DAG_FUNCTION(maxnum, 2, 0, experimental_constrained_maxnum, FMAXNUM)
+DAG_FUNCTION(minnum, 2, 0, experimental_constrained_minnum, FMINNUM)
+DAG_FUNCTION(maximum, 2, 0, experimental_constrained_maximum, FMAXIMUM)
+DAG_FUNCTION(minimum, 2, 0, experimental_constrained_minimum, FMINIMUM)
+DAG_FUNCTION(nearbyint, 1, 1, experimental_constrained_nearbyint, FNEARBYINT)
+DAG_FUNCTION(pow, 2, 1, experimental_constrained_pow, FPOW)
+DAG_FUNCTION(powi, 2, 1, experimental_constrained_powi, FPOWI)
+DAG_FUNCTION(rint, 1, 1, experimental_constrained_rint, FRINT)
+DAG_FUNCTION(round, 1, 0, experimental_constrained_round, FROUND)
+DAG_FUNCTION(roundeven, 1, 0, experimental_constrained_roundeven, FROUNDEVEN)
+DAG_FUNCTION(sin, 1, 1, experimental_constrained_sin, FSIN)
+DAG_FUNCTION(sqrt, 1, 1, experimental_constrained_sqrt, FSQRT)
+DAG_FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
+
+// This is the definition for the fmuladd intrinsic function, which is converted
+// into constrained FMA or FMUL + FADD intrinsics.
+FUNCTION(fmuladd, 3, 1, experimental_constrained_fmuladd)
#undef INSTRUCTION
#undef FUNCTION
#undef CMP_INSTRUCTION
+#undef DAG_INSTRUCTION
+#undef DAG_FUNCTION
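Given the defaulting chain above (DAG_FUNCTION -> DAG_INSTRUCTION -> INSTRUCTION, FUNCTION -> INSTRUCTION, CMP_INSTRUCTION -> DAG_INSTRUCTION), a client that only needs the complete intrinsic list still defines a single macro; the .def undefines everything on the way out. A sketch of the usual pattern:

  static bool isConstrainedFPIntrinsic(Intrinsic::ID ID) {
    switch (ID) {
  #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                       \
    case Intrinsic::INTRINSIC:
  #include "llvm/IR/ConstrainedOps.def"
      return true;
    default:
      return false;
    }
  }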
diff --git a/llvm/include/llvm/IR/DIBuilder.h b/llvm/include/llvm/IR/DIBuilder.h
index f7c242554f6a..d1c7d126b5a9 100644
--- a/llvm/include/llvm/IR/DIBuilder.h
+++ b/llvm/include/llvm/IR/DIBuilder.h
@@ -135,6 +135,9 @@ namespace llvm {
/// profile collection.
/// \param NameTableKind Whether to emit .debug_gnu_pubnames,
/// .debug_pubnames, or no pubnames at all.
+ /// \param SysRoot The clang system root (value of -isysroot).
+ /// \param SDK The SDK name. On Darwin, this is the last component
+ /// of the sysroot.
DICompileUnit *
createCompileUnit(unsigned Lang, DIFile *File, StringRef Producer,
bool isOptimized, StringRef Flags, unsigned RV,
@@ -145,7 +148,8 @@ namespace llvm {
bool DebugInfoForProfiling = false,
DICompileUnit::DebugNameTableKind NameTableKind =
DICompileUnit::DebugNameTableKind::Default,
- bool RangesBaseAddress = false);
+ bool RangesBaseAddress = false, StringRef SysRoot = {},
+ StringRef SDK = {});
/// Create a file descriptor to hold debugging information for a file.
/// \param Filename File name.
@@ -442,19 +446,22 @@ namespace llvm {
/// \param Scope Scope in which this type is defined.
/// \param Name Type parameter name.
/// \param Ty Parameter type.
- DITemplateTypeParameter *
- createTemplateTypeParameter(DIScope *Scope, StringRef Name, DIType *Ty);
+    /// \param IsDefault     Whether the parameter is a default parameter.
+ DITemplateTypeParameter *createTemplateTypeParameter(DIScope *Scope,
+ StringRef Name,
+ DIType *Ty,
+ bool IsDefault);
/// Create debugging information for template
/// value parameter.
/// \param Scope Scope in which this type is defined.
/// \param Name Value parameter name.
/// \param Ty Parameter type.
+    /// \param IsDefault     Whether the parameter is a default parameter.
/// \param Val Constant parameter value.
- DITemplateValueParameter *createTemplateValueParameter(DIScope *Scope,
- StringRef Name,
- DIType *Ty,
- Constant *Val);
+ DITemplateValueParameter *
+ createTemplateValueParameter(DIScope *Scope, StringRef Name, DIType *Ty,
+ bool IsDefault, Constant *Val);
/// Create debugging information for a template template parameter.
/// \param Scope Scope in which this type is defined.
@@ -566,6 +573,8 @@ namespace llvm {
/// implicitly uniques the values returned.
DISubrange *getOrCreateSubrange(int64_t Lo, int64_t Count);
DISubrange *getOrCreateSubrange(int64_t Lo, Metadata *CountNode);
+ DISubrange *getOrCreateSubrange(Metadata *Count, Metadata *LowerBound,
+ Metadata *UpperBound, Metadata *Stride);
/// Create a new descriptor for the specified variable.
/// \param Context Variable scope.
@@ -734,11 +743,15 @@ namespace llvm {
/// A space-separated shell-quoted list of -D macro
/// definitions as they would appear on a command line.
/// \param IncludePath The path to the module map file.
- /// \param SysRoot The clang system root (value of -isysroot).
+ /// \param APINotesFile The path to an API notes file for this module.
+ /// \param File Source file of the module declaration. Used for
+ /// Fortran modules.
+ /// \param LineNo Source line number of the module declaration.
+ /// Used for Fortran modules.
DIModule *createModule(DIScope *Scope, StringRef Name,
- StringRef ConfigurationMacros,
- StringRef IncludePath,
- StringRef SysRoot);
+ StringRef ConfigurationMacros, StringRef IncludePath,
+ StringRef APINotesFile = {}, DIFile *File = nullptr,
+ unsigned LineNo = 0);
/// This creates a descriptor for a lexical block with a new file
/// attached. This merely extends the existing
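A sketch of the reworked createModule call; the sysroot no longer travels here (per the createCompileUnit change earlier in this patch, it belongs to the compile unit). DIB and CU are assumed to be an existing DIBuilder and scope:

  DIModule *Mod = DIB.createModule(CU, "MyModule",
                                   /*ConfigurationMacros=*/"-DNDEBUG",
                                   /*IncludePath=*/"/usr/include/MyModule");
  // APINotesFile, File, and LineNo keep their defaults for a C-family module.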
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
index 85093dd218f8..17297bb8b309 100644
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -133,7 +133,8 @@ private:
MM_MachO,
MM_WinCOFF,
MM_WinCOFFX86,
- MM_Mips
+ MM_Mips,
+ MM_XCOFF
};
ManglingModeT ManglingMode;
@@ -262,7 +263,7 @@ public:
/// Returns true if the given alignment exceeds the natural stack alignment.
bool exceedsNaturalStackAlignment(Align Alignment) const {
- return StackNaturalAlign && (Alignment > StackNaturalAlign);
+ return StackNaturalAlign && (Alignment > *StackNaturalAlign);
}
Align getStackAlignment() const {
@@ -309,6 +310,7 @@ public:
case MM_ELF:
case MM_Mips:
case MM_WinCOFF:
+ case MM_XCOFF:
return '\0';
case MM_MachO:
case MM_WinCOFFX86:
@@ -329,6 +331,8 @@ public:
case MM_MachO:
case MM_WinCOFFX86:
return "L";
+ case MM_XCOFF:
+ return "L..";
}
llvm_unreachable("invalid mangling mode");
}
@@ -501,13 +505,17 @@ public:
}
/// Returns the minimum ABI-required alignment for the specified type.
+ /// FIXME: Deprecate this function once migration to Align is over.
unsigned getABITypeAlignment(Type *Ty) const;
+ /// Returns the minimum ABI-required alignment for the specified type.
+ Align getABITypeAlign(Type *Ty) const;
+
/// Helper function to return `Alignment` if it's set or the result of
/// `getABITypeAlignment(Ty)`; in either case the result is a valid alignment.
inline Align getValueOrABITypeAlignment(MaybeAlign Alignment,
Type *Ty) const {
- return Alignment ? *Alignment : Align(getABITypeAlignment(Ty));
+ return Alignment ? *Alignment : getABITypeAlign(Ty);
}
/// Returns the minimum ABI-required alignment for an integer type of
@@ -518,8 +526,15 @@ public:
/// type.
///
/// This is always at least as good as the ABI alignment.
+ /// FIXME: Deprecate this function once the migration to Align is over.
unsigned getPrefTypeAlignment(Type *Ty) const;
+ /// Returns the preferred stack/global alignment for the specified
+ /// type.
+ ///
+ /// This is always at least as good as the ABI alignment.
+ Align getPrefTypeAlign(Type *Ty) const;
+
/// Returns an integer type with size at least as big as that of a
/// pointer in the given address space.
IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
@@ -563,13 +578,26 @@ public:
/// Returns the preferred alignment of the specified global.
///
/// This includes an explicitly requested alignment (if the global has one).
- unsigned getPreferredAlignment(const GlobalVariable *GV) const;
+ Align getPreferredAlign(const GlobalVariable *GV) const;
+
+ /// Returns the preferred alignment of the specified global.
+ ///
+ /// This includes an explicitly requested alignment (if the global has one).
+ LLVM_ATTRIBUTE_DEPRECATED(
+ inline unsigned getPreferredAlignment(const GlobalVariable *GV) const,
+ "Use getPreferredAlign instead") {
+ return getPreferredAlign(GV).value();
+ }
/// Returns the preferred alignment of the specified global, returned
/// in log form.
///
/// This includes an explicitly requested alignment (if the global has one).
- unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
+ LLVM_ATTRIBUTE_DEPRECATED(
+ inline unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const,
+ "Inline where needed") {
+ return Log2(getPreferredAlign(GV));
+ }
};
inline DataLayout *unwrap(LLVMTargetDataRef P) {
@@ -640,6 +668,7 @@ inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
case Type::IntegerTyID:
return TypeSize::Fixed(Ty->getIntegerBitWidth());
case Type::HalfTyID:
+ case Type::BFloatTyID:
return TypeSize::Fixed(16);
case Type::FloatTyID:
return TypeSize::Fixed(32);
@@ -653,7 +682,8 @@ inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
// only 80 bits contain information.
case Type::X86_FP80TyID:
return TypeSize::Fixed(80);
- case Type::VectorTyID: {
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID: {
VectorType *VTy = cast<VectorType>(Ty);
auto EltCnt = VTy->getElementCount();
uint64_t MinBits = EltCnt.Min *
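As a sketch of the intended migration, the new Align-returning accessors let callers drop the unsigned round-trip; this mirrors getValueOrABITypeAlignment() and assumes only the APIs shown in the hunks above:

#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Honor an explicit alignment request, otherwise fall back to the ABI
// minimum; the result is always a valid Align, never 0.
Align pickAlignment(const DataLayout &DL, Type *Ty, MaybeAlign Requested) {
  return Requested ? *Requested : DL.getABITypeAlign(Ty);
}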
diff --git a/llvm/include/llvm/IR/DebugInfo.h b/llvm/include/llvm/IR/DebugInfo.h
index 171e1621889f..e7c1d9a90677 100644
--- a/llvm/include/llvm/IR/DebugInfo.h
+++ b/llvm/include/llvm/IR/DebugInfo.h
@@ -16,6 +16,7 @@
#ifndef LLVM_IR_DEBUGINFO_H
#define LLVM_IR_DEBUGINFO_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
@@ -23,8 +24,8 @@
namespace llvm {
-class DbgDeclareInst;
-class DbgValueInst;
+class DbgVariableIntrinsic;
+class Instruction;
class Module;
/// Find subprogram that is enclosing this scope.
@@ -50,6 +51,13 @@ bool stripDebugInfo(Function &F);
/// All debug type metadata nodes are unreachable and garbage collected.
bool stripNonLineTableDebugInfo(Module &M);
+/// Update the debug locations contained within the MD_loop metadata attached
+/// to the instruction \p I, if one exists. \p Updater is applied to each debug
+/// location in the MD_loop metadata: the returned value is included in the
+/// updated loop metadata node if it is non-null.
+void updateLoopMetadataDebugLocations(
+ Instruction &I, function_ref<DILocation *(const DILocation &)> Updater);
+
/// Return Debug Info Metadata Version by checking module flags.
unsigned getDebugMetadataVersionFromModule(const Module &M);
@@ -68,10 +76,8 @@ public:
/// Process a single instruction and collect debug info anchors.
void processInstruction(const Module &M, const Instruction &I);
- /// Process DbgDeclareInst.
- void processDeclare(const Module &M, const DbgDeclareInst *DDI);
- /// Process DbgValueInst.
- void processValue(const Module &M, const DbgValueInst *DVI);
+ /// Process DbgVariableIntrinsic.
+ void processVariable(const Module &M, const DbgVariableIntrinsic &DVI);
/// Process debug info location.
void processLocation(const Module &M, const DILocation *Loc);
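A small usage sketch for the new hook, assuming nothing beyond the declaration above: the updater rebuilds each DILocation found in the instruction's llvm.loop metadata, here stripping the inlined-at chain.

#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Drop the inlined-at link from every debug location stored in I's
// MD_loop metadata, keeping line, column, and scope intact.
void stripLoopInlinedAt(Instruction &I) {
  updateLoopMetadataDebugLocations(I, [](const DILocation &Loc) {
    return DILocation::get(Loc.getContext(), Loc.getLine(), Loc.getColumn(),
                           Loc.getScope(), /*InlinedAt=*/nullptr);
  });
}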
diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h
index d6bfe504dd94..7d7cc4de7937 100644
--- a/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -287,12 +287,8 @@ class DISubrange : public DINode {
friend class LLVMContextImpl;
friend class MDNode;
- int64_t LowerBound;
-
- DISubrange(LLVMContext &C, StorageType Storage, Metadata *Node,
- int64_t LowerBound, ArrayRef<Metadata *> Ops)
- : DINode(C, DISubrangeKind, Storage, dwarf::DW_TAG_subrange_type, Ops),
- LowerBound(LowerBound) {}
+ DISubrange(LLVMContext &C, StorageType Storage, ArrayRef<Metadata *> Ops)
+ : DINode(C, DISubrangeKind, Storage, dwarf::DW_TAG_subrange_type, Ops) {}
~DISubrange() = default;
@@ -304,8 +300,14 @@ class DISubrange : public DINode {
int64_t LowerBound, StorageType Storage,
bool ShouldCreate = true);
+ static DISubrange *getImpl(LLVMContext &Context, Metadata *CountNode,
+ Metadata *LowerBound, Metadata *UpperBound,
+ Metadata *Stride, StorageType Storage,
+ bool ShouldCreate = true);
+
TempDISubrange cloneImpl() const {
- return getTemporary(getContext(), getRawCountNode(), getLowerBound());
+ return getTemporary(getContext(), getRawCountNode(), getRawLowerBound(),
+ getRawUpperBound(), getRawStride());
}
public:
@@ -315,25 +317,33 @@ public:
DEFINE_MDNODE_GET(DISubrange, (Metadata *CountNode, int64_t LowerBound = 0),
(CountNode, LowerBound))
- TempDISubrange clone() const { return cloneImpl(); }
+ DEFINE_MDNODE_GET(DISubrange,
+ (Metadata * CountNode, Metadata *LowerBound,
+ Metadata *UpperBound, Metadata *Stride),
+ (CountNode, LowerBound, UpperBound, Stride))
- int64_t getLowerBound() const { return LowerBound; }
+ TempDISubrange clone() const { return cloneImpl(); }
Metadata *getRawCountNode() const {
return getOperand(0).get();
}
+ Metadata *getRawLowerBound() const { return getOperand(1).get(); }
+
+ Metadata *getRawUpperBound() const { return getOperand(2).get(); }
+
+ Metadata *getRawStride() const { return getOperand(3).get(); }
+
typedef PointerUnion<ConstantInt*, DIVariable*> CountType;
+ typedef PointerUnion<ConstantInt *, DIVariable *, DIExpression *> BoundType;
- CountType getCount() const {
- if (auto *MD = dyn_cast<ConstantAsMetadata>(getRawCountNode()))
- return CountType(cast<ConstantInt>(MD->getValue()));
+ CountType getCount() const;
- if (auto *DV = dyn_cast<DIVariable>(getRawCountNode()))
- return CountType(DV);
+ BoundType getLowerBound() const;
- return CountType();
- }
+ BoundType getUpperBound() const;
+
+ BoundType getStride() const;
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DISubrangeKind;
@@ -348,22 +358,26 @@ class DIEnumerator : public DINode {
friend class LLVMContextImpl;
friend class MDNode;
- int64_t Value;
- DIEnumerator(LLVMContext &C, StorageType Storage, int64_t Value,
+ APInt Value;
+ DIEnumerator(LLVMContext &C, StorageType Storage, const APInt &Value,
bool IsUnsigned, ArrayRef<Metadata *> Ops)
: DINode(C, DIEnumeratorKind, Storage, dwarf::DW_TAG_enumerator, Ops),
Value(Value) {
SubclassData32 = IsUnsigned;
}
+ DIEnumerator(LLVMContext &C, StorageType Storage, int64_t Value,
+ bool IsUnsigned, ArrayRef<Metadata *> Ops)
+ : DIEnumerator(C, Storage, APInt(64, Value, !IsUnsigned), IsUnsigned,
+ Ops) {}
~DIEnumerator() = default;
- static DIEnumerator *getImpl(LLVMContext &Context, int64_t Value,
+ static DIEnumerator *getImpl(LLVMContext &Context, const APInt &Value,
bool IsUnsigned, StringRef Name,
StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, Value, IsUnsigned,
getCanonicalMDString(Context, Name), Storage, ShouldCreate);
}
- static DIEnumerator *getImpl(LLVMContext &Context, int64_t Value,
+ static DIEnumerator *getImpl(LLVMContext &Context, const APInt &Value,
bool IsUnsigned, MDString *Name,
StorageType Storage, bool ShouldCreate = true);
@@ -372,14 +386,22 @@ class DIEnumerator : public DINode {
}
public:
- DEFINE_MDNODE_GET(DIEnumerator, (int64_t Value, bool IsUnsigned, StringRef Name),
+ DEFINE_MDNODE_GET(DIEnumerator,
+ (int64_t Value, bool IsUnsigned, StringRef Name),
+ (APInt(64, Value, !IsUnsigned), IsUnsigned, Name))
+ DEFINE_MDNODE_GET(DIEnumerator,
+ (int64_t Value, bool IsUnsigned, MDString *Name),
+ (APInt(64, Value, !IsUnsigned), IsUnsigned, Name))
+ DEFINE_MDNODE_GET(DIEnumerator,
+ (APInt Value, bool IsUnsigned, StringRef Name),
(Value, IsUnsigned, Name))
- DEFINE_MDNODE_GET(DIEnumerator, (int64_t Value, bool IsUnsigned, MDString *Name),
+ DEFINE_MDNODE_GET(DIEnumerator,
+ (APInt Value, bool IsUnsigned, MDString *Name),
(Value, IsUnsigned, Name))
TempDIEnumerator clone() const { return cloneImpl(); }
- int64_t getValue() const { return Value; }
+ const APInt &getValue() const { return Value; }
bool isUnsigned() const { return SubclassData32; }
StringRef getName() const { return getStringOperand(0); }
@@ -465,7 +487,8 @@ public:
// encoding is reserved.
CSK_MD5 = 1,
CSK_SHA1 = 2,
- CSK_Last = CSK_SHA1 // Should be last enumeration.
+ CSK_SHA256 = 3,
+ CSK_Last = CSK_SHA256 // Should be last enumeration.
};
/// A single checksum, represented by a \a Kind and a \a Value (a string).
@@ -918,13 +941,14 @@ class DICompositeType : public DIType {
uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
DINodeArray Elements, unsigned RuntimeLang, DIType *VTableHolder,
DITemplateParameterArray TemplateParams, StringRef Identifier,
- DIDerivedType *Discriminator, StorageType Storage,
- bool ShouldCreate = true) {
- return getImpl(
- Context, Tag, getCanonicalMDString(Context, Name), File, Line, Scope,
- BaseType, SizeInBits, AlignInBits, OffsetInBits, Flags, Elements.get(),
- RuntimeLang, VTableHolder, TemplateParams.get(),
- getCanonicalMDString(Context, Identifier), Discriminator, Storage, ShouldCreate);
+ DIDerivedType *Discriminator, Metadata *DataLocation,
+ StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File,
+ Line, Scope, BaseType, SizeInBits, AlignInBits, OffsetInBits,
+ Flags, Elements.get(), RuntimeLang, VTableHolder,
+ TemplateParams.get(),
+ getCanonicalMDString(Context, Identifier), Discriminator,
+ DataLocation, Storage, ShouldCreate);
}
static DICompositeType *
getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
@@ -932,7 +956,7 @@ class DICompositeType : public DIType {
uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
DIFlags Flags, Metadata *Elements, unsigned RuntimeLang,
Metadata *VTableHolder, Metadata *TemplateParams,
- MDString *Identifier, Metadata *Discriminator,
+ MDString *Identifier, Metadata *Discriminator, Metadata *DataLocation,
StorageType Storage, bool ShouldCreate = true);
TempDICompositeType cloneImpl() const {
@@ -940,34 +964,34 @@ class DICompositeType : public DIType {
getScope(), getBaseType(), getSizeInBits(),
getAlignInBits(), getOffsetInBits(), getFlags(),
getElements(), getRuntimeLang(), getVTableHolder(),
- getTemplateParams(), getIdentifier(), getDiscriminator());
+ getTemplateParams(), getIdentifier(),
+ getDiscriminator(), getRawDataLocation());
}
public:
- DEFINE_MDNODE_GET(DICompositeType,
- (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
- DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
- uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
- DINodeArray Elements, unsigned RuntimeLang,
- DIType *VTableHolder,
- DITemplateParameterArray TemplateParams = nullptr,
- StringRef Identifier = "",
- DIDerivedType *Discriminator = nullptr),
- (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
- AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
- VTableHolder, TemplateParams, Identifier, Discriminator))
- DEFINE_MDNODE_GET(DICompositeType,
- (unsigned Tag, MDString *Name, Metadata *File,
- unsigned Line, Metadata *Scope, Metadata *BaseType,
- uint64_t SizeInBits, uint32_t AlignInBits,
- uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
- unsigned RuntimeLang, Metadata *VTableHolder,
- Metadata *TemplateParams = nullptr,
- MDString *Identifier = nullptr,
- Metadata *Discriminator = nullptr),
- (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
- AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
- VTableHolder, TemplateParams, Identifier, Discriminator))
+ DEFINE_MDNODE_GET(
+ DICompositeType,
+ (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
+ DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
+ DINodeArray Elements, unsigned RuntimeLang, DIType *VTableHolder,
+ DITemplateParameterArray TemplateParams = nullptr,
+ StringRef Identifier = "", DIDerivedType *Discriminator = nullptr,
+ Metadata *DataLocation = nullptr),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits,
+ OffsetInBits, Flags, Elements, RuntimeLang, VTableHolder, TemplateParams,
+ Identifier, Discriminator, DataLocation))
+ DEFINE_MDNODE_GET(
+ DICompositeType,
+ (unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
+ Metadata *Elements, unsigned RuntimeLang, Metadata *VTableHolder,
+ Metadata *TemplateParams = nullptr, MDString *Identifier = nullptr,
+ Metadata *Discriminator = nullptr, Metadata *DataLocation = nullptr),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits,
+ OffsetInBits, Flags, Elements, RuntimeLang, VTableHolder, TemplateParams,
+ Identifier, Discriminator, DataLocation))
TempDICompositeType clone() const { return cloneImpl(); }
@@ -984,7 +1008,8 @@ public:
Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
unsigned RuntimeLang, Metadata *VTableHolder,
- Metadata *TemplateParams, Metadata *Discriminator);
+ Metadata *TemplateParams, Metadata *Discriminator,
+ Metadata *DataLocation);
static DICompositeType *getODRTypeIfExists(LLVMContext &Context,
MDString &Identifier);
@@ -1003,7 +1028,8 @@ public:
Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
unsigned RuntimeLang, Metadata *VTableHolder,
- Metadata *TemplateParams, Metadata *Discriminator);
+ Metadata *TemplateParams, Metadata *Discriminator,
+ Metadata *DataLocation);
DIType *getBaseType() const { return cast_or_null<DIType>(getRawBaseType()); }
DINodeArray getElements() const {
@@ -1025,6 +1051,13 @@ public:
MDString *getRawIdentifier() const { return getOperandAs<MDString>(7); }
Metadata *getRawDiscriminator() const { return getOperand(8); }
DIDerivedType *getDiscriminator() const { return getOperandAs<DIDerivedType>(8); }
+ Metadata *getRawDataLocation() const { return getOperand(9); }
+ DIVariable *getDataLocation() const {
+ return dyn_cast_or_null<DIVariable>(getRawDataLocation());
+ }
+ DIExpression *getDataLocationExp() const {
+ return dyn_cast_or_null<DIExpression>(getRawDataLocation());
+ }
/// Replace operands.
///
@@ -1172,16 +1205,17 @@ private:
DIGlobalVariableExpressionArray GlobalVariables,
DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
- unsigned NameTableKind, bool RangesBaseAddress, StorageType Storage,
- bool ShouldCreate = true) {
- return getImpl(Context, SourceLanguage, File,
- getCanonicalMDString(Context, Producer), IsOptimized,
- getCanonicalMDString(Context, Flags), RuntimeVersion,
- getCanonicalMDString(Context, SplitDebugFilename),
- EmissionKind, EnumTypes.get(), RetainedTypes.get(),
- GlobalVariables.get(), ImportedEntities.get(), Macros.get(),
- DWOId, SplitDebugInlining, DebugInfoForProfiling,
- NameTableKind, RangesBaseAddress, Storage, ShouldCreate);
+ unsigned NameTableKind, bool RangesBaseAddress, StringRef SysRoot,
+ StringRef SDK, StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(
+ Context, SourceLanguage, File, getCanonicalMDString(Context, Producer),
+ IsOptimized, getCanonicalMDString(Context, Flags), RuntimeVersion,
+ getCanonicalMDString(Context, SplitDebugFilename), EmissionKind,
+ EnumTypes.get(), RetainedTypes.get(), GlobalVariables.get(),
+ ImportedEntities.get(), Macros.get(), DWOId, SplitDebugInlining,
+ DebugInfoForProfiling, NameTableKind, RangesBaseAddress,
+ getCanonicalMDString(Context, SysRoot),
+ getCanonicalMDString(Context, SDK), Storage, ShouldCreate);
}
static DICompileUnit *
getImpl(LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
@@ -1191,7 +1225,8 @@ private:
Metadata *GlobalVariables, Metadata *ImportedEntities,
Metadata *Macros, uint64_t DWOId, bool SplitDebugInlining,
bool DebugInfoForProfiling, unsigned NameTableKind,
- bool RangesBaseAddress, StorageType Storage, bool ShouldCreate = true);
+ bool RangesBaseAddress, MDString *SysRoot, MDString *SDK,
+ StorageType Storage, bool ShouldCreate = true);
TempDICompileUnit cloneImpl() const {
return getTemporary(
@@ -1200,7 +1235,7 @@ private:
getEmissionKind(), getEnumTypes(), getRetainedTypes(),
getGlobalVariables(), getImportedEntities(), getMacros(), DWOId,
getSplitDebugInlining(), getDebugInfoForProfiling(), getNameTableKind(),
- getRangesBaseAddress());
+ getRangesBaseAddress(), getSysRoot(), getSDK());
}
public:
@@ -1216,11 +1251,13 @@ public:
DIGlobalVariableExpressionArray GlobalVariables,
DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
- DebugNameTableKind NameTableKind, bool RangesBaseAddress),
+ DebugNameTableKind NameTableKind, bool RangesBaseAddress,
+ StringRef SysRoot, StringRef SDK),
(SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
- DebugInfoForProfiling, (unsigned)NameTableKind, RangesBaseAddress))
+ DebugInfoForProfiling, (unsigned)NameTableKind, RangesBaseAddress,
+ SysRoot, SDK))
DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
DICompileUnit,
(unsigned SourceLanguage, Metadata *File, MDString *Producer,
@@ -1229,11 +1266,12 @@ public:
Metadata *RetainedTypes, Metadata *GlobalVariables,
Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
bool SplitDebugInlining, bool DebugInfoForProfiling,
- unsigned NameTableKind, bool RangesBaseAddress),
+ unsigned NameTableKind, bool RangesBaseAddress, MDString *SysRoot,
+ MDString *SDK),
(SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
- DebugInfoForProfiling, NameTableKind, RangesBaseAddress))
+ DebugInfoForProfiling, NameTableKind, RangesBaseAddress, SysRoot, SDK))
TempDICompileUnit clone() const { return cloneImpl(); }
@@ -1250,14 +1288,10 @@ public:
DebugNameTableKind getNameTableKind() const {
return (DebugNameTableKind)NameTableKind;
}
- bool getRangesBaseAddress() const {
- return RangesBaseAddress; }
- StringRef getProducer() const {
- return getStringOperand(1); }
- StringRef getFlags() const {
- return getStringOperand(2); }
- StringRef getSplitDebugFilename() const {
- return getStringOperand(3); }
+ bool getRangesBaseAddress() const { return RangesBaseAddress; }
+ StringRef getProducer() const { return getStringOperand(1); }
+ StringRef getFlags() const { return getStringOperand(2); }
+ StringRef getSplitDebugFilename() const { return getStringOperand(3); }
DICompositeTypeArray getEnumTypes() const {
return cast_or_null<MDTuple>(getRawEnumTypes());
}
@@ -1279,6 +1313,8 @@ public:
void setSplitDebugInlining(bool SplitDebugInlining) {
this->SplitDebugInlining = SplitDebugInlining;
}
+ StringRef getSysRoot() const { return getStringOperand(9); }
+ StringRef getSDK() const { return getStringOperand(10); }
MDString *getRawProducer() const { return getOperandAs<MDString>(1); }
MDString *getRawFlags() const { return getOperandAs<MDString>(2); }
@@ -1290,6 +1326,8 @@ public:
Metadata *getRawGlobalVariables() const { return getOperand(6); }
Metadata *getRawImportedEntities() const { return getOperand(7); }
Metadata *getRawMacros() const { return getOperand(8); }
+ MDString *getRawSysRoot() const { return getOperandAs<MDString>(9); }
+ MDString *getRawSDK() const { return getOperandAs<MDString>(10); }
/// Replace arrays.
///
@@ -1540,6 +1578,13 @@ public:
static const DILocation *getMergedLocation(const DILocation *LocA,
const DILocation *LocB);
+ /// Try to combine the vector of locations passed as input into a single one.
+ /// This function applies getMergedLocation() repeatedly left-to-right.
+ ///
+ /// \p Locs: The locations to be merged.
+ static
+ const DILocation *getMergedLocations(ArrayRef<const DILocation *> Locs);
+
/// Returns the base discriminator for a given encoded discriminator \p D.
static unsigned getBaseDiscriminatorFromDiscriminator(unsigned D) {
return getUnsignedFromPrefixEncoding(D);
@@ -2072,60 +2117,72 @@ public:
}
};
-/// A (clang) module that has been imported by the compile unit.
-///
+/// Represents a module in the programming language, for example, a Clang
+/// module or a Fortran module.
class DIModule : public DIScope {
friend class LLVMContextImpl;
friend class MDNode;
+ unsigned LineNo;
- DIModule(LLVMContext &Context, StorageType Storage, ArrayRef<Metadata *> Ops)
- : DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops) {}
+ DIModule(LLVMContext &Context, StorageType Storage, unsigned LineNo,
+ ArrayRef<Metadata *> Ops)
+ : DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops),
+ LineNo(LineNo) {}
~DIModule() = default;
- static DIModule *getImpl(LLVMContext &Context, DIScope *Scope,
+ static DIModule *getImpl(LLVMContext &Context, DIFile *File, DIScope *Scope,
StringRef Name, StringRef ConfigurationMacros,
- StringRef IncludePath, StringRef SysRoot,
- StorageType Storage, bool ShouldCreate = true) {
- return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+ StringRef IncludePath, StringRef APINotesFile,
+ unsigned LineNo, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, File, Scope, getCanonicalMDString(Context, Name),
getCanonicalMDString(Context, ConfigurationMacros),
getCanonicalMDString(Context, IncludePath),
- getCanonicalMDString(Context, SysRoot),
- Storage, ShouldCreate);
+ getCanonicalMDString(Context, APINotesFile), LineNo, Storage,
+ ShouldCreate);
}
- static DIModule *getImpl(LLVMContext &Context, Metadata *Scope,
- MDString *Name, MDString *ConfigurationMacros,
- MDString *IncludePath, MDString *SysRoot,
+ static DIModule *getImpl(LLVMContext &Context, Metadata *File,
+ Metadata *Scope, MDString *Name,
+ MDString *ConfigurationMacros, MDString *IncludePath,
+ MDString *APINotesFile, unsigned LineNo,
StorageType Storage, bool ShouldCreate = true);
TempDIModule cloneImpl() const {
- return getTemporary(getContext(), getScope(), getName(),
+ return getTemporary(getContext(), getFile(), getScope(), getName(),
getConfigurationMacros(), getIncludePath(),
- getSysRoot());
+ getAPINotesFile(), getLineNo());
}
public:
- DEFINE_MDNODE_GET(DIModule, (DIScope *Scope, StringRef Name,
- StringRef ConfigurationMacros, StringRef IncludePath,
- StringRef SysRoot),
- (Scope, Name, ConfigurationMacros, IncludePath, SysRoot))
DEFINE_MDNODE_GET(DIModule,
- (Metadata *Scope, MDString *Name, MDString *ConfigurationMacros,
- MDString *IncludePath, MDString *SysRoot),
- (Scope, Name, ConfigurationMacros, IncludePath, SysRoot))
+ (DIFile * File, DIScope *Scope, StringRef Name,
+ StringRef ConfigurationMacros, StringRef IncludePath,
+ StringRef APINotesFile, unsigned LineNo),
+ (File, Scope, Name, ConfigurationMacros, IncludePath,
+ APINotesFile, LineNo))
+ DEFINE_MDNODE_GET(DIModule,
+ (Metadata * File, Metadata *Scope, MDString *Name,
+ MDString *ConfigurationMacros, MDString *IncludePath,
+ MDString *APINotesFile, unsigned LineNo),
+ (File, Scope, Name, ConfigurationMacros, IncludePath,
+ APINotesFile, LineNo))
TempDIModule clone() const { return cloneImpl(); }
DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
- StringRef getName() const { return getStringOperand(1); }
- StringRef getConfigurationMacros() const { return getStringOperand(2); }
- StringRef getIncludePath() const { return getStringOperand(3); }
- StringRef getSysRoot() const { return getStringOperand(4); }
+ StringRef getName() const { return getStringOperand(2); }
+ StringRef getConfigurationMacros() const { return getStringOperand(3); }
+ StringRef getIncludePath() const { return getStringOperand(4); }
+ StringRef getAPINotesFile() const { return getStringOperand(5); }
+ unsigned getLineNo() const { return LineNo; }
- Metadata *getRawScope() const { return getOperand(0); }
- MDString *getRawName() const { return getOperandAs<MDString>(1); }
- MDString *getRawConfigurationMacros() const { return getOperandAs<MDString>(2); }
- MDString *getRawIncludePath() const { return getOperandAs<MDString>(3); }
- MDString *getRawSysRoot() const { return getOperandAs<MDString>(4); }
+ Metadata *getRawScope() const { return getOperand(1); }
+ MDString *getRawName() const { return getOperandAs<MDString>(2); }
+ MDString *getRawConfigurationMacros() const {
+ return getOperandAs<MDString>(3);
+ }
+ MDString *getRawIncludePath() const { return getOperandAs<MDString>(4); }
+ MDString *getRawAPINotesFile() const { return getOperandAs<MDString>(5); }
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DIModuleKind;
@@ -2135,9 +2192,11 @@ public:
/// Base class for template parameters.
class DITemplateParameter : public DINode {
protected:
+ bool IsDefault;
+
DITemplateParameter(LLVMContext &Context, unsigned ID, StorageType Storage,
- unsigned Tag, ArrayRef<Metadata *> Ops)
- : DINode(Context, ID, Storage, Tag, Ops) {}
+ unsigned Tag, bool IsDefault, ArrayRef<Metadata *> Ops)
+ : DINode(Context, ID, Storage, Tag, Ops), IsDefault(IsDefault) {}
~DITemplateParameter() = default;
public:
@@ -2146,6 +2205,7 @@ public:
MDString *getRawName() const { return getOperandAs<MDString>(0); }
Metadata *getRawType() const { return getOperand(1); }
+ bool isDefault() const { return IsDefault; }
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DITemplateTypeParameterKind ||
@@ -2158,30 +2218,35 @@ class DITemplateTypeParameter : public DITemplateParameter {
friend class MDNode;
DITemplateTypeParameter(LLVMContext &Context, StorageType Storage,
- ArrayRef<Metadata *> Ops)
+ bool IsDefault, ArrayRef<Metadata *> Ops)
: DITemplateParameter(Context, DITemplateTypeParameterKind, Storage,
- dwarf::DW_TAG_template_type_parameter, Ops) {}
+ dwarf::DW_TAG_template_type_parameter, IsDefault,
+ Ops) {}
~DITemplateTypeParameter() = default;
static DITemplateTypeParameter *getImpl(LLVMContext &Context, StringRef Name,
- DIType *Type, StorageType Storage,
+ DIType *Type, bool IsDefault,
+ StorageType Storage,
bool ShouldCreate = true) {
- return getImpl(Context, getCanonicalMDString(Context, Name), Type, Storage,
- ShouldCreate);
+ return getImpl(Context, getCanonicalMDString(Context, Name), Type,
+ IsDefault, Storage, ShouldCreate);
}
static DITemplateTypeParameter *getImpl(LLVMContext &Context, MDString *Name,
- Metadata *Type, StorageType Storage,
+ Metadata *Type, bool IsDefault,
+ StorageType Storage,
bool ShouldCreate = true);
TempDITemplateTypeParameter cloneImpl() const {
- return getTemporary(getContext(), getName(), getType());
+ return getTemporary(getContext(), getName(), getType(), isDefault());
}
public:
- DEFINE_MDNODE_GET(DITemplateTypeParameter, (StringRef Name, DIType *Type),
- (Name, Type))
- DEFINE_MDNODE_GET(DITemplateTypeParameter, (MDString * Name, Metadata *Type),
- (Name, Type))
+ DEFINE_MDNODE_GET(DITemplateTypeParameter,
+ (StringRef Name, DIType *Type, bool IsDefault),
+ (Name, Type, IsDefault))
+ DEFINE_MDNODE_GET(DITemplateTypeParameter,
+ (MDString *Name, Metadata *Type, bool IsDefault),
+ (Name, Type, IsDefault))
TempDITemplateTypeParameter clone() const { return cloneImpl(); }
@@ -2195,36 +2260,40 @@ class DITemplateValueParameter : public DITemplateParameter {
friend class MDNode;
DITemplateValueParameter(LLVMContext &Context, StorageType Storage,
- unsigned Tag, ArrayRef<Metadata *> Ops)
+ unsigned Tag, bool IsDefault,
+ ArrayRef<Metadata *> Ops)
: DITemplateParameter(Context, DITemplateValueParameterKind, Storage, Tag,
- Ops) {}
+ IsDefault, Ops) {}
~DITemplateValueParameter() = default;
static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
StringRef Name, DIType *Type,
- Metadata *Value, StorageType Storage,
+ bool IsDefault, Metadata *Value,
+ StorageType Storage,
bool ShouldCreate = true) {
return getImpl(Context, Tag, getCanonicalMDString(Context, Name), Type,
- Value, Storage, ShouldCreate);
+ IsDefault, Value, Storage, ShouldCreate);
}
static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, Metadata *Type,
- Metadata *Value, StorageType Storage,
+ bool IsDefault, Metadata *Value,
+ StorageType Storage,
bool ShouldCreate = true);
TempDITemplateValueParameter cloneImpl() const {
return getTemporary(getContext(), getTag(), getName(), getType(),
- getValue());
+ isDefault(), getValue());
}
public:
DEFINE_MDNODE_GET(DITemplateValueParameter,
- (unsigned Tag, StringRef Name, DIType *Type,
+ (unsigned Tag, StringRef Name, DIType *Type, bool IsDefault,
Metadata *Value),
- (Tag, Name, Type, Value))
- DEFINE_MDNODE_GET(DITemplateValueParameter, (unsigned Tag, MDString *Name,
- Metadata *Type, Metadata *Value),
- (Tag, Name, Type, Value))
+ (Tag, Name, Type, IsDefault, Value))
+ DEFINE_MDNODE_GET(DITemplateValueParameter,
+ (unsigned Tag, MDString *Name, Metadata *Type,
+ bool IsDefault, Metadata *Value),
+ (Tag, Name, Type, IsDefault, Value))
TempDITemplateValueParameter clone() const { return cloneImpl(); }
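To show the new four-operand DISubrange form in use, a sketch (the helper and its parameter names are illustrative) of describing a Fortran array dimension whose bounds are only known at runtime:

#include "llvm/IR/DebugInfoMetadata.h"
using namespace llvm;

// Each bound may independently be a ConstantInt, a DIVariable, or a
// DIExpression; pass nullptr for anything unknown.
DISubrange *makeDimension(LLVMContext &Ctx, Metadata *Count,
                          Metadata *LowerBound, Metadata *UpperBound,
                          Metadata *Stride) {
  return DISubrange::get(Ctx, Count, LowerBound, UpperBound, Stride);
}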
diff --git a/llvm/include/llvm/IR/DebugLoc.h b/llvm/include/llvm/IR/DebugLoc.h
index 780d17a33661..4914d733fe0d 100644
--- a/llvm/include/llvm/IR/DebugLoc.h
+++ b/llvm/include/llvm/IR/DebugLoc.h
@@ -85,7 +85,7 @@ namespace llvm {
/// the chain now is inlined-at the new call site.
/// \param InlinedAt The new outermost inlined-at in the chain.
/// \param ReplaceLast Replace the last location in the inlined-at chain.
- static DebugLoc appendInlinedAt(DebugLoc DL, DILocation *InlinedAt,
+ static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt,
LLVMContext &Ctx,
DenseMap<const MDNode *, MDNode *> &Cache,
bool ReplaceLast = false);
diff --git a/llvm/include/llvm/IR/DerivedTypes.h b/llvm/include/llvm/IR/DerivedTypes.h
index 20097ef3f31a..3618447168be 100644
--- a/llvm/include/llvm/IR/DerivedTypes.h
+++ b/llvm/include/llvm/IR/DerivedTypes.h
@@ -195,26 +195,6 @@ private:
Value *Callee = nullptr;
};
-/// Common super class of ArrayType, StructType and VectorType.
-class CompositeType : public Type {
-protected:
- explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) {}
-
-public:
- /// Given an index value into the type, return the type of the element.
- Type *getTypeAtIndex(const Value *V) const;
- Type *getTypeAtIndex(unsigned Idx) const;
- bool indexValid(const Value *V) const;
- bool indexValid(unsigned Idx) const;
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast.
- static bool classof(const Type *T) {
- return T->getTypeID() == ArrayTyID ||
- T->getTypeID() == StructTyID ||
- T->getTypeID() == VectorTyID;
- }
-};
-
/// Class to represent struct types. There are two different kinds of struct
/// types: Literal structs and Identified structs.
///
@@ -235,8 +215,8 @@ public:
/// elements as defined by DataLayout (which is required to match what the code
/// generator for a target expects).
///
-class StructType : public CompositeType {
- StructType(LLVMContext &C) : CompositeType(C, StructTyID) {}
+class StructType : public Type {
+ StructType(LLVMContext &C) : Type(C, StructTyID) {}
enum {
/// This is the contents of the SubClassData field.
@@ -267,8 +247,7 @@ public:
StringRef Name, bool isPacked = false);
static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements);
template <class... Tys>
- static typename std::enable_if<are_base_of<Type, Tys...>::value,
- StructType *>::type
+ static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
create(StringRef Name, Type *elt1, Tys *... elts) {
assert(elt1 && "Cannot create a struct type with no elements with this");
SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
@@ -286,8 +265,7 @@ public:
/// specifying the elements as arguments. Note that this method always returns
/// a non-packed struct, and requires at least one element type.
template <class... Tys>
- static typename std::enable_if<are_base_of<Type, Tys...>::value,
- StructType *>::type
+ static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
get(Type *elt1, Tys *... elts) {
assert(elt1 && "Cannot create a struct type with no elements with this");
LLVMContext &Ctx = elt1->getContext();
@@ -324,7 +302,7 @@ public:
void setBody(ArrayRef<Type*> Elements, bool isPacked = false);
template <typename... Tys>
- typename std::enable_if<are_base_of<Type, Tys...>::value, void>::type
+ std::enable_if_t<are_base_of<Type, Tys...>::value, void>
setBody(Type *elt1, Tys *... elts) {
assert(elt1 && "Cannot create a struct type with no elements with this");
SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
@@ -352,6 +330,11 @@ public:
assert(N < NumContainedTys && "Element number out of range!");
return ContainedTys[N];
}
+ /// Given an index value into the type, return the type of the element.
+ Type *getTypeAtIndex(const Value *V) const;
+ Type *getTypeAtIndex(unsigned N) const { return getElementType(N); }
+ bool indexValid(const Value *V) const;
+ bool indexValid(unsigned Idx) const { return Idx < getNumElements(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
@@ -371,47 +354,22 @@ Type *Type::getStructElementType(unsigned N) const {
return cast<StructType>(this)->getElementType(N);
}
-/// This is the superclass of the array and vector type classes. Both of these
-/// represent "arrays" in memory. The array type represents a specifically sized
-/// array, and the vector type represents a specifically sized array that allows
-/// for use of SIMD instructions. SequentialType holds the common features of
-/// both, which stem from the fact that both lay their components out in memory
-/// identically.
-class SequentialType : public CompositeType {
- Type *ContainedType; ///< Storage for the single contained type.
+/// Class to represent array types.
+class ArrayType : public Type {
+ /// The element type of the array.
+ Type *ContainedType;
+ /// Number of elements in the array.
uint64_t NumElements;
-protected:
- SequentialType(TypeID TID, Type *ElType, uint64_t NumElements)
- : CompositeType(ElType->getContext(), TID), ContainedType(ElType),
- NumElements(NumElements) {
- ContainedTys = &ContainedType;
- NumContainedTys = 1;
- }
-
-public:
- SequentialType(const SequentialType &) = delete;
- SequentialType &operator=(const SequentialType &) = delete;
-
- /// For scalable vectors, this will return the minimum number of elements
- /// in the vector.
- uint64_t getNumElements() const { return NumElements; }
- Type *getElementType() const { return ContainedType; }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast.
- static bool classof(const Type *T) {
- return T->getTypeID() == ArrayTyID || T->getTypeID() == VectorTyID;
- }
-};
-
-/// Class to represent array types.
-class ArrayType : public SequentialType {
ArrayType(Type *ElType, uint64_t NumEl);
public:
ArrayType(const ArrayType &) = delete;
ArrayType &operator=(const ArrayType &) = delete;
+ uint64_t getNumElements() const { return NumElements; }
+ Type *getElementType() const { return ContainedType; }
+
/// This static method is the primary way to construct an ArrayType
static ArrayType *get(Type *ElementType, uint64_t NumElements);
@@ -428,8 +386,8 @@ uint64_t Type::getArrayNumElements() const {
return cast<ArrayType>(this)->getNumElements();
}
-/// Class to represent vector types.
-class VectorType : public SequentialType {
+/// Base class of all SIMD vector types.
+class VectorType : public Type {
/// A fully specified VectorType is of the form <vscale x n x Ty>. 'n' is the
/// minimum number of elements of type Ty contained within the vector, and
/// 'vscale x' indicates that the total element count is an integer multiple
@@ -443,25 +401,69 @@ class VectorType : public SequentialType {
/// <vscale x 4 x i32> - a vector containing an unknown integer multiple
/// of 4 i32s
- VectorType(Type *ElType, unsigned NumEl, bool Scalable = false);
- VectorType(Type *ElType, ElementCount EC);
+ /// The element type of the vector.
+ Type *ContainedType;
- // If true, the total number of elements is an unknown multiple of the
- // minimum 'NumElements' from SequentialType. Otherwise the total number
- // of elements is exactly equal to 'NumElements'.
- bool Scalable;
+protected:
+ /// The element quantity of this vector. The meaning of this value depends
+ /// on the type of vector:
+ /// - For FixedVectorType = <ElementQuantity x ty>, there are
+ /// exactly ElementQuantity elements in this vector.
+ /// - For ScalableVectorType = <vscale x ElementQuantity x ty>,
+ /// there are vscale * ElementQuantity elements in this vector, where
+ /// vscale is a runtime-constant integer greater than 0.
+ const unsigned ElementQuantity;
+
+ VectorType(Type *ElType, unsigned EQ, Type::TypeID TID);
public:
VectorType(const VectorType &) = delete;
VectorType &operator=(const VectorType &) = delete;
+ /// Get the number of elements in this vector. It does not make sense to call
+ /// this function on a scalable vector, and this will be moved into
+ /// FixedVectorType in a future commit.
+ unsigned getNumElements() const {
+ ElementCount EC = getElementCount();
+#ifdef STRICT_FIXED_SIZE_VECTORS
+ assert(!EC.Scalable &&
+ "Request for fixed number of elements from scalable vector");
+ return EC.Min;
+#else
+ if (EC.Scalable)
+ WithColor::warning()
+ << "The code that requested the fixed number of elements has made "
+ "the assumption that this vector is not scalable. This assumption "
+ "was not correct, and this may lead to broken code\n";
+ return EC.Min;
+#endif
+ }
+
+ Type *getElementType() const { return ContainedType; }
+
/// This static method is the primary way to construct an VectorType.
static VectorType *get(Type *ElementType, ElementCount EC);
+
+ /// Base class getter that specifically constructs a FixedVectorType. This
+ /// function is deprecated, and will be removed after LLVM 11 ships. Since
+ /// this always returns a FixedVectorType via a base VectorType pointer,
+ /// FixedVectorType::get(Type *, unsigned) is strictly better because no cast
+ /// required to call getNumElements() on the result.
+ LLVM_ATTRIBUTE_DEPRECATED(
+ inline static VectorType *get(Type *ElementType, unsigned NumElements),
+ "The base class version of get with the scalable argument defaulted to "
+ "false is deprecated. Either call VectorType::get(Type *, unsigned, "
+ "bool) and pass false, or call FixedVectorType::get(Type *, unsigned).");
+
static VectorType *get(Type *ElementType, unsigned NumElements,
- bool Scalable = false) {
+ bool Scalable) {
return VectorType::get(ElementType, {NumElements, Scalable});
}
+ static VectorType *get(Type *ElementType, const VectorType *Other) {
+ return VectorType::get(ElementType, Other->getElementCount());
+ }
+
/// This static method gets a VectorType with the same number of elements as
/// the input type, and the element type is an integer type of the same width
/// as the input element type.
@@ -529,9 +531,8 @@ public:
/// input type and the same element type.
static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
auto EltCnt = VTy->getElementCount();
- assert((VTy->getNumElements() * 2ull) <= UINT_MAX &&
- "Too many elements in vector");
- return VectorType::get(VTy->getElementType(), EltCnt*2);
+ assert((EltCnt.Min * 2ull) <= UINT_MAX && "Too many elements in vector");
+ return VectorType::get(VTy->getElementType(), EltCnt * 2);
}
/// Return true if the specified type is valid as a element type.
@@ -539,40 +540,122 @@ public:
/// Return an ElementCount instance to represent the (possibly scalable)
/// number of elements in the vector.
- ElementCount getElementCount() const {
- uint64_t MinimumEltCnt = getNumElements();
- assert(MinimumEltCnt <= UINT_MAX && "Too many elements in vector");
- return { (unsigned)MinimumEltCnt, Scalable };
+ inline ElementCount getElementCount() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Type *T) {
+ return T->getTypeID() == FixedVectorTyID ||
+ T->getTypeID() == ScalableVectorTyID;
}
+};
- /// Returns whether or not this is a scalable vector (meaning the total
- /// element count is a multiple of the minimum).
- bool isScalable() const {
- return Scalable;
+inline VectorType *VectorType::get(Type *ElementType, unsigned NumElements) {
+ return VectorType::get(ElementType, NumElements, false);
+}
+
+/// Class to represent fixed-width SIMD vectors.
+class FixedVectorType : public VectorType {
+protected:
+ FixedVectorType(Type *ElTy, unsigned NumElts)
+ : VectorType(ElTy, NumElts, FixedVectorTyID) {}
+
+public:
+ static FixedVectorType *get(Type *ElementType, unsigned NumElts);
+
+ static FixedVectorType *get(Type *ElementType, const FixedVectorType *FVTy) {
+ return get(ElementType, FVTy->getNumElements());
}
- /// Return the minimum number of bits in the Vector type.
- /// Returns zero when the vector is a vector of pointers.
- unsigned getBitWidth() const {
- return getNumElements() * getElementType()->getPrimitiveSizeInBits();
+ static FixedVectorType *getInteger(FixedVectorType *VTy) {
+ return cast<FixedVectorType>(VectorType::getInteger(VTy));
+ }
+
+ static FixedVectorType *getExtendedElementVectorType(FixedVectorType *VTy) {
+ return cast<FixedVectorType>(VectorType::getExtendedElementVectorType(VTy));
+ }
+
+ static FixedVectorType *getTruncatedElementVectorType(FixedVectorType *VTy) {
+ return cast<FixedVectorType>(
+ VectorType::getTruncatedElementVectorType(VTy));
+ }
+
+ static FixedVectorType *getSubdividedVectorType(FixedVectorType *VTy,
+ int NumSubdivs) {
+ return cast<FixedVectorType>(
+ VectorType::getSubdividedVectorType(VTy, NumSubdivs));
+ }
+
+ static FixedVectorType *getHalfElementsVectorType(FixedVectorType *VTy) {
+ return cast<FixedVectorType>(VectorType::getHalfElementsVectorType(VTy));
+ }
+
+ static FixedVectorType *getDoubleElementsVectorType(FixedVectorType *VTy) {
+ return cast<FixedVectorType>(VectorType::getDoubleElementsVectorType(VTy));
}
- /// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
- return T->getTypeID() == VectorTyID;
+ return T->getTypeID() == FixedVectorTyID;
}
};
-unsigned Type::getVectorNumElements() const {
- return cast<VectorType>(this)->getNumElements();
-}
+/// Class to represent scalable SIMD vectors.
+class ScalableVectorType : public VectorType {
+protected:
+ ScalableVectorType(Type *ElTy, unsigned MinNumElts)
+ : VectorType(ElTy, MinNumElts, ScalableVectorTyID) {}
-bool Type::getVectorIsScalable() const {
- return cast<VectorType>(this)->isScalable();
-}
+public:
+ static ScalableVectorType *get(Type *ElementType, unsigned MinNumElts);
+
+ static ScalableVectorType *get(Type *ElementType,
+ const ScalableVectorType *SVTy) {
+ return get(ElementType, SVTy->getMinNumElements());
+ }
+
+ static ScalableVectorType *getInteger(ScalableVectorType *VTy) {
+ return cast<ScalableVectorType>(VectorType::getInteger(VTy));
+ }
+
+ static ScalableVectorType *
+ getExtendedElementVectorType(ScalableVectorType *VTy) {
+ return cast<ScalableVectorType>(
+ VectorType::getExtendedElementVectorType(VTy));
+ }
+
+ static ScalableVectorType *
+ getTruncatedElementVectorType(ScalableVectorType *VTy) {
+ return cast<ScalableVectorType>(
+ VectorType::getTruncatedElementVectorType(VTy));
+ }
+
+ static ScalableVectorType *getSubdividedVectorType(ScalableVectorType *VTy,
+ int NumSubdivs) {
+ return cast<ScalableVectorType>(
+ VectorType::getSubdividedVectorType(VTy, NumSubdivs));
+ }
+
+ static ScalableVectorType *
+ getHalfElementsVectorType(ScalableVectorType *VTy) {
+ return cast<ScalableVectorType>(VectorType::getHalfElementsVectorType(VTy));
+ }
+
+ static ScalableVectorType *
+ getDoubleElementsVectorType(ScalableVectorType *VTy) {
+ return cast<ScalableVectorType>(
+ VectorType::getDoubleElementsVectorType(VTy));
+ }
+
+ /// Get the minimum number of elements in this vector. The actual number of
+ /// elements in the vector is an integer multiple of this value.
+ uint64_t getMinNumElements() const { return ElementQuantity; }
-ElementCount Type::getVectorElementCount() const {
- return cast<VectorType>(this)->getElementCount();
+ static bool classof(const Type *T) {
+ return T->getTypeID() == ScalableVectorTyID;
+ }
+};
+
+inline ElementCount VectorType::getElementCount() const {
+ return ElementCount(ElementQuantity, isa<ScalableVectorType>(this));
}
/// Class to represent pointers.
@@ -627,8 +710,8 @@ Type *Type::getWithNewBitWidth(unsigned NewBitWidth) const {
isIntOrIntVectorTy() &&
"Original type expected to be a vector of integers or a scalar integer.");
Type *NewType = getIntNTy(getContext(), NewBitWidth);
- if (isVectorTy())
- NewType = VectorType::get(NewType, getVectorElementCount());
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ NewType = VectorType::get(NewType, VTy->getElementCount());
return NewType;
}
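To illustrate the FixedVectorType/ScalableVectorType split, a sketch of the dispatch pattern client code is expected to adopt instead of calling getNumElements() through the base class:

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// Query the element count through the concrete subclass: exact for fixed
// vectors, a vscale multiple for scalable ones.
uint64_t minElementCount(VectorType *VTy) {
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy))
    return FVTy->getNumElements();
  return cast<ScalableVectorType>(VTy)->getMinNumElements();
}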
diff --git a/llvm/include/llvm/IR/DiagnosticInfo.h b/llvm/include/llvm/IR/DiagnosticInfo.h
index ec469982d378..b7e0ecde8629 100644
--- a/llvm/include/llvm/IR/DiagnosticInfo.h
+++ b/llvm/include/llvm/IR/DiagnosticInfo.h
@@ -55,6 +55,7 @@ enum DiagnosticKind {
DK_ResourceLimit,
DK_StackSize,
DK_Linker,
+ DK_Lowering,
DK_DebugMetadataVersion,
DK_DebugMetadataInvalid,
DK_ISelFallback,
@@ -212,7 +213,7 @@ public:
};
class DiagnosticInfoStackSize : public DiagnosticInfoResourceLimit {
- virtual void anchor() override;
+ void anchor() override;
public:
DiagnosticInfoStackSize(const Function &Fn, uint64_t StackSize,
DiagnosticSeverity Severity = DS_Warning,
@@ -363,7 +364,7 @@ public:
/// Common features for diagnostics with an associated location.
class DiagnosticInfoWithLocationBase : public DiagnosticInfo {
- virtual void anchor() override;
+ void anchor() override;
public:
/// \p Fn is the function where the diagnostic is being emitted. \p Loc is
/// the location information to use in the diagnostic.
@@ -531,9 +532,10 @@ protected:
template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- StringRef>::type S) {
+ StringRef>
+ S) {
R.insert(S);
return R;
}
@@ -543,9 +545,10 @@ operator<<(RemarkT &R,
template <class RemarkT>
RemarkT &
operator<<(RemarkT &&R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- StringRef>::type S) {
+ StringRef>
+ S) {
R.insert(S);
return R;
}
@@ -553,9 +556,10 @@ operator<<(RemarkT &&R,
template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- DiagnosticInfoOptimizationBase::Argument>::type A) {
+ DiagnosticInfoOptimizationBase::Argument>
+ A) {
R.insert(A);
return R;
}
@@ -563,9 +567,10 @@ operator<<(RemarkT &R,
template <class RemarkT>
RemarkT &
operator<<(RemarkT &&R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- DiagnosticInfoOptimizationBase::Argument>::type A) {
+ DiagnosticInfoOptimizationBase::Argument>
+ A) {
R.insert(A);
return R;
}
@@ -573,9 +578,10 @@ operator<<(RemarkT &&R,
template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- DiagnosticInfoOptimizationBase::setIsVerbose>::type V) {
+ DiagnosticInfoOptimizationBase::setIsVerbose>
+ V) {
R.insert(V);
return R;
}
@@ -583,9 +589,10 @@ operator<<(RemarkT &R,
template <class RemarkT>
RemarkT &
operator<<(RemarkT &&R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- DiagnosticInfoOptimizationBase::setIsVerbose>::type V) {
+ DiagnosticInfoOptimizationBase::setIsVerbose>
+ V) {
R.insert(V);
return R;
}
@@ -593,9 +600,10 @@ operator<<(RemarkT &&R,
template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
- typename std::enable_if<
+ std::enable_if_t<
std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
- DiagnosticInfoOptimizationBase::setExtraArgs>::type EA) {
+ DiagnosticInfoOptimizationBase::setExtraArgs>
+ EA) {
R.insert(EA);
return R;
}
@@ -603,7 +611,7 @@ operator<<(RemarkT &R,
/// Common features for diagnostics dealing with optimization remarks
/// that are used by IR passes.
class DiagnosticInfoIROptimization : public DiagnosticInfoOptimizationBase {
- virtual void anchor() override;
+ void anchor() override;
public:
/// \p PassName is the name of the pass emitting this diagnostic. \p
/// RemarkName is a textual identifier for the remark (single-word,
@@ -824,7 +832,7 @@ private:
/// Diagnostic information for optimization analysis remarks related to
/// floating-point non-commutativity.
class OptimizationRemarkAnalysisFPCommute : public OptimizationRemarkAnalysis {
- virtual void anchor();
+ void anchor() override;
public:
/// \p PassName is the name of the pass emitting this diagnostic. If this name
/// matches the regular expression given in -Rpass-analysis=, then the
@@ -866,7 +874,7 @@ private:
/// Diagnostic information for optimization analysis remarks related to
/// pointer aliasing.
class OptimizationRemarkAnalysisAliasing : public OptimizationRemarkAnalysis {
- virtual void anchor();
+ void anchor() override;
public:
/// \p PassName is the name of the pass emitting this diagnostic. If this name
/// matches the regular expression given in -Rpass-analysis=, then the
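The enable_if_t cleanups above are purely mechanical; the streaming interface they implement is unchanged. A sketch of typical use, assuming a pass that already has an OptimizationRemarkEmitter (the pass and remark names are illustrative):

#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Stream a remark; ore::NV is the Argument alias from DiagnosticInfo.h.
void reportVectorization(OptimizationRemarkEmitter &ORE, Instruction *I,
                         unsigned VF) {
  ORE.emit([&]() {
    return OptimizationRemark("my-pass", "Vectorized", I)
           << "vectorized loop with factor " << ore::NV("VF", VF);
  });
}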
diff --git a/llvm/include/llvm/IR/Dominators.h b/llvm/include/llvm/IR/Dominators.h
index 6a14785a6cc3..71595cb15df4 100644
--- a/llvm/include/llvm/IR/Dominators.h
+++ b/llvm/include/llvm/IR/Dominators.h
@@ -172,6 +172,8 @@ class DominatorTree : public DominatorTreeBase<BasicBlock, false> {
/// never dominate the use.
bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
+ /// Returns true if edge \p BBE1 dominates edge \p BBE2.
+ bool dominates(const BasicBlockEdge &BBE1, const BasicBlockEdge &BBE2) const;
// Ensure base class overloads are visible.
using Base::isReachableFromEntry;
@@ -206,7 +208,8 @@ template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
template <>
struct GraphTraits<DomTreeNode *>
- : public DomTreeGraphTraitsBase<DomTreeNode, DomTreeNode::iterator> {};
+ : public DomTreeGraphTraitsBase<DomTreeNode, DomTreeNode::const_iterator> {
+};
template <>
struct GraphTraits<const DomTreeNode *>
@@ -275,7 +278,7 @@ public:
AU.setPreservesAll();
}
- void releaseMemory() override { DT.releaseMemory(); }
+ void releaseMemory() override { DT.reset(); }
void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
diff --git a/llvm/include/llvm/IR/FPEnv.h b/llvm/include/llvm/IR/FPEnv.h
index a1e0665d4112..f00cb735932f 100644
--- a/llvm/include/llvm/IR/FPEnv.h
+++ b/llvm/include/llvm/IR/FPEnv.h
@@ -15,27 +15,14 @@
#ifndef LLVM_IR_FLOATINGPOINT_H
#define LLVM_IR_FLOATINGPOINT_H
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/StringRef.h"
-#include <stdint.h>
namespace llvm {
+class StringRef;
namespace fp {
-/// Rounding mode used for floating point operations.
-///
-/// Each of these values correspond to some metadata argument value of a
-/// constrained floating point intrinsic. See the LLVM Language Reference Manual
-/// for details.
-enum RoundingMode : uint8_t {
- rmDynamic, ///< This corresponds to "fpround.dynamic".
- rmToNearest, ///< This corresponds to "fpround.tonearest".
- rmDownward, ///< This corresponds to "fpround.downward".
- rmUpward, ///< This corresponds to "fpround.upward".
- rmTowardZero ///< This corresponds to "fpround.tozero".
-};
-
/// Exception behavior used for floating point operations.
///
/// Each of these values correspond to some metadata argument value of a
@@ -52,11 +39,11 @@ enum ExceptionBehavior : uint8_t {
/// Returns a valid RoundingMode enumerator when given a string
/// that is valid as input in constrained intrinsic rounding mode
/// metadata.
-Optional<fp::RoundingMode> StrToRoundingMode(StringRef);
+Optional<RoundingMode> StrToRoundingMode(StringRef);
/// For any RoundingMode enumerator, returns a string valid as input in
/// constrained intrinsic rounding mode metadata.
-Optional<StringRef> RoundingModeToStr(fp::RoundingMode);
+Optional<StringRef> RoundingModeToStr(RoundingMode);
/// Returns a valid ExceptionBehavior enumerator when given a string
/// valid as input in constrained intrinsic exception behavior metadata.
@@ -65,6 +52,5 @@ Optional<fp::ExceptionBehavior> StrToExceptionBehavior(StringRef);
/// For any ExceptionBehavior enumerator, returns a string valid as
/// input in constrained intrinsic exception behavior metadata.
Optional<StringRef> ExceptionBehaviorToStr(fp::ExceptionBehavior);
-
}
#endif
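With fp::RoundingMode gone, parsing now yields the target-independent RoundingMode from FloatingPointMode.h; a minimal sketch of the updated call site:

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/FPEnv.h"
using namespace llvm;

// Map a constrained-intrinsic metadata string to the shared enum.
bool roundsToNearest(StringRef MetadataName) {
  Optional<RoundingMode> RM = StrToRoundingMode(MetadataName);
  return RM && *RM == RoundingMode::NearestTiesToEven;
}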
diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
index d9cbcc63fa62..bb4ec13c7610 100644
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -55,6 +55,8 @@ template <typename T> class Optional;
class raw_ostream;
class Type;
class User;
+class BranchProbabilityInfo;
+class BlockFrequencyInfo;
class Function : public GlobalObject, public ilist_node<Function> {
public:
@@ -197,6 +199,11 @@ public:
/// returns Intrinsic::not_intrinsic!
bool isIntrinsic() const { return HasLLVMReservedName; }
+ /// Returns true if the function is one of the "Constrained Floating-Point
+ /// Intrinsics". Returns false if not, and returns false when
+ /// getIntrinsicID() returns Intrinsic::not_intrinsic.
+ bool isConstrainedFPIntrinsic() const;
+
static Intrinsic::ID lookupIntrinsicID(StringRef Name);
/// Recalculate the ID for this function if it is an Intrinsic defined
@@ -349,6 +356,13 @@ public:
return 0;
}
+ /// Return the stack alignment for the function.
+ MaybeAlign getFnStackAlign() const {
+ if (!hasFnAttribute(Attribute::StackAlignment))
+ return None;
+ return AttributeSets.getStackAlignment(AttributeList::FunctionIndex);
+ }
+
/// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
/// to use during code generation.
bool hasGC() const {
@@ -780,6 +794,10 @@ public:
///
void viewCFG() const;
+ /// Extended form to print edge weights.
+ void viewCFG(bool ViewCFGOnly, const BlockFrequencyInfo *BFI,
+ const BranchProbabilityInfo *BPI) const;
+
/// viewCFGOnly - This function is meant for use from the debugger. It works
/// just like viewCFG, but it does not include the contents of basic blocks
/// into the nodes, just the label. If you are only interested in the CFG
@@ -787,6 +805,10 @@ public:
///
void viewCFGOnly() const;
+ /// Extended form to print edge weights.
+ void viewCFGOnly(const BlockFrequencyInfo *BFI,
+ const BranchProbabilityInfo *BPI) const;
+
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal;
@@ -808,9 +830,11 @@ public:
/// hasAddressTaken - returns true if there are any uses of this function
/// other than direct calls or invokes to it, or blockaddress expressions.
- /// Optionally passes back an offending user for diagnostic purposes.
+ /// Optionally passes back an offending user for diagnostic purposes and
+ /// can be told to ignore callback uses.
///
- bool hasAddressTaken(const User** = nullptr) const;
+ bool hasAddressTaken(const User ** = nullptr,
+ bool IgnoreCallbackUses = false) const;
/// isDefTriviallyDead - Return true if it is trivially safe to remove
/// this function definition from the module (because it isn't externally
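[Illustrative note, not part of the patch] A sketch exercising the two Function additions above; inspect is a hypothetical helper and F is assumed to be a fully constructed function.

    #include "llvm/IR/Function.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void inspect(const Function &F) {
      // stackalign attribute, if present on the function.
      if (MaybeAlign A = F.getFnStackAlign())
        outs() << F.getName() << ": stack align " << A->value() << "\n";
      // The new flag skips uses coming from callback metadata.
      const User *Offender = nullptr;
      if (F.hasAddressTaken(&Offender, /*IgnoreCallbackUses=*/true))
        outs() << "address taken by: " << *Offender << "\n";
    }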
diff --git a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
index 9b257abc7c1f..79ea5791b2fd 100644
--- a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -75,9 +75,15 @@ namespace llvm {
generic_gep_type_iterator& operator++() { // Preincrement
Type *Ty = getIndexedType();
- if (auto *STy = dyn_cast<SequentialType>(Ty)) {
- CurTy = STy->getElementType();
- NumElements = STy->getNumElements();
+ if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+ CurTy = ATy->getElementType();
+ NumElements = ATy->getNumElements();
+ } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ CurTy = VTy->getElementType();
+ if (isa<ScalableVectorType>(VTy))
+ NumElements = Unbounded;
+ else
+ NumElements = VTy->getNumElements();
} else
CurTy = dyn_cast<StructType>(Ty);
++OpIt;
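[Illustrative note, not part of the patch] With SequentialType gone, the iterator now distinguishes arrays from (possibly scalable) vectors itself; callers are unaffected, as in this hypothetical sketch.

    #include "llvm/IR/GetElementPtrTypeIterator.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Count indices that step through vector types (fixed or scalable).
    unsigned countVectorSteps(const GetElementPtrInst *GEP) {
      unsigned N = 0;
      for (auto GTI = gep_type_begin(GEP), E = gep_type_end(GEP); GTI != E; ++GTI)
        if (isa<VectorType>(GTI.getIndexedType()))
          ++N;
      return N;
    }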
diff --git a/llvm/include/llvm/IR/GlobalObject.h b/llvm/include/llvm/IR/GlobalObject.h
index ce81eb9f0719..3a7b718845cb 100644
--- a/llvm/include/llvm/IR/GlobalObject.h
+++ b/llvm/include/llvm/IR/GlobalObject.h
@@ -70,16 +70,22 @@ private:
public:
GlobalObject(const GlobalObject &) = delete;
+ /// FIXME: Remove this function once the transition to Align is over.
unsigned getAlignment() const {
+ MaybeAlign Align = getAlign();
+ return Align ? Align->value() : 0;
+ }
+
+ /// Returns the alignment of the given variable or function.
+ ///
+ /// Note that for functions this is the alignment of the code, not the
+ /// alignment of a function pointer.
+ MaybeAlign getAlign() const {
unsigned Data = getGlobalValueSubClassData();
unsigned AlignmentData = Data & AlignmentMask;
- MaybeAlign Align = decodeMaybeAlign(AlignmentData);
- return Align ? Align->value() : 0;
+ return decodeMaybeAlign(AlignmentData);
}
- /// FIXME: Remove this setter once the migration to MaybeAlign is over.
- LLVM_ATTRIBUTE_DEPRECATED(void setAlignment(unsigned Align),
- "Please use `void setAlignment(MaybeAlign Align)`");
void setAlignment(MaybeAlign Align);
unsigned getGlobalObjectSubClassData() const {
@@ -178,9 +184,16 @@ public:
void copyMetadata(const GlobalObject *Src, unsigned Offset);
void addTypeMetadata(unsigned Offset, Metadata *TypeID);
- void addVCallVisibilityMetadata(VCallVisibility Visibility);
+ void setVCallVisibilityMetadata(VCallVisibility Visibility);
VCallVisibility getVCallVisibility() const;
+ /// Returns true if the alignment of the value can be unilaterally
+ /// increased.
+ ///
+ /// Note that for functions this is the alignment of the code, not the
+ /// alignment of a function pointer.
+ bool canIncreaseAlignment() const;
+
protected:
void copyAttributesFrom(const GlobalObject *Src);
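[Illustrative note, not part of the patch] The MaybeAlign getter pairs naturally with the relocated canIncreaseAlignment; ensureAlign16 is hypothetical.

    #include "llvm/IR/GlobalObject.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Raise a global's alignment to at least 16 bytes when that is legal.
    void ensureAlign16(GlobalObject &GO) {
      MaybeAlign Cur = GO.getAlign();
      if ((!Cur || Cur->value() < 16) && GO.canIncreaseAlignment())
        GO.setAlignment(Align(16));
    }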
diff --git a/llvm/include/llvm/IR/GlobalValue.h b/llvm/include/llvm/IR/GlobalValue.h
index 0171356914d6..cf704d1f2374 100644
--- a/llvm/include/llvm/IR/GlobalValue.h
+++ b/llvm/include/llvm/IR/GlobalValue.h
@@ -146,12 +146,6 @@ private:
llvm_unreachable("Fully covered switch above!");
}
- void maybeSetDsoLocal() {
- if (hasLocalLinkage() ||
- (!hasDefaultVisibility() && !hasExternalWeakLinkage()))
- setDSOLocal(true);
- }
-
protected:
/// The intrinsic ID for this subclass (which must be a Function).
///
@@ -191,7 +185,6 @@ public:
GlobalValue(const GlobalValue &) = delete;
- unsigned getAlignment() const;
unsigned getAddressSpace() const;
enum class UnnamedAddr {
@@ -243,7 +236,8 @@ public:
assert((!hasLocalLinkage() || V == DefaultVisibility) &&
"local linkage requires default visibility");
Visibility = V;
- maybeSetDsoLocal();
+ if (isImplicitDSOLocal())
+ setDSOLocal(true);
}
/// If the value is "Thread Local", its value isn't shared by the threads.
@@ -278,6 +272,11 @@ public:
Type *getValueType() const { return ValueType; }
+ bool isImplicitDSOLocal() const {
+ return hasLocalLinkage() ||
+ (!hasDefaultVisibility() && !hasExternalWeakLinkage());
+ }
+
void setDSOLocal(bool Local) { IsDSOLocal = Local; }
bool isDSOLocal() const {
@@ -423,10 +422,11 @@ public:
}
/// Return true if this global's definition can be substituted with an
- /// *arbitrary* definition at link time. We cannot do any IPO or inlinining
- /// across interposable call edges, since the callee can be replaced with
- /// something arbitrary at link time.
- bool isInterposable() const { return isInterposableLinkage(getLinkage()); }
+ /// *arbitrary* definition at link time or load time. We cannot do any IPO or
+ /// inlining across interposable call edges, since the callee can be
+ /// replaced with something arbitrary.
+ bool isInterposable() const;
+ bool canBenefitFromLocalAlias() const;
bool hasExternalLinkage() const { return isExternalLinkage(getLinkage()); }
bool hasAvailableExternallyLinkage() const {
@@ -455,7 +455,8 @@ public:
if (isLocalLinkage(LT))
Visibility = DefaultVisibility;
Linkage = LT;
- maybeSetDsoLocal();
+ if (isImplicitDSOLocal())
+ setDSOLocal(true);
}
LinkageTypes getLinkage() const { return LinkageTypes(Linkage); }
@@ -547,10 +548,6 @@ public:
return !(isDeclarationForLinker() || isWeakForLinker());
}
- // Returns true if the alignment of the value can be unilaterally
- // increased.
- bool canIncreaseAlignment() const;
-
const GlobalObject *getBaseObject() const;
GlobalObject *getBaseObject() {
return const_cast<GlobalObject *>(
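[Illustrative note, not part of the patch] How a pass might combine the new isImplicitDSOLocal query with isInterposable; safeToPropagate is hypothetical.

    #include "llvm/IR/GlobalValue.h"
    using namespace llvm;

    // Interposable definitions may be replaced at link or load time, so
    // refuse to propagate facts derived from their bodies.
    bool safeToPropagate(const GlobalValue &GV) {
      return !GV.isInterposable() &&
             (GV.isDSOLocal() || GV.isImplicitDSOLocal());
    }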
diff --git a/llvm/include/llvm/IR/GlobalVariable.h b/llvm/include/llvm/IR/GlobalVariable.h
index 2c730bc312e4..12093e337d6e 100644
--- a/llvm/include/llvm/IR/GlobalVariable.h
+++ b/llvm/include/llvm/IR/GlobalVariable.h
@@ -19,7 +19,6 @@
#ifndef LLVM_IR_GLOBALVARIABLE_H
#define LLVM_IR_GLOBALVARIABLE_H
-#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Attributes.h"
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index a6252b298001..4552ca016bd7 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -59,9 +59,12 @@ class Use;
///
/// By default, this inserts the instruction at the insertion point.
class IRBuilderDefaultInserter {
-protected:
- void InsertHelper(Instruction *I, const Twine &Name,
- BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+public:
+ virtual ~IRBuilderDefaultInserter();
+
+ virtual void InsertHelper(Instruction *I, const Twine &Name,
+ BasicBlock *BB,
+ BasicBlock::iterator InsertPt) const {
if (BB) BB->getInstList().insert(InsertPt, I);
I->setName(Name);
}
@@ -69,16 +72,18 @@ protected:
/// Provides an 'InsertHelper' that calls a user-provided callback after
/// performing the default insertion.
-class IRBuilderCallbackInserter : IRBuilderDefaultInserter {
+class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
std::function<void(Instruction *)> Callback;
public:
+ virtual ~IRBuilderCallbackInserter();
+
IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
: Callback(std::move(Callback)) {}
-protected:
void InsertHelper(Instruction *I, const Twine &Name,
- BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+ BasicBlock *BB,
+ BasicBlock::iterator InsertPt) const override {
IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
Callback(I);
}
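[Illustrative note, not part of the patch] Because InsertHelper is now public and virtual, custom inserters can be written without template tricks; CountingInserter is hypothetical.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    struct CountingInserter : public IRBuilderDefaultInserter {
      mutable unsigned NumInserted = 0; // InsertHelper is const
      void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                        BasicBlock::iterator InsertPt) const override {
        IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
        ++NumInserted;
      }
    };
    // Usable as: IRBuilder<ConstantFolder, CountingInserter> B(BB);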
@@ -92,26 +97,50 @@ protected:
BasicBlock *BB;
BasicBlock::iterator InsertPt;
LLVMContext &Context;
+ const IRBuilderFolder &Folder;
+ const IRBuilderDefaultInserter &Inserter;
MDNode *DefaultFPMathTag;
FastMathFlags FMF;
bool IsFPConstrained;
fp::ExceptionBehavior DefaultConstrainedExcept;
- fp::RoundingMode DefaultConstrainedRounding;
+ RoundingMode DefaultConstrainedRounding;
ArrayRef<OperandBundleDef> DefaultOperandBundles;
public:
- IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : Context(context), DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
+ IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
+ const IRBuilderDefaultInserter &Inserter,
+ MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
+ : Context(context), Folder(Folder), Inserter(Inserter),
+ DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
DefaultConstrainedExcept(fp::ebStrict),
- DefaultConstrainedRounding(fp::rmDynamic),
+ DefaultConstrainedRounding(RoundingMode::Dynamic),
DefaultOperandBundles(OpBundles) {
ClearInsertionPoint();
}
+ /// Insert and return the specified instruction.
+ template<typename InstTy>
+ InstTy *Insert(InstTy *I, const Twine &Name = "") const {
+ Inserter.InsertHelper(I, Name, BB, InsertPt);
+ SetInstDebugLocation(I);
+ return I;
+ }
+
+ /// No-op overload to handle constants.
+ Constant *Insert(Constant *C, const Twine& = "") const {
+ return C;
+ }
+
+ Value *Insert(Value *V, const Twine &Name = "") const {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return Insert(I, Name);
+ assert(isa<Constant>(V));
+ return V;
+ }
+
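[Illustrative note, not part of the patch] Moving Insert into IRBuilderBase lets utility code take IRBuilderBase& and work with any folder/inserter instantiation; emitInc is hypothetical.

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitInc(IRBuilderBase &B, Value *V) {
      // Works identically under IRBuilder<>, IRBuilder<NoFolder>, etc.
      return B.CreateAdd(V, ConstantInt::get(V->getType(), 1), "inc");
    }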
//===--------------------------------------------------------------------===//
// Builder configuration methods
//===--------------------------------------------------------------------===//
@@ -215,6 +244,8 @@ public:
/// Get the flags to be applied to created floating point ops
FastMathFlags getFastMathFlags() const { return FMF; }
+ FastMathFlags &getFastMathFlags() { return FMF; }
+
/// Clear the fast-math flags.
void clearFastMathFlags() { FMF.clear(); }
@@ -239,7 +270,7 @@ public:
}
/// Set the rounding mode handling to be used with constrained floating point
- void setDefaultConstrainedRounding(fp::RoundingMode NewRounding) {
+ void setDefaultConstrainedRounding(RoundingMode NewRounding) {
DefaultConstrainedRounding = NewRounding;
}
@@ -249,7 +280,7 @@ public:
}
/// Get the rounding mode handling used with constrained floating point
- fp::RoundingMode getDefaultConstrainedRounding() {
+ RoundingMode getDefaultConstrainedRounding() {
return DefaultConstrainedRounding;
}
@@ -267,6 +298,10 @@ public:
I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
}
+ void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
+ DefaultOperandBundles = OpBundles;
+ }
+
//===--------------------------------------------------------------------===//
// RAII helpers.
//===--------------------------------------------------------------------===//
@@ -299,10 +334,16 @@ public:
IRBuilderBase &Builder;
FastMathFlags FMF;
MDNode *FPMathTag;
+ bool IsFPConstrained;
+ fp::ExceptionBehavior DefaultConstrainedExcept;
+ RoundingMode DefaultConstrainedRounding;
public:
FastMathFlagGuard(IRBuilderBase &B)
- : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}
+ : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
+ IsFPConstrained(B.IsFPConstrained),
+ DefaultConstrainedExcept(B.DefaultConstrainedExcept),
+ DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
FastMathFlagGuard(const FastMathFlagGuard &) = delete;
FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
@@ -310,9 +351,31 @@ public:
~FastMathFlagGuard() {
Builder.FMF = FMF;
Builder.DefaultFPMathTag = FPMathTag;
+ Builder.IsFPConstrained = IsFPConstrained;
+ Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
+ Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
}
};
+ // RAII object that stores the current default operand bundles and restores
+ // them when the object is destroyed.
+ class OperandBundlesGuard {
+ IRBuilderBase &Builder;
+ ArrayRef<OperandBundleDef> DefaultOperandBundles;
+
+ public:
+ OperandBundlesGuard(IRBuilderBase &B)
+ : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
+
+ OperandBundlesGuard(const OperandBundlesGuard &) = delete;
+ OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
+
+ ~OperandBundlesGuard() {
+ Builder.DefaultOperandBundles = DefaultOperandBundles;
+ }
+ };
+
+
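[Illustrative note, not part of the patch] FastMathFlagGuard now also saves the constrained-FP state, and OperandBundlesGuard does the same for default bundles; emitFastFAdd is hypothetical.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitFastFAdd(IRBuilderBase &B, Value *L, Value *R) {
      IRBuilderBase::FastMathFlagGuard Guard(B);
      FastMathFlags FMF;
      FMF.setFast();
      B.setFastMathFlags(FMF);
      // Prior flags and constrained-FP state are restored at scope exit.
      return B.CreateFAdd(L, R);
    }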
//===--------------------------------------------------------------------===//
// Miscellaneous creation methods.
//===--------------------------------------------------------------------===//
@@ -414,6 +477,11 @@ public:
return Type::getHalfTy(Context);
}
+ /// Fetch the type representing a 16-bit brain floating point value.
+ Type *getBFloatTy() {
+ return Type::getBFloatTy(Context);
+ }
+
/// Fetch the type representing a 32-bit floating point value.
Type *getFloatTy() {
return Type::getFloatTy(Context);
@@ -468,19 +536,6 @@ public:
/// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
/// specified, it will be added to the instruction. Likewise with alias.scope
/// and noalias tags.
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes Align instead of this one.
- LLVM_ATTRIBUTE_DEPRECATED(
- CallInst *CreateElementUnorderedAtomicMemSet(
- Value *Ptr, Value *Val, uint64_t Size, unsigned Alignment,
- uint32_t ElementSize, MDNode *TBAATag = nullptr,
- MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
- "Use the version that takes Align instead of this one") {
- return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
- Align(Alignment), ElementSize,
- TBAATag, ScopeTag, NoAliasTag);
- }
-
CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
uint64_t Size, Align Alignment,
uint32_t ElementSize,
@@ -492,19 +547,6 @@ public:
TBAATag, ScopeTag, NoAliasTag);
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes Align instead of this one.
- LLVM_ATTRIBUTE_DEPRECATED(
- CallInst *CreateElementUnorderedAtomicMemSet(
- Value *Ptr, Value *Val, Value *Size, unsigned Alignment,
- uint32_t ElementSize, MDNode *TBAATag = nullptr,
- MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
- "Use the version that takes Align instead of this one") {
- return CreateElementUnorderedAtomicMemSet(Ptr, Val, Size, Align(Alignment),
- ElementSize, TBAATag, ScopeTag,
- NoAliasTag);
- }
-
CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
Value *Size, Align Alignment,
uint32_t ElementSize,
@@ -517,21 +559,6 @@ public:
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction. Likewise with alias.scope
/// and noalias tags.
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LLVM_ATTRIBUTE_DEPRECATED(
- CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
- unsigned SrcAlign, uint64_t Size,
- bool isVolatile = false, MDNode *TBAATag = nullptr,
- MDNode *TBAAStructTag = nullptr,
- MDNode *ScopeTag = nullptr,
- MDNode *NoAliasTag = nullptr),
- "Use the version that takes MaybeAlign instead") {
- return CreateMemCpy(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
- getInt64(Size), isVolatile, TBAATag, TBAAStructTag,
- ScopeTag, NoAliasTag);
- }
-
CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
MaybeAlign SrcAlign, uint64_t Size,
bool isVolatile = false, MDNode *TBAATag = nullptr,
@@ -543,16 +570,6 @@ public:
NoAliasTag);
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LLVM_ATTRIBUTE_DEPRECATED(
- CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
- unsigned SrcAlign, Value *Size,
- bool isVolatile = false, MDNode *TBAATag = nullptr,
- MDNode *TBAAStructTag = nullptr,
- MDNode *ScopeTag = nullptr,
- MDNode *NoAliasTag = nullptr),
- "Use the version that takes MaybeAlign instead");
CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
MaybeAlign SrcAlign, Value *Size,
bool isVolatile = false, MDNode *TBAATag = nullptr,
@@ -560,6 +577,9 @@ public:
MDNode *ScopeTag = nullptr,
MDNode *NoAliasTag = nullptr);
+ CallInst *CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
+ MaybeAlign SrcAlign, Value *Size);
+
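[Illustrative note, not part of the patch] The new CreateMemCpyInline mirrors CreateMemCpy but emits the memcpy.inline intrinsic, which may not be lowered to a library call; emitCopies is a hypothetical sketch with assumed 16-byte alignments.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    void emitCopies(IRBuilderBase &B, Value *Dst, Value *Src, Value *N) {
      B.CreateMemCpy(Dst, MaybeAlign(16), Src, MaybeAlign(16), N);
      // The inline form expects a known-constant size.
      B.CreateMemCpyInline(Dst, Align(16), Src, Align(16), B.getInt64(64));
    }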
/// Create and insert an element unordered-atomic memcpy between the
/// specified pointers.
///
@@ -569,39 +589,37 @@ public:
/// specified, it will be added to the instruction. Likewise with alias.scope
/// and noalias tags.
CallInst *CreateElementUnorderedAtomicMemCpy(
- Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
- uint64_t Size, uint32_t ElementSize, MDNode *TBAATag = nullptr,
- MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
- MDNode *NoAliasTag = nullptr) {
- return CreateElementUnorderedAtomicMemCpy(
- Dst, DstAlign, Src, SrcAlign, getInt64(Size), ElementSize, TBAATag,
- TBAAStructTag, ScopeTag, NoAliasTag);
- }
-
- CallInst *CreateElementUnorderedAtomicMemCpy(
- Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
+ Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
uint32_t ElementSize, MDNode *TBAATag = nullptr,
MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
MDNode *NoAliasTag = nullptr);
- /// Create and insert a memmove between the specified
- /// pointers.
- ///
- /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
- /// specified, it will be added to the instruction. Likewise with alias.scope
- /// and noalias tags.
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LLVM_ATTRIBUTE_DEPRECATED(
- CallInst *CreateMemMove(
- Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
- uint64_t Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
- MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
- "Use the version that takes MaybeAlign") {
- return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
- getInt64(Size), isVolatile, TBAATag, ScopeTag,
- NoAliasTag);
+ LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemCpy(
+ Value *Dst, unsigned DstAlign, Value *Src,
+ unsigned SrcAlign, uint64_t Size,
+ uint32_t ElementSize, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr),
+ "Use the version that takes Align instead") {
+ return CreateElementUnorderedAtomicMemCpy(
+ Dst, Align(DstAlign), Src, Align(SrcAlign), getInt64(Size), ElementSize,
+ TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
+ }
+
+ LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemCpy(
+ Value *Dst, unsigned DstAlign, Value *Src,
+ unsigned SrcAlign, Value *Size,
+ uint32_t ElementSize, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr),
+ "Use the version that takes Align instead") {
+ return CreateElementUnorderedAtomicMemCpy(
+ Dst, Align(DstAlign), Src, Align(SrcAlign), Size, ElementSize, TBAATag,
+ TBAAStructTag, ScopeTag, NoAliasTag);
}
+
CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
MaybeAlign SrcAlign, uint64_t Size,
bool isVolatile = false, MDNode *TBAATag = nullptr,
@@ -610,17 +628,7 @@ public:
return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
isVolatile, TBAATag, ScopeTag, NoAliasTag);
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LLVM_ATTRIBUTE_DEPRECATED(
- CallInst *CreateMemMove(
- Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
- Value *Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
- MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
- "Use the version that takes MaybeAlign") {
- return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
- Size, isVolatile, TBAATag, ScopeTag, NoAliasTag);
- }
+
CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
MaybeAlign SrcAlign, Value *Size,
bool isVolatile = false, MDNode *TBAATag = nullptr,
@@ -637,21 +645,37 @@ public:
/// specified, it will be added to the instruction. Likewise with alias.scope
/// and noalias tags.
CallInst *CreateElementUnorderedAtomicMemMove(
- Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
- uint64_t Size, uint32_t ElementSize, MDNode *TBAATag = nullptr,
+ Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
+ uint32_t ElementSize, MDNode *TBAATag = nullptr,
MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
- MDNode *NoAliasTag = nullptr) {
+ MDNode *NoAliasTag = nullptr);
+
+ LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemMove(
+ Value *Dst, unsigned DstAlign, Value *Src,
+ unsigned SrcAlign, uint64_t Size,
+ uint32_t ElementSize, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr),
+ "Use the version that takes Align instead") {
return CreateElementUnorderedAtomicMemMove(
- Dst, DstAlign, Src, SrcAlign, getInt64(Size), ElementSize, TBAATag,
+ Dst, Align(DstAlign), Src, Align(SrcAlign), getInt64(Size), ElementSize,
+ TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
+ }
+
+ LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemMove(
+ Value *Dst, unsigned DstAlign, Value *Src,
+ unsigned SrcAlign, Value *Size,
+ uint32_t ElementSize, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr),
+ "Use the version that takes Align instead") {
+ return CreateElementUnorderedAtomicMemMove(
+ Dst, Align(DstAlign), Src, Align(SrcAlign), Size, ElementSize, TBAATag,
TBAAStructTag, ScopeTag, NoAliasTag);
}
- CallInst *CreateElementUnorderedAtomicMemMove(
- Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
- uint32_t ElementSize, MDNode *TBAATag = nullptr,
- MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
- MDNode *NoAliasTag = nullptr);
-
/// Create a vector fadd reduction intrinsic of the source vector.
/// The first parameter is a scalar accumulator value for ordered reductions.
CallInst *CreateFAddReduce(Value *Acc, Value *Src);
@@ -707,33 +731,69 @@ public:
CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
/// Create a call to Masked Load intrinsic
- CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
+ LLVM_ATTRIBUTE_DEPRECATED(
+ CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
+ Value *PassThru = nullptr,
+ const Twine &Name = ""),
+ "Use the version that takes Align instead") {
+ return CreateMaskedLoad(Ptr, assumeAligned(Alignment), Mask, PassThru,
+ Name);
+ }
+ CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
Value *PassThru = nullptr, const Twine &Name = "");
/// Create a call to Masked Store intrinsic
- CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
+ LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateMaskedStore(Value *Val, Value *Ptr,
+ unsigned Alignment,
+ Value *Mask),
+ "Use the version that takes Align instead") {
+ return CreateMaskedStore(Val, Ptr, assumeAligned(Alignment), Mask);
+ }
+
+ CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
Value *Mask);
/// Create a call to Masked Gather intrinsic
- CallInst *CreateMaskedGather(Value *Ptrs, unsigned Align,
- Value *Mask = nullptr,
- Value *PassThru = nullptr,
- const Twine& Name = "");
+ LLVM_ATTRIBUTE_DEPRECATED(
+ CallInst *CreateMaskedGather(Value *Ptrs, unsigned Alignment,
+ Value *Mask = nullptr,
+ Value *PassThru = nullptr,
+ const Twine &Name = ""),
+ "Use the version that takes Align instead") {
+ return CreateMaskedGather(Ptrs, Align(Alignment), Mask, PassThru, Name);
+ }
+
+ /// Create a call to Masked Gather intrinsic
+ CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
+ Value *Mask = nullptr, Value *PassThru = nullptr,
+ const Twine &Name = "");
/// Create a call to Masked Scatter intrinsic
- CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Align,
+ LLVM_ATTRIBUTE_DEPRECATED(
+ CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Alignment,
+ Value *Mask = nullptr),
+ "Use the version that takes Align instead") {
+ return CreateMaskedScatter(Val, Ptrs, Align(Alignment), Mask);
+ }
+
+ /// Create a call to Masked Scatter intrinsic
+ CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
Value *Mask = nullptr);
/// Create an assume intrinsic call that allows the optimizer to
/// assume that the provided condition will be true.
- CallInst *CreateAssumption(Value *Cond);
+ ///
+ /// The optional argument \p OpBundles specifies operand bundles that are
+ /// added to the call instruction.
+ CallInst *CreateAssumption(Value *Cond,
+ ArrayRef<OperandBundleDef> OpBundles = llvm::None);
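[Illustrative note, not part of the patch] The masked intrinsics now take Align, and CreateAssumption grows an operand-bundle parameter; emitGuardedLoad is hypothetical.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitGuardedLoad(IRBuilderBase &B, Value *Ptr, Value *Mask,
                           Value *PassThru, Value *Cond) {
      B.CreateAssumption(Cond); // OpBundles defaults to llvm::None
      return B.CreateMaskedLoad(Ptr, Align(16), Mask, PassThru);
    }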
/// Create a call to the experimental.gc.statepoint intrinsic to
/// start a new statepoint sequence.
CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
Value *ActualCallee,
ArrayRef<Value *> CallArgs,
- ArrayRef<Value *> DeoptArgs,
+ Optional<ArrayRef<Value *>> DeoptArgs,
ArrayRef<Value *> GCArgs,
const Twine &Name = "");
@@ -742,8 +802,8 @@ public:
CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
Value *ActualCallee, uint32_t Flags,
ArrayRef<Use> CallArgs,
- ArrayRef<Use> TransitionArgs,
- ArrayRef<Use> DeoptArgs,
+ Optional<ArrayRef<Use>> TransitionArgs,
+ Optional<ArrayRef<Use>> DeoptArgs,
ArrayRef<Value *> GCArgs,
const Twine &Name = "");
@@ -752,7 +812,7 @@ public:
/// .get()'ed to get the Value pointer.
CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
Value *ActualCallee, ArrayRef<Use> CallArgs,
- ArrayRef<Value *> DeoptArgs,
+ Optional<ArrayRef<Value *>> DeoptArgs,
ArrayRef<Value *> GCArgs,
const Twine &Name = "");
@@ -762,7 +822,7 @@ public:
CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
Value *ActualInvokee, BasicBlock *NormalDest,
BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
- ArrayRef<Value *> DeoptArgs,
+ Optional<ArrayRef<Value *>> DeoptArgs,
ArrayRef<Value *> GCArgs, const Twine &Name = "");
/// Create an invoke to the experimental.gc.statepoint intrinsic to
@@ -770,8 +830,8 @@ public:
InvokeInst *CreateGCStatepointInvoke(
uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
- ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
- ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs,
+ ArrayRef<Use> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
+ Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
const Twine &Name = "");
// Convenience function for the common case when CallArgs are filled in using
@@ -781,7 +841,7 @@ public:
CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
Value *ActualInvokee, BasicBlock *NormalDest,
BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
- ArrayRef<Value *> DeoptArgs,
+ Optional<ArrayRef<Value *>> DeoptArgs,
ArrayRef<Value *> GCArgs, const Twine &Name = "");
/// Create a call to the experimental.gc.result intrinsic to extract
@@ -845,85 +905,6 @@ private:
const Twine &Name = "");
Value *getCastedInt8PtrValue(Value *Ptr);
-};
-
-/// This provides a uniform API for creating instructions and inserting
-/// them into a basic block: either at the end of a BasicBlock, or at a specific
-/// iterator location in a block.
-///
-/// Note that the builder does not expose the full generality of LLVM
-/// instructions. For access to extra instruction properties, use the mutators
-/// (e.g. setVolatile) on the instructions after they have been
-/// created. Convenience state exists to specify fast-math flags and fp-math
-/// tags.
-///
-/// The first template argument specifies a class to use for creating constants.
-/// This defaults to creating minimally folded constants. The second template
-/// argument allows clients to specify custom insertion hooks that are called on
-/// every newly created insertion.
-template <typename T = ConstantFolder,
- typename Inserter = IRBuilderDefaultInserter>
-class IRBuilder : public IRBuilderBase, public Inserter {
- T Folder;
-
-public:
- IRBuilder(LLVMContext &C, const T &F, Inserter I = Inserter(),
- MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(C, FPMathTag, OpBundles), Inserter(std::move(I)),
- Folder(F) {}
-
- explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(C, FPMathTag, OpBundles) {}
-
- explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
- SetInsertPoint(TheBB);
- }
-
- explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles) {
- SetInsertPoint(TheBB);
- }
-
- explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(IP->getContext(), FPMathTag, OpBundles) {
- SetInsertPoint(IP);
- }
-
- IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T &F,
- MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
- SetInsertPoint(TheBB, IP);
- }
-
- IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
- MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = None)
- : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles) {
- SetInsertPoint(TheBB, IP);
- }
-
- /// Get the constant folder being used.
- const T &getFolder() { return Folder; }
-
- /// Insert and return the specified instruction.
- template<typename InstTy>
- InstTy *Insert(InstTy *I, const Twine &Name = "") const {
- this->InsertHelper(I, Name, BB, InsertPt);
- this->SetInstDebugLocation(I);
- return I;
- }
-
- /// No-op overload to handle constants.
- Constant *Insert(Constant *C, const Twine& = "") const {
- return C;
- }
//===--------------------------------------------------------------------===//
// Instruction creation methods: Terminators
@@ -1045,28 +1026,6 @@ public:
NormalDest, UnwindDest, Args, Name);
}
- // Deprecated [opaque pointer types]
- InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
- BasicBlock *UnwindDest, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> OpBundles,
- const Twine &Name = "") {
- return CreateInvoke(
- cast<FunctionType>(
- cast<PointerType>(Callee->getType())->getElementType()),
- Callee, NormalDest, UnwindDest, Args, OpBundles, Name);
- }
-
- // Deprecated [opaque pointer types]
- InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
- BasicBlock *UnwindDest,
- ArrayRef<Value *> Args = None,
- const Twine &Name = "") {
- return CreateInvoke(
- cast<FunctionType>(
- cast<PointerType>(Callee->getType())->getElementType()),
- Callee, NormalDest, UnwindDest, Args, Name);
- }
-
/// \brief Create a callbr instruction.
CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
BasicBlock *DefaultDest,
@@ -1169,8 +1128,8 @@ private:
return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
}
- Value *getConstrainedFPRounding(Optional<fp::RoundingMode> Rounding) {
- fp::RoundingMode UseRounding = DefaultConstrainedRounding;
+ Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
+ RoundingMode UseRounding = DefaultConstrainedRounding;
if (Rounding.hasValue())
UseRounding = Rounding.getValue();
@@ -1561,21 +1520,8 @@ public:
CallInst *CreateConstrainedFPBinOp(
Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
const Twine &Name = "", MDNode *FPMathTag = nullptr,
- Optional<fp::RoundingMode> Rounding = None,
- Optional<fp::ExceptionBehavior> Except = None) {
- Value *RoundingV = getConstrainedFPRounding(Rounding);
- Value *ExceptV = getConstrainedFPExcept(Except);
-
- FastMathFlags UseFMF = FMF;
- if (FMFSource)
- UseFMF = FMFSource->getFastMathFlags();
-
- CallInst *C = CreateIntrinsic(ID, {L->getType()},
- {L, R, RoundingV, ExceptV}, nullptr, Name);
- setConstrainedFPCallAttr(C);
- setFPAttrs(C, FPMathTag, UseFMF);
- return C;
- }
+ Optional<RoundingMode> Rounding = None,
+ Optional<fp::ExceptionBehavior> Except = None);
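[Illustrative note, not part of the patch] With the enum hoisted out of namespace fp, configuring strict FP emission reads as below; emitStrictAdd is hypothetical.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitStrictAdd(IRBuilderBase &B, Value *L, Value *R) {
      B.setIsFPConstrained(true);
      B.setDefaultConstrainedRounding(RoundingMode::TowardZero);
      B.setDefaultConstrainedExcept(fp::ebStrict);
      // Emitted as llvm.experimental.constrained.fadd under these defaults.
      return B.CreateFAdd(L, R);
    }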
Value *CreateNeg(Value *V, const Twine &Name = "",
bool HasNUW = false, bool HasNSW = false) {
@@ -1634,20 +1580,7 @@ public:
/// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
/// Correct number of operands must be passed accordingly.
Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
- const Twine &Name = "",
- MDNode *FPMathTag = nullptr) {
- if (Instruction::isBinaryOp(Opc)) {
- assert(Ops.size() == 2 && "Invalid number of operands!");
- return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
- Ops[0], Ops[1], Name, FPMathTag);
- }
- if (Instruction::isUnaryOp(Opc)) {
- assert(Ops.size() == 1 && "Invalid number of operands!");
- return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
- Ops[0], Name, FPMathTag);
- }
- llvm_unreachable("Unexpected opcode!");
- }
+ const Twine &Name = "", MDNode *FPMathTag = nullptr);
//===--------------------------------------------------------------------===//
// Instruction creation methods: Memory Instructions
@@ -1655,28 +1588,32 @@ public:
AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
Value *ArraySize = nullptr, const Twine &Name = "") {
- return Insert(new AllocaInst(Ty, AddrSpace, ArraySize), Name);
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align AllocaAlign = DL.getPrefTypeAlign(Ty);
+ return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
}
AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
const Twine &Name = "") {
- const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
- return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name);
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align AllocaAlign = DL.getPrefTypeAlign(Ty);
+ unsigned AddrSpace = DL.getAllocaAddrSpace();
+ return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
}
/// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
/// converting the string to 'bool' for the isVolatile parameter.
LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
- return Insert(new LoadInst(Ty, Ptr), Name);
+ return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
}
LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
- return Insert(new LoadInst(Ty, Ptr), Name);
+ return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
}
LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
const Twine &Name = "") {
- return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile), Name);
+ return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
}
// Deprecated [opaque pointer types]
@@ -1696,65 +1633,71 @@ public:
}
StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
- return Insert(new StoreInst(Val, Ptr, isVolatile));
+ return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
}
- /// Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
- /// correctly, instead of converting the string to 'bool' for the isVolatile
- /// parameter.
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
- const char *Name) {
+ LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+ unsigned Align,
+ const char *Name),
+ "Use the version that takes NaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
const char *Name) {
- LoadInst *LI = CreateLoad(Ty, Ptr, Name);
- LI->setAlignment(Align);
- return LI;
+ return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
- const Twine &Name = "") {
+
+ LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+ unsigned Align,
+ const Twine &Name = ""),
+ "Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
const Twine &Name = "") {
- LoadInst *LI = CreateLoad(Ty, Ptr, Name);
- LI->setAlignment(Align);
- return LI;
+ return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
- bool isVolatile, const Twine &Name = "") {
+
+ LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+ unsigned Align,
+ bool isVolatile,
+ const Twine &Name = ""),
+ "Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
bool isVolatile, const Twine &Name = "") {
- LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name);
- LI->setAlignment(Align);
- return LI;
+ if (!Align) {
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align = DL.getABITypeAlign(Ty);
+ }
+ return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
}
// Deprecated [opaque pointer types]
- LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+ LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
+ unsigned Align,
+ const char *Name),
+ "Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
- Align, Name);
+ MaybeAlign(Align), Name);
}
// Deprecated [opaque pointer types]
- LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
- const Twine &Name = "") {
+ LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
+ unsigned Align,
+ const Twine &Name = ""),
+ "Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
- Align, Name);
+ MaybeAlign(Align), Name);
}
// Deprecated [opaque pointer types]
- LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
- const Twine &Name = "") {
+ LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
+ unsigned Align,
+ bool isVolatile,
+ const Twine &Name = ""),
+ "Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
- Align, isVolatile, Name);
+ MaybeAlign(Align), isVolatile, Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
@@ -1774,15 +1717,19 @@ public:
Align, isVolatile, Name);
}
- StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
- bool isVolatile = false) {
- StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
- SI->setAlignment(MaybeAlign(Align));
- return SI;
+ LLVM_ATTRIBUTE_DEPRECATED(
+ StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
+ bool isVolatile = false),
+ "Use the version that takes MaybeAlign instead") {
+ return CreateAlignedStore(Val, Ptr, MaybeAlign(Align), isVolatile);
}
StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
bool isVolatile = false) {
- return CreateAlignedStore(Val, Ptr, Align ? Align->value() : 0, isVolatile);
+ if (!Align) {
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align = DL.getABITypeAlign(Val->getType());
+ }
+ return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
}
FenceInst *CreateFence(AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System,
@@ -1790,19 +1737,21 @@ public:
return Insert(new FenceInst(Context, Ordering, SSID), Name);
}
- AtomicCmpXchgInst *
- CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
- AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SyncScope::ID SSID = SyncScope::System) {
- return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
- FailureOrdering, SSID));
+ AtomicCmpXchgInst *CreateAtomicCmpXchg(
+ Value *Ptr, Value *Cmp, Value *New, AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID = SyncScope::System) {
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align Alignment(DL.getTypeStoreSize(New->getType()));
+ return Insert(new AtomicCmpXchgInst(
+ Ptr, Cmp, New, Alignment, SuccessOrdering, FailureOrdering, SSID));
}
AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System) {
- return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align Alignment(DL.getTypeStoreSize(Val->getType()));
+ return Insert(new AtomicRMWInst(Op, Ptr, Val, Alignment, Ordering, SSID));
}
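[Illustrative note, not part of the patch] Both atomic builders now synthesize a natural alignment from the DataLayout, so existing callers like this hypothetical emitFetchAdd stay source-compatible.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitFetchAdd(IRBuilderBase &B, Value *Ptr, Value *Delta) {
      // Alignment is derived from getTypeStoreSize(Delta->getType()).
      return B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Delta,
                               AtomicOrdering::SequentiallyConsistent);
    }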
Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
@@ -2200,39 +2149,8 @@ public:
Intrinsic::ID ID, Value *V, Type *DestTy,
Instruction *FMFSource = nullptr, const Twine &Name = "",
MDNode *FPMathTag = nullptr,
- Optional<fp::RoundingMode> Rounding = None,
- Optional<fp::ExceptionBehavior> Except = None) {
- Value *ExceptV = getConstrainedFPExcept(Except);
-
- FastMathFlags UseFMF = FMF;
- if (FMFSource)
- UseFMF = FMFSource->getFastMathFlags();
-
- CallInst *C;
- bool HasRoundingMD = false;
- switch (ID) {
- default:
- break;
-#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
- case Intrinsic::INTRINSIC: \
- HasRoundingMD = ROUND_MODE; \
- break;
-#include "llvm/IR/ConstrainedOps.def"
- }
- if (HasRoundingMD) {
- Value *RoundingV = getConstrainedFPRounding(Rounding);
- C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
- nullptr, Name);
- } else
- C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
- Name);
-
- setConstrainedFPCallAttr(C);
-
- if (isa<FPMathOperator>(C))
- setFPAttrs(C, FPMathTag, UseFMF);
- return C;
- }
+ Optional<RoundingMode> Rounding = None,
+ Optional<fp::ExceptionBehavior> Except = None);
// Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
// compile time error, instead of converting the string to bool for the
@@ -2366,14 +2284,14 @@ public:
// Note that this differs from CreateFCmpS only if IsFPConstrained is true.
Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- if (IsFPConstrained)
- return CreateConstrainedFPCmp(Intrinsic::experimental_constrained_fcmp,
- P, LHS, RHS, Name);
+ return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
+ }
- if (auto *LC = dyn_cast<Constant>(LHS))
- if (auto *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateFCmp(P, LC, RC), Name);
- return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
+ Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ return CmpInst::isFPPredicate(Pred)
+ ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
+ : CreateICmp(Pred, LHS, RHS, Name);
}
// Create a signaling floating-point comparison (i.e. one that raises an FP
@@ -2381,28 +2299,19 @@ public:
// Note that this differs from CreateFCmp only if IsFPConstrained is true.
Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- if (IsFPConstrained)
- return CreateConstrainedFPCmp(Intrinsic::experimental_constrained_fcmps,
- P, LHS, RHS, Name);
-
- if (auto *LC = dyn_cast<Constant>(LHS))
- if (auto *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateFCmp(P, LC, RC), Name);
- return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
+ return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
}
+private:
+ // Helper routine to create either a signaling or a quiet FP comparison.
+ Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
+ const Twine &Name, MDNode *FPMathTag,
+ bool IsSignaling);
+
+public:
CallInst *CreateConstrainedFPCmp(
Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
- const Twine &Name = "",
- Optional<fp::ExceptionBehavior> Except = None) {
- Value *PredicateV = getConstrainedFPPredicate(P);
- Value *ExceptV = getConstrainedFPExcept(Except);
-
- CallInst *C = CreateIntrinsic(ID, {L->getType()},
- {L, R, PredicateV, ExceptV}, nullptr, Name);
- setConstrainedFPCallAttr(C);
- return C;
- }
+ const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
//===--------------------------------------------------------------------===//
// Instruction creation methods: Other Instructions
@@ -2451,67 +2360,13 @@ public:
OpBundles, Name, FPMathTag);
}
- // Deprecated [opaque pointer types]
- CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
- const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- return CreateCall(
- cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
- Args, Name, FPMathTag);
- }
-
- // Deprecated [opaque pointer types]
- CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> OpBundles,
- const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- return CreateCall(
- cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
- Args, OpBundles, Name, FPMathTag);
- }
-
CallInst *CreateConstrainedFPCall(
Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
- Optional<fp::RoundingMode> Rounding = None,
- Optional<fp::ExceptionBehavior> Except = None) {
- llvm::SmallVector<Value *, 6> UseArgs;
-
- for (auto *OneArg : Args)
- UseArgs.push_back(OneArg);
- bool HasRoundingMD = false;
- switch (Callee->getIntrinsicID()) {
- default:
- break;
-#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
- case Intrinsic::INTRINSIC: \
- HasRoundingMD = ROUND_MODE; \
- break;
-#include "llvm/IR/ConstrainedOps.def"
- }
- if (HasRoundingMD)
- UseArgs.push_back(getConstrainedFPRounding(Rounding));
- UseArgs.push_back(getConstrainedFPExcept(Except));
-
- CallInst *C = CreateCall(Callee, UseArgs, Name);
- setConstrainedFPCallAttr(C);
- return C;
- }
+ Optional<RoundingMode> Rounding = None,
+ Optional<fp::ExceptionBehavior> Except = None);
Value *CreateSelect(Value *C, Value *True, Value *False,
- const Twine &Name = "", Instruction *MDFrom = nullptr) {
- if (auto *CC = dyn_cast<Constant>(C))
- if (auto *TC = dyn_cast<Constant>(True))
- if (auto *FC = dyn_cast<Constant>(False))
- return Insert(Folder.CreateSelect(CC, TC, FC), Name);
-
- SelectInst *Sel = SelectInst::Create(C, True, False);
- if (MDFrom) {
- MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
- MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
- Sel = addBranchMetadata(Sel, Prof, Unpred);
- }
- if (isa<FPMathOperator>(Sel))
- setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
- return Insert(Sel, Name);
- }
+ const Twine &Name = "", Instruction *MDFrom = nullptr);
VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
return Insert(new VAArgInst(List, Ty), Name);
@@ -2546,17 +2401,27 @@ public:
Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
const Twine &Name = "") {
- if (auto *V1C = dyn_cast<Constant>(V1))
- if (auto *V2C = dyn_cast<Constant>(V2))
- if (auto *MC = dyn_cast<Constant>(Mask))
- return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name);
- return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
+ SmallVector<int, 16> IntMask;
+ ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
+ return CreateShuffleVector(V1, V2, IntMask, Name);
+ }
+
+ LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
+ ArrayRef<uint32_t> Mask,
+ const Twine &Name = ""),
+ "Pass indices as 'int' instead") {
+ SmallVector<int, 16> IntMask;
+ IntMask.assign(Mask.begin(), Mask.end());
+ return CreateShuffleVector(V1, V2, IntMask, Name);
}
- Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<uint32_t> IntMask,
+ /// See class ShuffleVectorInst for a description of the mask representation.
+ Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
const Twine &Name = "") {
- Value *Mask = ConstantDataVector::get(Context, IntMask);
- return CreateShuffleVector(V1, V2, Mask, Name);
+ if (auto *V1C = dyn_cast<Constant>(V1))
+ if (auto *V2C = dyn_cast<Constant>(V2))
+ return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
+ return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
}
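[Illustrative note, not part of the patch] The preferred mask form is now ArrayRef<int>; emitReverse4 is a hypothetical sketch assuming a 4-element vector operand.

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitReverse4(IRBuilderBase &B, Value *V) {
      int Mask[] = {3, 2, 1, 0};
      return B.CreateShuffleVector(V, UndefValue::get(V->getType()), Mask);
    }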
Value *CreateExtractValue(Value *Agg,
@@ -2607,219 +2472,45 @@ public:
/// This is intended to implement C-style pointer subtraction. As such, the
/// pointers must be appropriately aligned for their element types and
/// pointing into the same object.
- Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "") {
- assert(LHS->getType() == RHS->getType() &&
- "Pointer subtraction operand types must match!");
- auto *ArgType = cast<PointerType>(LHS->getType());
- Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
- Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
- Value *Difference = CreateSub(LHS_int, RHS_int);
- return CreateExactSDiv(Difference,
- ConstantExpr::getSizeOf(ArgType->getElementType()),
- Name);
- }
+ Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
/// Create a launder.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
/// address space before call and casted back to Ptr type after call.
- Value *CreateLaunderInvariantGroup(Value *Ptr) {
- assert(isa<PointerType>(Ptr->getType()) &&
- "launder.invariant.group only applies to pointers.");
- // FIXME: we could potentially avoid casts to/from i8*.
- auto *PtrType = Ptr->getType();
- auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
- if (PtrType != Int8PtrTy)
- Ptr = CreateBitCast(Ptr, Int8PtrTy);
- Module *M = BB->getParent()->getParent();
- Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
- M, Intrinsic::launder_invariant_group, {Int8PtrTy});
-
- assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
- FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
- Int8PtrTy &&
- "LaunderInvariantGroup should take and return the same type");
-
- CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
-
- if (PtrType != Int8PtrTy)
- return CreateBitCast(Fn, PtrType);
- return Fn;
- }
+ Value *CreateLaunderInvariantGroup(Value *Ptr);
/// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
/// address space before call and casted back to Ptr type after call.
- Value *CreateStripInvariantGroup(Value *Ptr) {
- assert(isa<PointerType>(Ptr->getType()) &&
- "strip.invariant.group only applies to pointers.");
-
- // FIXME: we could potentially avoid casts to/from i8*.
- auto *PtrType = Ptr->getType();
- auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
- if (PtrType != Int8PtrTy)
- Ptr = CreateBitCast(Ptr, Int8PtrTy);
- Module *M = BB->getParent()->getParent();
- Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
- M, Intrinsic::strip_invariant_group, {Int8PtrTy});
-
- assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
- FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
- Int8PtrTy &&
- "StripInvariantGroup should take and return the same type");
-
- CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
-
- if (PtrType != Int8PtrTy)
- return CreateBitCast(Fn, PtrType);
- return Fn;
- }
+ Value *CreateStripInvariantGroup(Value *Ptr);
/// Return a vector value that contains \arg V broadcasted to \p
/// NumElts elements.
- Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") {
- assert(NumElts > 0 && "Cannot splat to an empty vector!");
-
- // First insert it into an undef vector so we can shuffle it.
- Type *I32Ty = getInt32Ty();
- Value *Undef = UndefValue::get(VectorType::get(V->getType(), NumElts));
- V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
- Name + ".splatinsert");
-
- // Shuffle the value across the desired number of elements.
- Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32Ty, NumElts));
- return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
- }
+ Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
/// Return a value that has been extracted from a larger integer type.
Value *CreateExtractInteger(const DataLayout &DL, Value *From,
IntegerType *ExtractedTy, uint64_t Offset,
- const Twine &Name) {
- auto *IntTy = cast<IntegerType>(From->getType());
- assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
- DL.getTypeStoreSize(IntTy) &&
- "Element extends past full value");
- uint64_t ShAmt = 8 * Offset;
- Value *V = From;
- if (DL.isBigEndian())
- ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
- DL.getTypeStoreSize(ExtractedTy) - Offset);
- if (ShAmt) {
- V = CreateLShr(V, ShAmt, Name + ".shift");
- }
- assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
- "Cannot extract to a larger integer!");
- if (ExtractedTy != IntTy) {
- V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
- }
- return V;
- }
+ const Twine &Name);
Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
unsigned Dimension, unsigned LastIndex,
- MDNode *DbgInfo) {
- assert(isa<PointerType>(Base->getType()) &&
- "Invalid Base ptr type for preserve.array.access.index.");
- auto *BaseType = Base->getType();
-
- Value *LastIndexV = getInt32(LastIndex);
- Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
- SmallVector<Value *, 4> IdxList;
- for (unsigned I = 0; I < Dimension; ++I)
- IdxList.push_back(Zero);
- IdxList.push_back(LastIndexV);
-
- Type *ResultType =
- GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
-
- Module *M = BB->getParent()->getParent();
- Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
- M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
-
- Value *DimV = getInt32(Dimension);
- CallInst *Fn =
- CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
- if (DbgInfo)
- Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
-
- return Fn;
- }
+ MDNode *DbgInfo);
Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
- MDNode *DbgInfo) {
- assert(isa<PointerType>(Base->getType()) &&
- "Invalid Base ptr type for preserve.union.access.index.");
- auto *BaseType = Base->getType();
-
- Module *M = BB->getParent()->getParent();
- Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
- M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
-
- Value *DIIndex = getInt32(FieldIndex);
- CallInst *Fn =
- CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
- if (DbgInfo)
- Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
-
- return Fn;
- }
+ MDNode *DbgInfo);
Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
unsigned Index, unsigned FieldIndex,
- MDNode *DbgInfo) {
- assert(isa<PointerType>(Base->getType()) &&
- "Invalid Base ptr type for preserve.struct.access.index.");
- auto *BaseType = Base->getType();
-
- Value *GEPIndex = getInt32(Index);
- Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
- Type *ResultType =
- GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
-
- Module *M = BB->getParent()->getParent();
- Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
- M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
-
- Value *DIIndex = getInt32(FieldIndex);
- CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
- {Base, GEPIndex, DIIndex});
- if (DbgInfo)
- Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
-
- return Fn;
- }
+ MDNode *DbgInfo);
private:
/// Helper function that creates an assume intrinsic call that
- /// represents an alignment assumption on the provided Ptr, Mask, Type
- /// and Offset. It may be sometimes useful to do some other logic
- /// based on this alignment check, thus it can be stored into 'TheCheck'.
+ /// represents an alignment assumption on the provided pointer \p PtrValue
+ /// with offset \p OffsetValue and alignment value \p AlignValue.
CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
- Value *PtrValue, Value *Mask,
- Type *IntPtrTy, Value *OffsetValue,
- Value **TheCheck) {
- Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
-
- if (OffsetValue) {
- bool IsOffsetZero = false;
- if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
- IsOffsetZero = CI->isZero();
-
- if (!IsOffsetZero) {
- if (OffsetValue->getType() != IntPtrTy)
- OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
- "offsetcast");
- PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
- }
- }
-
- Value *Zero = ConstantInt::get(IntPtrTy, 0);
- Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
- Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
- if (TheCheck)
- *TheCheck = InvCond;
-
- return CreateAssumption(InvCond);
- }
+ Value *PtrValue, Value *AlignValue,
+ Value *OffsetValue);
public:
/// Create an assume intrinsic call that represents an alignment
@@ -2828,23 +2519,9 @@ public:
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
- ///
- /// It may be sometimes useful to do some other logic
- /// based on this alignment check, thus it can be stored into 'TheCheck'.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
unsigned Alignment,
- Value *OffsetValue = nullptr,
- Value **TheCheck = nullptr) {
- assert(isa<PointerType>(PtrValue->getType()) &&
- "trying to create an alignment assumption on a non-pointer?");
- assert(Alignment != 0 && "Invalid Alignment");
- auto *PtrTy = cast<PointerType>(PtrValue->getType());
- Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
-
- Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
- return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
- OffsetValue, TheCheck);
- }
+ Value *OffsetValue = nullptr);
/// Create an assume intrinsic call that represents an alignment
/// assumption on the provided pointer.
@@ -2853,29 +2530,88 @@ public:
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
///
- /// It may be sometimes useful to do some other logic
- /// based on this alignment check, thus it can be stored into 'TheCheck'.
- ///
/// This overload handles the condition where the Alignment is dependent
/// on an existing value rather than a static value.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
Value *Alignment,
- Value *OffsetValue = nullptr,
- Value **TheCheck = nullptr) {
- assert(isa<PointerType>(PtrValue->getType()) &&
- "trying to create an alignment assumption on a non-pointer?");
- auto *PtrTy = cast<PointerType>(PtrValue->getType());
- Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
+ Value *OffsetValue = nullptr);
+};
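For orientation, a hedged sketch of the alignment-assumption overloads declared
above (not part of the patch; B, DL, and Ptr are assumed to be an IRBuilder,
the module DataLayout, and a pointer-typed Value):

  B.CreateAlignmentAssumption(DL, Ptr, 32);                // Ptr is 32-byte aligned
  B.CreateAlignmentAssumption(DL, Ptr, 32, B.getInt64(4)); // Ptr - 4 is 32-byte aligned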
+
+/// This provides a uniform API for creating instructions and inserting
+/// them into a basic block: either at the end of a BasicBlock, or at a specific
+/// iterator location in a block.
+///
+/// Note that the builder does not expose the full generality of LLVM
+/// instructions. For access to extra instruction properties, use the mutators
+/// (e.g. setVolatile) on the instructions after they have been
+/// created. Convenience state exists to specify fast-math flags and fp-math
+/// tags.
+///
+/// The first template argument specifies a class to use for creating constants.
+/// This defaults to creating minimally folded constants. The second template
+/// argument allows clients to specify custom insertion hooks that are called on
+/// each newly inserted instruction.
+template <typename FolderTy = ConstantFolder,
+ typename InserterTy = IRBuilderDefaultInserter>
+class IRBuilder : public IRBuilderBase {
+private:
+ FolderTy Folder;
+ InserterTy Inserter;
+
+public:
+ IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
+ Folder(Folder), Inserter(Inserter) {}
+
+ explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
+
+ explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles), Folder(Folder) {
+ SetInsertPoint(TheBB);
+ }
- if (Alignment->getType() != IntPtrTy)
- Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
- "alignmentcast");
+ explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles) {
+ SetInsertPoint(TheBB);
+ }
- Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");
+ explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles) {
+ SetInsertPoint(IP);
+ }
- return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
- OffsetValue, TheCheck);
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles), Folder(Folder) {
+ SetInsertPoint(TheBB, IP);
}
+
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles) {
+ SetInsertPoint(TheBB, IP);
+ }
+
+ /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
+ /// or FastMathFlagGuard instead.
+ IRBuilder(const IRBuilder &) = delete;
+
+ InserterTy &getInserter() { return Inserter; }
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
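As a reference for the constructors above, a minimal usage sketch (F is an
assumed pre-existing Function; not part of the patch):

  llvm::IRBuilder<> B(&F->getEntryBlock());   // insert at the end of the block
  llvm::Value *Sum = B.CreateAdd(B.getInt32(1), B.getInt32(2), "sum");
  llvm::IRBuilder<llvm::NoFolder> NB(&F->getEntryBlock()); // custom folder type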
diff --git a/llvm/include/llvm/IR/IRBuilderFolder.h b/llvm/include/llvm/IR/IRBuilderFolder.h
new file mode 100644
index 000000000000..e781e8e094af
--- /dev/null
+++ b/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -0,0 +1,141 @@
+//===- IRBuilderFolder.h - Const folder interface for IRBuilder -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the constant folding interface used by IRBuilder.
+// It is implemented by ConstantFolder (default), TargetFolder and NoFolder.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_IRBUILDERFOLDER_H
+#define LLVM_IR_IRBUILDERFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+
+/// IRBuilderFolder - Interface for constant folding in IRBuilder.
+class IRBuilderFolder {
+public:
+ virtual ~IRBuilderFolder();
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ virtual Value *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const = 0;
+ virtual Value *CreateFAdd(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const = 0;
+ virtual Value *CreateFSub(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const = 0;
+ virtual Value *CreateFMul(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const = 0;
+ virtual Value *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const = 0;
+ virtual Value *CreateFDiv(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateURem(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateSRem(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateFRem(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateShl(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const = 0;
+ virtual Value *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const = 0;
+ virtual Value *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const = 0;
+ virtual Value *CreateAnd(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateOr(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateXor(Constant *LHS, Constant *RHS) const = 0;
+ virtual Value *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const = 0;
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ virtual Value *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const = 0;
+ virtual Value *CreateFNeg(Constant *C) const = 0;
+ virtual Value *CreateNot(Constant *C) const = 0;
+ virtual Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const = 0;
+
+ //===--------------------------------------------------------------------===//
+ // Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ virtual Value *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const = 0;
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ virtual Value *CreateGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const = 0;
+ virtual Value *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const = 0;
+ virtual Value *CreateInBoundsGetElementPtr(
+ Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const = 0;
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ virtual Value *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const = 0;
+ virtual Value *CreateInBoundsGetElementPtr(
+ Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const = 0;
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ virtual Value *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const = 0;
+ virtual Value *CreatePointerCast(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+ Type *DestTy) const = 0;
+ virtual Value *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const = 0;
+ virtual Value *CreateFPCast(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreateBitCast(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreateIntToPtr(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreatePtrToInt(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreateZExtOrBitCast(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreateSExtOrBitCast(Constant *C, Type *DestTy) const = 0;
+ virtual Value *CreateTruncOrBitCast(Constant *C, Type *DestTy) const = 0;
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ virtual Value *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const = 0;
+ virtual Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const = 0;
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ virtual Value *CreateSelect(Constant *C, Constant *True,
+ Constant *False) const = 0;
+ virtual Value *CreateExtractElement(Constant *Vec, Constant *Idx) const = 0;
+ virtual Value *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const = 0;
+ virtual Value *CreateShuffleVector(Constant *V1, Constant *V2,
+ ArrayRef<int> Mask) const = 0;
+ virtual Value *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const = 0;
+ virtual Value *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_IRBUILDERFOLDER_H
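A short illustration of the contract this interface captures, assuming BB is
an existing BasicBlock (a sketch, not part of the patch):

  llvm::IRBuilder<> FB(BB);               // default ConstantFolder
  llvm::IRBuilder<llvm::NoFolder> NB(BB); // folding disabled
  llvm::Value *A = FB.CreateAdd(FB.getInt32(2), FB.getInt32(3)); // folded: i32 5
  llvm::Value *B = NB.CreateAdd(NB.getInt32(2), NB.getInt32(3)); // emits: add i32 2, 3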
diff --git a/llvm/include/llvm/IR/IRPrintingPasses.h b/llvm/include/llvm/IR/IRPrintingPasses.h
index 230db988f737..3a1c489ee09f 100644
--- a/llvm/include/llvm/IR/IRPrintingPasses.h
+++ b/llvm/include/llvm/IR/IRPrintingPasses.h
@@ -19,17 +19,10 @@
#define LLVM_IR_IRPRINTINGPASSES_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/PassManager.h"
#include <string>
namespace llvm {
-class Pass;
-class Function;
-class FunctionPass;
-class Module;
-class ModulePass;
-class PreservedAnalyses;
-class raw_ostream;
-template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
/// Create and return a pass that writes the module to the specified
/// \c raw_ostream.
@@ -71,7 +64,7 @@ extern bool shouldPrintAfterPass(StringRef);
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
/// functions above to create passes for use with the legacy pass manager.
-class PrintModulePass {
+class PrintModulePass : public PassInfoMixin<PrintModulePass> {
raw_ostream &OS;
std::string Banner;
bool ShouldPreserveUseListOrder;
@@ -82,15 +75,13 @@ public:
bool ShouldPreserveUseListOrder = false);
PreservedAnalyses run(Module &M, AnalysisManager<Module> &);
-
- static StringRef name() { return "PrintModulePass"; }
};
/// Pass for printing a Function as LLVM's text IR assembly.
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
/// functions above to create passes for use with the legacy pass manager.
-class PrintFunctionPass {
+class PrintFunctionPass : public PassInfoMixin<PrintFunctionPass> {
raw_ostream &OS;
std::string Banner;
@@ -99,8 +90,6 @@ public:
PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");
PreservedAnalyses run(Function &F, AnalysisManager<Function> &);
-
- static StringRef name() { return "PrintFunctionPass"; }
};
} // End llvm namespace
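Since PassInfoMixin now supplies name(), these passes compose directly with
the new pass manager. A hedged sketch (MPM is an assumed, already-configured
ModulePassManager; the banner text is illustrative):

  MPM.addPass(llvm::PrintModulePass(llvm::outs(), "; module after pipeline"));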
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 72d8ad1501ae..b6f377093337 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <string>
#include <vector>
@@ -359,6 +360,96 @@ public:
RC = High - 1;
return true;
}
+
+ static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
+ std::vector<StringRef> Result;
+ if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
+ Result.push_back("sideeffect");
+ if (ExtraInfo & InlineAsm::Extra_MayLoad)
+ Result.push_back("mayload");
+ if (ExtraInfo & InlineAsm::Extra_MayStore)
+ Result.push_back("maystore");
+ if (ExtraInfo & InlineAsm::Extra_IsConvergent)
+ Result.push_back("isconvergent");
+ if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+ Result.push_back("alignstack");
+
+ AsmDialect Dialect =
+ InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect));
+
+ if (Dialect == InlineAsm::AD_ATT)
+ Result.push_back("attdialect");
+ if (Dialect == InlineAsm::AD_Intel)
+ Result.push_back("inteldialect");
+
+ return Result;
+ }
+
+ static StringRef getKindName(unsigned Kind) {
+ switch (Kind) {
+ case InlineAsm::Kind_RegUse:
+ return "reguse";
+ case InlineAsm::Kind_RegDef:
+ return "regdef";
+ case InlineAsm::Kind_RegDefEarlyClobber:
+ return "regdef-ec";
+ case InlineAsm::Kind_Clobber:
+ return "clobber";
+ case InlineAsm::Kind_Imm:
+ return "imm";
+ case InlineAsm::Kind_Mem:
+ return "mem";
+ default:
+ llvm_unreachable("Unknown operand kind");
+ }
+ }
+
+ static StringRef getMemConstraintName(unsigned Constraint) {
+ switch (Constraint) {
+ case InlineAsm::Constraint_es:
+ return "es";
+ case InlineAsm::Constraint_i:
+ return "i";
+ case InlineAsm::Constraint_m:
+ return "m";
+ case InlineAsm::Constraint_o:
+ return "o";
+ case InlineAsm::Constraint_v:
+ return "v";
+ case InlineAsm::Constraint_Q:
+ return "Q";
+ case InlineAsm::Constraint_R:
+ return "R";
+ case InlineAsm::Constraint_S:
+ return "S";
+ case InlineAsm::Constraint_T:
+ return "T";
+ case InlineAsm::Constraint_Um:
+ return "Um";
+ case InlineAsm::Constraint_Un:
+ return "Un";
+ case InlineAsm::Constraint_Uq:
+ return "Uq";
+ case InlineAsm::Constraint_Us:
+ return "Us";
+ case InlineAsm::Constraint_Ut:
+ return "Ut";
+ case InlineAsm::Constraint_Uv:
+ return "Uv";
+ case InlineAsm::Constraint_Uy:
+ return "Uy";
+ case InlineAsm::Constraint_X:
+ return "X";
+ case InlineAsm::Constraint_Z:
+ return "Z";
+ case InlineAsm::Constraint_ZC:
+ return "ZC";
+ case InlineAsm::Constraint_Zy:
+ return "Zy";
+ default:
+ llvm_unreachable("Unknown memory constraint");
+ }
+ }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 6168c877a2be..4dbdc66d1366 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -10,7 +10,6 @@
#ifndef LLVM_IR_INSTVISITOR_H
#define LLVM_IR_INSTVISITOR_H
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -217,18 +216,9 @@ public:
RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
-
- // Call, Invoke and CallBr are slightly different as they delegate first
- // through a generic CallSite visitor.
- RetTy visitCallInst(CallInst &I) {
- return static_cast<SubClass*>(this)->visitCallSite(&I);
- }
- RetTy visitInvokeInst(InvokeInst &I) {
- return static_cast<SubClass*>(this)->visitCallSite(&I);
- }
- RetTy visitCallBrInst(CallBrInst &I) {
- return static_cast<SubClass *>(this)->visitCallSite(&I);
- }
+ RetTy visitCallInst(CallInst &I) { DELEGATE(CallBase); }
+ RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(CallBase); }
+ RetTy visitCallBrInst(CallBrInst &I) { DELEGATE(CallBase); }
// While terminators don't have a distinct type modeling them, we support
// intercepting them with a dedicated visitor callback.
@@ -280,16 +270,6 @@ public:
DELEGATE(Instruction);
}
- // Provide a legacy visitor for a 'callsite' that visits calls, invokes,
- // and calbrs.
- //
- // Prefer overriding the type system based `CallBase` instead.
- RetTy visitCallSite(CallSite CS) {
- assert(CS);
- Instruction &I = *CS.getInstruction();
- DELEGATE(CallBase);
- }
-
// If the user wants a 'default' case, they can choose to override this
// function. If this function is not overloaded in the user's subclass, then
// this instruction just gets ignored.
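In code form, the migration implied by this hunk: visitors that previously
overrode visitCallSite can override visitCallBase to observe calls, invokes,
and callbrs uniformly. A sketch (CallCounter is a hypothetical client type):

  struct CallCounter : llvm::InstVisitor<CallCounter> {
    unsigned Count = 0;
    // Reached through the CallInst/InvokeInst/CallBrInst delegations above.
    void visitCallBase(llvm::CallBase &CB) { ++Count; }
  };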
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index b2cdd58a5046..07af00ec9240 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -154,18 +154,20 @@ public:
}
#include "llvm/IR/Instruction.def"
- static UnaryOperator *CreateWithCopiedFlags(UnaryOps Opc,
- Value *V,
- Instruction *CopyO,
- const Twine &Name = "") {
- UnaryOperator *UO = Create(Opc, V, Name);
+ static UnaryOperator *
+ CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
+ const Twine &Name = "",
+ Instruction *InsertBefore = nullptr) {
+ UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
UO->copyIRFlags(CopyO);
return UO;
}
static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
- const Twine &Name = "") {
- return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name);
+ const Twine &Name = "",
+ Instruction *InsertBefore = nullptr) {
+ return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
+ InsertBefore);
}
UnaryOps getOpcode() const {
@@ -280,11 +282,6 @@ public:
const Twine &Name = "") {
return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
}
- static BinaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
- const Twine &Name = "") {
- Value *Zero = ConstantFP::getNegativeZero(Op->getType());
- return CreateWithCopiedFlags(Instruction::FSub, Zero, Op, FMFSource, Name);
- }
static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
const Twine &Name = "") {
@@ -390,10 +387,6 @@ public:
Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
- static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = nullptr);
- static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name,
- BasicBlock *InsertAtEnd);
static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
@@ -729,41 +722,43 @@ public:
/// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
/// FCMP_* values. Changing the bit patterns requires a potential change to
/// those passes.
- enum Predicate {
- // Opcode U L G E Intuitive operation
- FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
- FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
- FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
- FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
- FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
- FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
- FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
- FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
- FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
- FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
- FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
- FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
- FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
- FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
- FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
- FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
+ enum Predicate : unsigned {
+ // Opcode U L G E Intuitive operation
+ FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
+ FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
+ FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
+ FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
+ FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
+ FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
+ FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
+ FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
+ FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
+ FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
+ FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
+ FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
+ FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
+ FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
+ FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
+ FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
FIRST_FCMP_PREDICATE = FCMP_FALSE,
LAST_FCMP_PREDICATE = FCMP_TRUE,
BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
- ICMP_EQ = 32, ///< equal
- ICMP_NE = 33, ///< not equal
- ICMP_UGT = 34, ///< unsigned greater than
- ICMP_UGE = 35, ///< unsigned greater or equal
- ICMP_ULT = 36, ///< unsigned less than
- ICMP_ULE = 37, ///< unsigned less or equal
- ICMP_SGT = 38, ///< signed greater than
- ICMP_SGE = 39, ///< signed greater or equal
- ICMP_SLT = 40, ///< signed less than
- ICMP_SLE = 41, ///< signed less or equal
+ ICMP_EQ = 32, ///< equal
+ ICMP_NE = 33, ///< not equal
+ ICMP_UGT = 34, ///< unsigned greater than
+ ICMP_UGE = 35, ///< unsigned greater or equal
+ ICMP_ULT = 36, ///< unsigned less than
+ ICMP_ULE = 37, ///< unsigned less or equal
+ ICMP_SGT = 38, ///< signed greater than
+ ICMP_SGE = 39, ///< signed greater or equal
+ ICMP_SLT = 40, ///< signed less than
+ ICMP_SLE = 41, ///< signed less or equal
FIRST_ICMP_PREDICATE = ICMP_EQ,
LAST_ICMP_PREDICATE = ICMP_SLE,
BAD_ICMP_PREDICATE = ICMP_SLE + 1
};
+ using PredicateField =
+ Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
protected:
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
@@ -804,15 +799,15 @@ public:
}
/// Return the predicate for this instruction.
- Predicate getPredicate() const {
- return Predicate(getSubclassDataFromInstruction());
- }
+ Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
/// Set the predicate for this instruction to the specified value.
- void setPredicate(Predicate P) { setInstructionSubclassData(P); }
+ void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
static bool isFPPredicate(Predicate P) {
- return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
+ assert(FIRST_FCMP_PREDICATE == 0 &&
+ "FIRST_FCMP_PREDICATE is required to be 0");
+ return P <= LAST_FCMP_PREDICATE;
}
static bool isIntPredicate(Predicate P) {
@@ -1066,7 +1061,7 @@ public:
: Tag(std::move(Tag)), Inputs(Inputs) {}
explicit OperandBundleDefT(const OperandBundleUse &OBU) {
- Tag = OBU.getTagName();
+ Tag = std::string(OBU.getTagName());
Inputs.insert(Inputs.end(), OBU.Inputs.begin(), OBU.Inputs.end());
}
@@ -1104,6 +1099,15 @@ using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
/// as cheap as most other operations on the base class.
class CallBase : public Instruction {
protected:
+ // The first two bits are reserved by CallInst for fast retrieval.
+ using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
+ using CallingConvField =
+ Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
+ CallingConv::MaxID>;
+ static_assert(
+ Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
+ "Bitfields must be contiguous");
+
/// The last operand is the called operand.
static constexpr int CalledOperandOpEndIdx = -1;
@@ -1137,6 +1141,15 @@ protected:
public:
using Instruction::getContext;
+ /// Create a clone of \p CB with a different set of operand bundles and
+ /// insert it before \p InsertPt.
+ ///
+ /// The returned call instruction is identical to \p CB in every way except that
+ /// the operand bundles for the new instruction are set to the operand bundles
+ /// in \p Bundles.
+ static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
+ Instruction *InsertPt = nullptr);
+
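A hedged sketch of the new hook (CB is an assumed existing call site; this
rewrites it with an empty bundle set):

  llvm::CallBase *NewCB = llvm::CallBase::Create(CB, llvm::None, CB);
  CB->replaceAllUsesWith(NewCB);
  CB->eraseFromParent();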
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call ||
I->getOpcode() == Instruction::Invoke ||
@@ -1293,10 +1306,6 @@ public:
Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
- // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in
- // the near future.
- Value *getCalledValue() const { return getCalledOperand(); }
-
const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
@@ -1360,14 +1369,11 @@ public:
}
CallingConv::ID getCallingConv() const {
- return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
+ return getSubclassData<CallingConvField>();
}
void setCallingConv(CallingConv::ID CC) {
- auto ID = static_cast<unsigned>(CC);
- assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
- (ID << 2));
+ setSubclassData<CallingConvField>(CC);
}
/// Check if this call is an inline asm statement.
@@ -1552,10 +1558,12 @@ public:
return paramHasAttr(ArgNo, Attribute::InAlloca);
}
- /// Determine whether this argument is passed by value or in an alloca.
- bool isByValOrInAllocaArgument(unsigned ArgNo) const {
+ /// Determine whether this argument is passed by value, in an alloca, or is
+ /// preallocated.
+ bool isPassPointeeByValueArgument(unsigned ArgNo) const {
return paramHasAttr(ArgNo, Attribute::ByVal) ||
- paramHasAttr(ArgNo, Attribute::InAlloca);
+ paramHasAttr(ArgNo, Attribute::InAlloca) ||
+ paramHasAttr(ArgNo, Attribute::Preallocated);
}
/// Determine if there is an inalloca argument. Only the last argument can
@@ -1584,10 +1592,8 @@ public:
dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
}
- /// Extract the alignment of the return value.
- /// FIXME: Remove this function once transition to Align is over.
- /// Use getRetAlign() instead.
- unsigned getRetAlignment() const {
+ LLVM_ATTRIBUTE_DEPRECATED(unsigned getRetAlignment() const,
+ "Use getRetAlign() instead") {
if (const auto MA = Attrs.getRetAlignment())
return MA->value();
return 0;
@@ -1597,9 +1603,8 @@ public:
MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
/// Extract the alignment for a call or parameter (0=unknown).
- /// FIXME: Remove this function once transition to Align is over.
- /// Use getParamAlign() instead.
- unsigned getParamAlignment(unsigned ArgNo) const {
+ LLVM_ATTRIBUTE_DEPRECATED(unsigned getParamAlignment(unsigned ArgNo) const,
+ "Use getParamAlign() instead") {
if (const auto MA = Attrs.getParamAlignment(ArgNo))
return MA->value();
return 0;
@@ -1616,6 +1621,12 @@ public:
return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
}
+ /// Extract the preallocated type for a call or parameter.
+ Type *getParamPreallocatedType(unsigned ArgNo) const {
+ Type *Ty = Attrs.getParamPreallocatedType(ArgNo);
+ return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
+ }
+
/// Extract the number of dereferenceable bytes for a call or
/// parameter (0=unknown).
uint64_t getDereferenceableBytes(unsigned i) const {
@@ -1727,6 +1738,12 @@ public:
addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
}
+ /// Determine if the call cannot be tail merged.
+ bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
+ void setCannotMerge() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::NoMerge);
+ }
+
/// Determine if the invoke is convergent
bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
void setConvergent() {
@@ -1876,10 +1893,7 @@ public:
/// OperandBundleUser to a vector of OperandBundleDefs. Note:
/// OperandBundleUses and OperandBundleDefs are non-trivially *different*
/// representations of operand bundles (see documentation above).
- void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
- for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
- Defs.emplace_back(getOperandBundleAt(i));
- }
+ void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
/// Return the operand bundle for the operand at index OpIdx.
///
@@ -2107,16 +2121,14 @@ public:
op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
const unsigned BeginIndex);
+public:
/// Return the BundleOpInfo for the operand at index OpIdx.
///
/// It is an error to call this with an OpIdx that does not correspond to a
/// bundle operand.
+ BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
- for (auto &BOI : bundle_op_infos())
- if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
- return BOI;
-
- llvm_unreachable("Did not find operand bundle for operand!");
+ return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
}
protected:
@@ -2136,7 +2148,7 @@ private:
bool hasFnAttrOnCalledFunction(StringRef Kind) const;
template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
- if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
+ if (Attrs.hasFnAttribute(Kind))
return true;
// Operand bundles override attributes on the called function, but don't
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index 3bfa0e4afc39..a03eac0ad40d 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -15,6 +15,7 @@
#define LLVM_IR_INSTRUCTION_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
@@ -22,6 +23,7 @@
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
@@ -45,11 +47,37 @@ class Instruction : public User,
BasicBlock *Parent;
DebugLoc DbgLoc; // 'dbg' Metadata cache.
- enum {
- /// This is a bit stored in the SubClassData field which indicates whether
- /// this instruction has metadata attached to it or not.
- HasMetadataBit = 1 << 15
- };
+ /// Relative order of this instruction in its parent basic block. Used for
+ /// O(1) local dominance checks between instructions.
+ mutable unsigned Order = 0;
+
+protected:
+ // The first 15 bits of `Value::SubclassData` are available for subclasses of
+ // `Instruction` to use.
+ using OpaqueField = Bitfield::Element<uint16_t, 0, 15>;
+
+ // Template alias so that all Instruction subclasses storing an alignment use
+ // the same definition.
+ // Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent =
+ // 2^29. We store them as Log2(Alignment), so we need 5 bits to encode the 30
+ // possible values.
+ template <unsigned Offset>
+ using AlignmentBitfieldElementT =
+ typename Bitfield::Element<unsigned, Offset, 5,
+ Value::MaxAlignmentExponent>;
+
+ template <unsigned Offset>
+ using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>;
+
+ template <unsigned Offset>
+ using AtomicOrderingBitfieldElementT =
+ typename Bitfield::Element<AtomicOrdering, Offset, 3,
+ AtomicOrdering::LAST>;
+
+private:
+ // The last bit is used to store whether the instruction has metadata attached
+ // or not.
+ using HasMetadataField = Bitfield::Element<bool, 15, 1>;
protected:
~Instruction(); // Use deleteValue() to delete a generic Instruction.
@@ -117,6 +145,13 @@ public:
/// the basic block that MovePos lives in, right after MovePos.
void moveAfter(Instruction *MovePos);
+ /// Given an instruction Other in the same basic block as this instruction,
+ /// return true if this instruction comes before Other. In this worst case,
+ /// this takes linear time in the number of instructions in the block. The
+ /// results are cached, so in common cases when the block remains unmodified,
+ /// it takes constant time.
+ bool comesBefore(const Instruction *Other) const;
+
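A one-liner showing the intended query, assuming I1 and I2 are instructions in
the same block (repeated queries on an unmodified block reuse the cached
numbering):

  bool Precedes = I1->comesBefore(I2); // true iff I1 precedes I2 in the block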
//===--------------------------------------------------------------------===//
// Subclass classification.
//===--------------------------------------------------------------------===//
@@ -321,9 +356,6 @@ public:
/// Returns false if no metadata was found.
bool extractProfTotalWeight(uint64_t &TotalVal) const;
- /// Sets the branch_weights metadata to \p W for CallInst.
- void setProfWeight(uint64_t W);
-
/// Set the debug location information for this instruction.
void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
@@ -385,6 +417,11 @@ public:
/// this flag.
void setHasAllowReciprocal(bool B);
+ /// Set or clear the allow-contract flag on this instruction, which must be
+ /// an operator which supports this flag. See LangRef.html for the meaning of
+ /// this flag.
+ void setHasAllowContract(bool B);
+
/// Set or clear the approximate-math-functions flag on this instruction,
/// which must be an operator which supports this flag. See LangRef.html for
/// the meaning of this flag.
@@ -458,7 +495,7 @@ public:
private:
/// Return true if we have an entry in the on-the-side metadata hash.
bool hasMetadataHashEntry() const {
- return (getSubclassDataFromValue() & HasMetadataBit) != 0;
+ return Bitfield::test<HasMetadataField>(getSubclassDataFromValue());
}
// These are all implemented in Metadata.cpp.
@@ -738,6 +775,7 @@ public:
private:
friend class SymbolTableListTraits<Instruction>;
+ friend class BasicBlock; // For renumbering.
// Shadow Value::setValueSubclassData with a private forwarding method so that
// subclasses cannot accidentally use it.
@@ -749,10 +787,7 @@ private:
return Value::getSubclassDataFromValue();
}
- void setHasMetadataHashEntry(bool V) {
- setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) |
- (V ? HasMetadataBit : 0));
- }
+ void setHasMetadataHashEntry(bool V) { setSubclassData<HasMetadataField>(V); }
void setParent(BasicBlock *P);
@@ -760,14 +795,24 @@ protected:
// Instruction subclasses can stick up to 15 bits of stuff into the
// SubclassData field of instruction with these members.
- // Verify that only the low 15 bits are used.
- void setInstructionSubclassData(unsigned short D) {
- assert((D & HasMetadataBit) == 0 && "Out of range value put into field");
- setValueSubclassData((getSubclassDataFromValue() & HasMetadataBit) | D);
- }
-
- unsigned getSubclassDataFromInstruction() const {
- return getSubclassDataFromValue() & ~HasMetadataBit;
+ template <typename BitfieldElement>
+ typename BitfieldElement::Type getSubclassData() const {
+ static_assert(
+ std::is_same<BitfieldElement, HasMetadataField>::value ||
+ !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
+ "Must not overlap with the metadata bit");
+ return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
+ }
+
+ template <typename BitfieldElement>
+ void setSubclassData(typename BitfieldElement::Type Value) {
+ static_assert(
+ std::is_same<BitfieldElement, HasMetadataField>::value ||
+ !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
+ "Must not overlap with the metadata bit");
+ auto Storage = getSubclassDataFromValue();
+ Bitfield::set<BitfieldElement>(Storage, Value);
+ setValueSubclassData(Storage);
}
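For context, a simplified sketch of the llvm/ADT/Bitfields.h primitives these
templates wrap (the field layout here is illustrative):

  using VolatileField = llvm::Bitfield::Element<bool, 0, 1>; // type, offset, size
  unsigned Storage = 0;
  llvm::Bitfield::set<VolatileField>(Storage, true);
  bool IsVolatile = llvm::Bitfield::get<VolatileField>(Storage); // true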
Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index b73d5274238c..0afc585dfbe5 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -16,6 +16,7 @@
#define LLVM_IR_INSTRUCTIONS_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
@@ -59,6 +60,13 @@ class LLVMContext;
class AllocaInst : public UnaryInstruction {
Type *AllocatedType;
+ using AlignmentField = AlignmentBitfieldElementT<0>;
+ using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
+ using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
+ static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
+ SwiftErrorField>(),
+ "Bitfields must be contiguous");
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -66,21 +74,19 @@ protected:
AllocaInst *cloneImpl() const;
public:
- explicit AllocaInst(Type *Ty, unsigned AddrSpace,
- Value *ArraySize = nullptr,
- const Twine &Name = "",
- Instruction *InsertBefore = nullptr);
+ explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ const Twine &Name, Instruction *InsertBefore);
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
const Twine &Name, BasicBlock *InsertAtEnd);
- AllocaInst(Type *Ty, unsigned AddrSpace,
- const Twine &Name, Instruction *InsertBefore = nullptr);
+ AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
+ Instruction *InsertBefore);
AllocaInst(Type *Ty, unsigned AddrSpace,
const Twine &Name, BasicBlock *InsertAtEnd);
- AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, MaybeAlign Align,
+ AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
const Twine &Name = "", Instruction *InsertBefore = nullptr);
- AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, MaybeAlign Align,
+ AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
const Twine &Name, BasicBlock *InsertAtEnd);
/// Return true if there is an allocation size parameter to the allocation
@@ -109,12 +115,16 @@ public:
/// Return the alignment of the memory that is being allocated by the
/// instruction.
- unsigned getAlignment() const {
- if (const auto MA = decodeMaybeAlign(getSubclassDataFromInstruction() & 31))
- return MA->value();
- return 0;
+ Align getAlign() const {
+ return Align(1ULL << getSubclassData<AlignmentField>());
}
- void setAlignment(MaybeAlign Align);
+
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
+ }
+
+ // FIXME: Remove this once the transition to Align is over.
+ unsigned getAlignment() const { return getAlign().value(); }
/// Return true if this alloca is in the entry block of the function and is a
/// constant size. If so, the code generator will fold it into the
@@ -124,25 +134,18 @@ public:
/// Return true if this alloca is used as an inalloca argument to a call. Such
/// allocas are never considered static even if they are in the entry block.
bool isUsedWithInAlloca() const {
- return getSubclassDataFromInstruction() & 32;
+ return getSubclassData<UsedWithInAllocaField>();
}
/// Specify whether this alloca is used to represent the arguments to a call.
void setUsedWithInAlloca(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
- (V ? 32 : 0));
+ setSubclassData<UsedWithInAllocaField>(V);
}
/// Return true if this alloca is used as a swifterror argument to a call.
- bool isSwiftError() const {
- return getSubclassDataFromInstruction() & 64;
- }
-
+ bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
/// Specify whether this alloca is used to represent a swifterror.
- void setSwiftError(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
- (V ? 64 : 0));
- }
+ void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
@@ -155,8 +158,9 @@ public:
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
};
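The log2 encoding used by getAlign/setAlignment above round-trips as follows
(AI is an assumed AllocaInst pointer):

  AI->setAlignment(llvm::Align(16)); // stores Log2(16) == 4 in AlignmentField
  llvm::Align A = AI->getAlign();    // rebuilds Align(1ULL << 4) == Align(16)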
@@ -167,6 +171,13 @@ private:
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
+ using VolatileField = BoolBitfieldElementT<0>;
+ using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
+ using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
+ static_assert(
+ Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
+ "Bitfields must be contiguous");
+
void AssertOK();
protected:
@@ -176,94 +187,53 @@ protected:
LoadInst *cloneImpl() const;
public:
- LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
- Instruction *InsertBefore = nullptr);
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
+ Instruction *InsertBefore);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- Instruction *InsertBefore = nullptr);
+ Instruction *InsertBefore);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- MaybeAlign Align, Instruction *InsertBefore = nullptr);
+ Align Align, Instruction *InsertBefore = nullptr);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- MaybeAlign Align, BasicBlock *InsertAtEnd);
+ Align Align, BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- MaybeAlign Align, AtomicOrdering Order,
+ Align Align, AtomicOrdering Order,
SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+ Align Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
- // Deprecated [opaque pointer types]
- explicit LoadInst(Value *Ptr, const Twine &NameStr = "",
- Instruction *InsertBefore = nullptr)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- InsertAtEnd) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
- Instruction *InsertBefore = nullptr)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- isVolatile, InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
- BasicBlock *InsertAtEnd)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- isVolatile, InsertAtEnd) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
- Instruction *InsertBefore = nullptr)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- isVolatile, Align, InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
- BasicBlock *InsertAtEnd)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- isVolatile, Align, InsertAtEnd) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
- AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
- Instruction *InsertBefore = nullptr)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- isVolatile, Align, Order, SSID, InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
- AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
- : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
- isVolatile, Align, Order, SSID, InsertAtEnd) {}
-
/// Return true if this is a load from a volatile memory location.
- bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+ bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile load or not.
- void setVolatile(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- (V ? 1 : 0));
- }
+ void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Return the alignment of the access that is being performed.
/// FIXME: Remove this function once transition to Align is over.
/// Use getAlign() instead.
- unsigned getAlignment() const {
- if (const auto MA = getAlign())
- return MA->value();
- return 0;
- }
+ unsigned getAlignment() const { return getAlign().value(); }
/// Return the alignment of the access that is being performed.
- MaybeAlign getAlign() const {
- return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+ Align getAlign() const {
+ return Align(1ULL << (getSubclassData<AlignmentField>()));
}
- void setAlignment(MaybeAlign Alignment);
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
+ }
/// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+ return getSubclassData<OrderingField>();
}
-
/// Sets the ordering constraint of this load instruction. May not be Release
/// or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
- ((unsigned)Ordering << 7));
+ setSubclassData<OrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this load instruction.
@@ -313,8 +283,9 @@ public:
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this load instruction. Not quite enough
@@ -329,6 +300,13 @@ private:
/// An instruction for storing to memory.
class StoreInst : public Instruction {
+ using VolatileField = BoolBitfieldElementT<0>;
+ using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
+ using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
+ static_assert(
+ Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
+ "Bitfields must be contiguous");
+
void AssertOK();
protected:
@@ -340,17 +318,16 @@ protected:
public:
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
- Instruction *InsertBefore = nullptr);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
Instruction *InsertBefore = nullptr);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
BasicBlock *InsertAtEnd);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
@@ -359,13 +336,10 @@ public:
}
/// Return true if this is a store to a volatile memory location.
- bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+ bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile store or not.
- void setVolatile(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- (V ? 1 : 0));
- }
+ void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -373,28 +347,25 @@ public:
/// Return the alignment of the access that is being performed
/// FIXME: Remove this function once transition to Align is over.
/// Use getAlign() instead.
- unsigned getAlignment() const {
- if (const auto MA = getAlign())
- return MA->value();
- return 0;
- }
+ unsigned getAlignment() const { return getAlign().value(); }
- MaybeAlign getAlign() const {
- return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+ Align getAlign() const {
+ return Align(1ULL << (getSubclassData<AlignmentField>()));
}
- void setAlignment(MaybeAlign Alignment);
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
+ }
/// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+ return getSubclassData<OrderingField>();
}
/// Sets the ordering constraint of this store instruction. May not be
/// Acquire or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
- ((unsigned)Ordering << 7));
+ setSubclassData<OrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this store instruction.
@@ -447,8 +418,9 @@ public:
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this store instruction. Not quite enough
@@ -469,6 +441,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
+ using OrderingField = AtomicOrderingBitfieldElementT<0>;
+
void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
protected:
@@ -493,14 +467,13 @@ public:
/// Returns the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const {
- return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
+ return getSubclassData<OrderingField>();
}
/// Sets the ordering constraint of this fence instruction. May only be
/// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
void setOrdering(AtomicOrdering Ordering) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
- ((unsigned)Ordering << 1));
+ setSubclassData<OrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this fence instruction.
@@ -524,8 +497,9 @@ public:
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this fence instruction. Not quite enough
@@ -545,10 +519,15 @@ private:
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
- void Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SyncScope::ID SSID);
+ template <unsigned Offset>
+ using AtomicOrderingBitfieldElement =
+ typename Bitfield::Element<AtomicOrdering, Offset, 3,
+ AtomicOrdering::LAST>;
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -556,71 +535,82 @@ protected:
AtomicCmpXchgInst *cloneImpl() const;
public:
- AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
- AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID,
+ Instruction *InsertBefore = nullptr);
+ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SyncScope::ID SSID, BasicBlock *InsertAtEnd);
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID,
+ BasicBlock *InsertAtEnd);
// allocate space for exactly three operands
void *operator new(size_t s) {
return User::operator new(s, 3);
}
+ using VolatileField = BoolBitfieldElementT<0>;
+ using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
+ using SuccessOrderingField =
+ AtomicOrderingBitfieldElementT<WeakField::NextBit>;
+ using FailureOrderingField =
+ AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
+ using AlignmentField =
+ AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
+ static_assert(
+ Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
+ FailureOrderingField, AlignmentField>(),
+ "Bitfields must be contiguous");
+
+ /// Return the alignment of the memory location accessed by this cmpxchg
+ /// instruction.
+ Align getAlign() const {
+ return Align(1ULL << getSubclassData<AlignmentField>());
+ }
+
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
+ }
+
/// Return true if this is a cmpxchg from a volatile memory
/// location.
///
- bool isVolatile() const {
- return getSubclassDataFromInstruction() & 1;
- }
+ bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile cmpxchg.
///
- void setVolatile(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- (unsigned)V);
- }
+ void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Return true if this cmpxchg may spuriously fail.
- bool isWeak() const {
- return getSubclassDataFromInstruction() & 0x100;
- }
+ bool isWeak() const { return getSubclassData<WeakField>(); }
- void setWeak(bool IsWeak) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
- (IsWeak << 8));
- }
+ void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Returns the success ordering constraint of this cmpxchg instruction.
AtomicOrdering getSuccessOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ return getSubclassData<SuccessOrderingField>();
}
/// Sets the success ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
- ((unsigned)Ordering << 2));
+ setSubclassData<SuccessOrderingField>(Ordering);
}
/// Returns the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+ return getSubclassData<FailureOrderingField>();
}
/// Sets the failure ordering constraint of this cmpxchg instruction.
void setFailureOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
- ((unsigned)Ordering << 5));
+ setSubclassData<FailureOrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this cmpxchg instruction.
@@ -682,8 +672,9 @@ public:
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this cmpxchg instruction. Not quite
@@ -719,7 +710,7 @@ public:
/// the descriptions, 'p' is the pointer to the instruction's memory location,
/// 'old' is the initial value of *p, and 'v' is the other value passed to the
/// instruction. These instructions always return 'old'.
- enum BinOp {
+ enum BinOp : unsigned {
/// *p = v
Xchg,
/// *p = old + v
@@ -754,10 +745,21 @@ public:
BAD_BINOP
};
- AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+private:
+ template <unsigned Offset>
+ using AtomicOrderingBitfieldElement =
+ typename Bitfield::Element<AtomicOrdering, Offset, 3,
+ AtomicOrdering::LAST>;
+
+ template <unsigned Offset>
+ using BinOpBitfieldElement =
+ typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
+
+public:
+ AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
AtomicOrdering Ordering, SyncScope::ID SSID,
Instruction *InsertBefore = nullptr);
- AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
@@ -766,9 +768,16 @@ public:
return User::operator new(s, 2);
}
- BinOp getOperation() const {
- return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
- }
+ using VolatileField = BoolBitfieldElementT<0>;
+ using AtomicOrderingField =
+ AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
+ using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
+ using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
+ static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
+ OperationField, AlignmentField>(),
+ "Bitfields must be contiguous");
+
+ BinOp getOperation() const { return getSubclassData<OperationField>(); }
static StringRef getOperationName(BinOp Op);
@@ -783,38 +792,40 @@ public:
}
void setOperation(BinOp Operation) {
- unsigned short SubclassData = getSubclassDataFromInstruction();
- setInstructionSubclassData((SubclassData & 31) |
- (Operation << 5));
+ setSubclassData<OperationField>(Operation);
+ }
+
+  /// Return the alignment of the memory that is being operated on by this
+  /// instruction.
+ Align getAlign() const {
+ return Align(1ULL << getSubclassData<AlignmentField>());
+ }
+
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
}
/// Return true if this is a RMW on a volatile memory location.
///
- bool isVolatile() const {
- return getSubclassDataFromInstruction() & 1;
- }
+ bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile RMW or not.
///
- void setVolatile(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- (unsigned)V);
- }
+ void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Returns the ordering constraint of this rmw instruction.
AtomicOrdering getOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ return getSubclassData<AtomicOrderingField>();
}
/// Sets the ordering constraint of this rmw instruction.
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"atomicrmw instructions can only be atomic.");
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
- ((unsigned)Ordering << 2));
+ setSubclassData<AtomicOrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this rmw instruction.
@@ -852,13 +863,14 @@ public:
}
private:
- void Init(BinOp Operation, Value *Ptr, Value *Val,
+ void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
AtomicOrdering Ordering, SyncScope::ID SSID);
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this rmw instruction. Not quite enough
@@ -1004,16 +1016,23 @@ public:
return getPointerAddressSpace();
}
- /// Returns the type of the element that would be loaded with
- /// a load instruction with the specified parameters.
+ /// Returns the result type of a getelementptr with the given source
+ /// element type and indexes.
///
/// Null is returned if the indices are invalid for the specified
- /// pointer type.
- ///
+ /// source element type.
static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
+ /// Return the type of the element at the given index of an indexable
+ /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
+ ///
+ /// Returns null if the type can't be indexed, or the given index is not
+ /// legal for the given type.
+ static Type *getTypeAtIndex(Type *Ty, Value *Idx);
+ static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
+
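To make the two families of helpers concrete: getIndexedType consumes a whole GEP index list (whose first index steps over the pointer and leaves the type unchanged), while getTypeAtIndex descends exactly one aggregate level. A sketch, assuming an LLVMContext Ctx:

    // STy = { i32, [4 x float] }
    StructType *STy = StructType::get(
        Type::getInt32Ty(Ctx), ArrayType::get(Type::getFloatTy(Ctx), 4));

    uint64_t Idx[] = {0, 1, 2};  // ptr step, field 1, array element 2
    Type *T = GetElementPtrInst::getIndexedType(STy, Idx);           // float

    Type *Arr = GetElementPtrInst::getTypeAtIndex(STy, uint64_t(1)); // [4 x float]
    Type *Elt = GetElementPtrInst::getTypeAtIndex(Arr, uint64_t(2)); // float
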
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
inline op_iterator idx_end() { return op_end(); }
@@ -1055,14 +1074,14 @@ public:
Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
Ptr->getType()->getPointerAddressSpace());
// Vector GEP
- if (Ptr->getType()->isVectorTy()) {
- unsigned NumElem = Ptr->getType()->getVectorNumElements();
- return VectorType::get(PtrTy, NumElem);
+ if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
+ ElementCount EltCount = PtrVTy->getElementCount();
+ return VectorType::get(PtrTy, EltCount);
}
for (Value *Index : IdxList)
- if (Index->getType()->isVectorTy()) {
- unsigned NumElem = Index->getType()->getVectorNumElements();
- return VectorType::get(PtrTy, NumElem);
+ if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
+ ElementCount EltCount = IndexVTy->getElementCount();
+ return VectorType::get(PtrTy, EltCount);
}
// Scalar GEP
return PtrTy;
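
The rewrite above changes no results for fixed-width vectors; it routes the element count through ElementCount so scalable vectors are preserved too. For illustration, a hedged sketch of the computed type (Ctx, Base, and Idx are hypothetical, with Base of type <4 x i32*> and Idx a scalar i64):

    Type *RetTy = GetElementPtrInst::getGEPReturnType(
        Type::getInt32Ty(Ctx), Base, {Idx});
    // RetTy is <4 x i32*>: the scalar pointer result type, widened to the
    // pointer operand's element count.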
@@ -1532,58 +1551,6 @@ public:
NameStr, InsertAtEnd);
}
- // Deprecated [opaque pointer types]
- static CallInst *Create(Value *Func, const Twine &NameStr = "",
- Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, NameStr, InsertBefore);
- }
-
- // Deprecated [opaque pointer types]
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
- const Twine &NameStr,
- Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, NameStr, InsertBefore);
- }
-
- // Deprecated [opaque pointer types]
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles = None,
- const Twine &NameStr = "",
- Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, Bundles, NameStr, InsertBefore);
- }
-
- // Deprecated [opaque pointer types]
- static CallInst *Create(Value *Func, const Twine &NameStr,
- BasicBlock *InsertAtEnd) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, NameStr, InsertAtEnd);
- }
-
- // Deprecated [opaque pointer types]
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
- const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, NameStr, InsertAtEnd);
- }
-
- // Deprecated [opaque pointer types]
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles,
- const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, Bundles, NameStr, InsertAtEnd);
- }
-
/// Create a clone of \p CI with a different set of operand bundles and
/// insert it before \p InsertPt.
///
@@ -1632,37 +1599,38 @@ public:
BasicBlock *InsertAtEnd);
// Note that 'musttail' implies 'tail'.
- enum TailCallKind {
+ enum TailCallKind : unsigned {
TCK_None = 0,
TCK_Tail = 1,
TCK_MustTail = 2,
- TCK_NoTail = 3
+ TCK_NoTail = 3,
+ TCK_LAST = TCK_NoTail
};
+
+ using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
+ static_assert(
+ Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
+ "Bitfields must be contiguous");
+
TailCallKind getTailCallKind() const {
- return TailCallKind(getSubclassDataFromInstruction() & 3);
+ return getSubclassData<TailCallKindField>();
}
bool isTailCall() const {
- unsigned Kind = getSubclassDataFromInstruction() & 3;
+ TailCallKind Kind = getTailCallKind();
return Kind == TCK_Tail || Kind == TCK_MustTail;
}
- bool isMustTailCall() const {
- return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
- }
+ bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
- bool isNoTailCall() const {
- return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
- }
+ bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
- void setTailCall(bool isTC = true) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
- unsigned(isTC ? TCK_Tail : TCK_None));
+ void setTailCallKind(TailCallKind TCK) {
+ setSubclassData<TailCallKindField>(TCK);
}
- void setTailCallKind(TailCallKind TCK) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
- unsigned(TCK));
+ void setTailCall(bool IsTc = true) {
+ setTailCallKind(IsTc ? TCK_Tail : TCK_None);
}
/// Return true if the call can return twice
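A usage sketch for the accessors above, with CI a hypothetical CallInst*; note the relationships the enum implies:

    CI->setTailCallKind(CallInst::TCK_MustTail);
    assert(CI->isMustTailCall());
    assert(CI->isTailCall());        // 'musttail' implies 'tail'
    assert(!CI->isNoTailCall());
    CI->setTailCall(false);          // resets the kind to TCK_None
    assert(CI->getTailCallKind() == CallInst::TCK_None);
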
@@ -1685,8 +1653,9 @@ public:
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
};
@@ -1977,10 +1946,22 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
// ShuffleVectorInst Class
//===----------------------------------------------------------------------===//
+constexpr int UndefMaskElem = -1;
+
/// This instruction constructs a fixed permutation of two
/// input vectors.
///
+/// For each element of the result vector, the shuffle mask selects an element
+/// from one of the input vectors to copy to the result. Non-negative elements
+/// in the mask represent an index into the concatenated pair of input vectors.
+/// UndefMaskElem (-1) specifies that the result element is undefined.
+///
+/// For scalable vectors, all the elements of the mask must be 0 or -1. This
+/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
+ SmallVector<int, 4> ShuffleMask;
+ Constant *ShuffleMaskForBitcode;
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -1993,13 +1974,15 @@ public:
                    Instruction *InsertBefore = nullptr);
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr, BasicBlock *InsertAtEnd);
+ ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
+ const Twine &NameStr = "",
+                    Instruction *InsertBefore = nullptr);
+ ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
- // allocate space for exactly three operands
- void *operator new(size_t s) {
- return User::operator new(s, 3);
- }
+ void *operator new(size_t s) { return User::operator new(s, 2); }
- /// Swap the first 2 operands and adjust the mask to preserve the semantics
+ /// Swap the operands and adjust the mask to preserve the semantics
/// of the instruction.
void commute();
@@ -2007,6 +1990,8 @@ public:
/// formed with the specified operands.
static bool isValidOperands(const Value *V1, const Value *V2,
const Value *Mask);
+ static bool isValidOperands(const Value *V1, const Value *V2,
+ ArrayRef<int> Mask);
/// Overload to return most specific vector type.
///
@@ -2017,44 +2002,42 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- Constant *getMask() const {
- return cast<Constant>(getOperand(2));
- }
-
- /// Return the shuffle mask value for the specified element of the mask.
- /// Return -1 if the element is undef.
- static int getMaskValue(const Constant *Mask, unsigned Elt);
-
/// Return the shuffle mask value of this instruction for the given element
- /// index. Return -1 if the element is undef.
- int getMaskValue(unsigned Elt) const {
- return getMaskValue(getMask(), Elt);
- }
+ /// index. Return UndefMaskElem if the element is undef.
+ int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
/// Convert the input shuffle mask operand to a vector of integers. Undefined
- /// elements of the mask are returned as -1.
+ /// elements of the mask are returned as UndefMaskElem.
static void getShuffleMask(const Constant *Mask,
SmallVectorImpl<int> &Result);
/// Return the mask for this instruction as a vector of integers. Undefined
- /// elements of the mask are returned as -1.
+ /// elements of the mask are returned as UndefMaskElem.
void getShuffleMask(SmallVectorImpl<int> &Result) const {
- return getShuffleMask(getMask(), Result);
+ Result.assign(ShuffleMask.begin(), ShuffleMask.end());
}
- SmallVector<int, 16> getShuffleMask() const {
- SmallVector<int, 16> Mask;
- getShuffleMask(Mask);
- return Mask;
- }
+ /// Return the mask for this instruction, for use in bitcode.
+ ///
+  /// TODO: This is temporary until we decide on a new bitcode encoding for
+ /// shufflevector.
+ Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
+
+ static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
+ Type *ResultTy);
+
+ void setShuffleMask(ArrayRef<int> Mask);
+
+ ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
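
A sketch of the new integer-mask interface, assuming A and B are values of type <4 x i32> and Before is an insertion point:

    int Mask[] = {0, 5, 2, UndefMaskElem};
    auto *Shuf = new ShuffleVectorInst(A, B, Mask, "shuf", Before);
    // Result lanes: A[0], B[1], A[2], undef — indices 4..7 select from B.
    ArrayRef<int> M = Shuf->getShuffleMask();   // {0, 5, 2, -1}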
/// Return true if this shuffle returns a vector with a different number of
/// elements than its source vectors.
/// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
/// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
bool changesLength() const {
- unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
- unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
+ unsigned NumSourceElts =
+ cast<VectorType>(Op<0>()->getType())->getElementCount().Min;
+ unsigned NumMaskElts = ShuffleMask.size();
return NumSourceElts != NumMaskElts;
}
@@ -2062,8 +2045,9 @@ public:
/// elements than its source vectors.
/// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
bool increasesLength() const {
- unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
- unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
+ unsigned NumSourceElts =
+ cast<VectorType>(Op<0>()->getType())->getNumElements();
+ unsigned NumMaskElts = ShuffleMask.size();
return NumSourceElts < NumMaskElts;
}
@@ -2084,7 +2068,7 @@ public:
/// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
/// TODO: Optionally allow length-changing shuffles.
bool isSingleSource() const {
- return !changesLength() && isSingleSourceMask(getMask());
+ return !changesLength() && isSingleSourceMask(ShuffleMask);
}
/// Return true if this shuffle mask chooses elements from exactly one source
@@ -2105,7 +2089,7 @@ public:
/// from its input vectors.
/// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
bool isIdentity() const {
- return !changesLength() && isIdentityMask(getShuffleMask());
+ return !changesLength() && isIdentityMask(ShuffleMask);
}
/// Return true if this shuffle lengthens exactly one source vector with
@@ -2146,7 +2130,7 @@ public:
/// In that case, the shuffle is better classified as an identity shuffle.
/// TODO: Optionally allow length-changing shuffles.
bool isSelect() const {
- return !changesLength() && isSelectMask(getMask());
+ return !changesLength() && isSelectMask(ShuffleMask);
}
/// Return true if this shuffle mask swaps the order of elements from exactly
@@ -2166,7 +2150,7 @@ public:
/// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
/// TODO: Optionally allow length-changing shuffles.
bool isReverse() const {
- return !changesLength() && isReverseMask(getMask());
+ return !changesLength() && isReverseMask(ShuffleMask);
}
/// Return true if this shuffle mask chooses all elements with the same value
@@ -2188,7 +2172,7 @@ public:
/// TODO: Optionally allow length-changing shuffles.
/// TODO: Optionally allow splats from other elements.
bool isZeroEltSplat() const {
- return !changesLength() && isZeroEltSplatMask(getMask());
+ return !changesLength() && isZeroEltSplatMask(ShuffleMask);
}
/// Return true if this shuffle mask is a transpose mask.
@@ -2237,7 +2221,7 @@ public:
/// exact specification.
/// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
bool isTranspose() const {
- return !changesLength() && isTransposeMask(getMask());
+ return !changesLength() && isTransposeMask(ShuffleMask);
}
/// Return true if this shuffle mask is an extract subvector mask.
@@ -2255,8 +2239,8 @@ public:
/// Return true if this shuffle mask is an extract subvector mask.
bool isExtractSubvectorMask(int &Index) const {
- int NumSrcElts = Op<0>()->getType()->getVectorNumElements();
- return isExtractSubvectorMask(getMask(), NumSrcElts, Index);
+ int NumSrcElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
+ return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
}
/// Change values in a shuffle permute mask assuming the two vector operands
@@ -2282,9 +2266,8 @@ public:
};
template <>
-struct OperandTraits<ShuffleVectorInst> :
- public FixedNumOperandTraits<ShuffleVectorInst, 3> {
-};
+struct OperandTraits<ShuffleVectorInst>
+ : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
@@ -2610,15 +2593,11 @@ public:
using const_block_iterator = BasicBlock * const *;
block_iterator block_begin() {
- Use::UserRef *ref =
- reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
- return reinterpret_cast<block_iterator>(ref + 1);
+ return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
}
const_block_iterator block_begin() const {
- const Use::UserRef *ref =
- reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
- return reinterpret_cast<const_block_iterator>(ref + 1);
+ return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
}
block_iterator block_end() {
@@ -2795,6 +2774,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
/// cleanup.
///
class LandingPadInst : public Instruction {
+ using CleanupField = BoolBitfieldElementT<0>;
+
/// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
@@ -2839,13 +2820,10 @@ public:
/// Return 'true' if this landingpad instruction is a
/// cleanup. I.e., it should be run when unwinding even if its landing pad
/// doesn't catch the exception.
- bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
+ bool isCleanup() const { return getSubclassData<CleanupField>(); }
/// Indicate that this landingpad instruction is a cleanup.
- void setCleanup(bool V) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- (V ? 1 : 0));
- }
+ void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
/// Add a catch or filter clause to the landing pad.
void addClause(Constant *ClauseVal);
@@ -3781,49 +3759,6 @@ public:
IfException, Args, Bundles, NameStr, InsertAtEnd);
}
- // Deprecated [opaque pointer types]
- static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
- BasicBlock *IfException, ArrayRef<Value *> Args,
- const Twine &NameStr,
- Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, None, NameStr,
- InsertBefore);
- }
-
- // Deprecated [opaque pointer types]
- static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
- BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles = None,
- const Twine &NameStr = "",
- Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, Bundles, NameStr,
- InsertBefore);
- }
-
- // Deprecated [opaque pointer types]
- static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
- BasicBlock *IfException, ArrayRef<Value *> Args,
- const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, NameStr, InsertAtEnd);
- }
-
- // Deprecated [opaque pointer types]
- static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
- BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles,
- const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, Bundles, NameStr,
- InsertAtEnd);
- }
-
/// Create a clone of \p II with a different set of operand bundles and
/// insert it before \p InsertPt.
///
@@ -3833,15 +3768,6 @@ public:
static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
Instruction *InsertPt = nullptr);
- /// Determine if the call should not perform indirect branch tracking.
- bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
-
- /// Determine if the call cannot unwind.
- bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
- void setDoesNotThrow() {
- addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
- }
-
// get*Dest - Return the destination basic blocks...
BasicBlock *getNormalDest() const {
return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
@@ -3884,11 +3810,11 @@ public:
}
private:
-
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
};
@@ -4124,11 +4050,11 @@ public:
}
private:
-
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
};
@@ -4219,6 +4145,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
// CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public Instruction {
+ using UnwindDestField = BoolBitfieldElementT<0>;
+
/// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
@@ -4280,7 +4208,7 @@ public:
void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
// Accessor Methods for CatchSwitch stmt
- bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+ bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
bool unwindsToCaller() const { return !hasUnwindDest(); }
BasicBlock *getUnwindDest() const {
if (hasUnwindDest())
@@ -4566,6 +4494,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
//===----------------------------------------------------------------------===//
class CleanupReturnInst : public Instruction {
+ using UnwindDestField = BoolBitfieldElementT<0>;
+
private:
CleanupReturnInst(const CleanupReturnInst &RI);
CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
@@ -4606,7 +4536,7 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+ bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
bool unwindsToCaller() const { return !hasUnwindDest(); }
/// Convenience accessor.
@@ -4650,8 +4580,9 @@ private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
- void setInstructionSubclassData(unsigned short D) {
- Instruction::setInstructionSubclassData(D);
+ template <typename Bitfield>
+ void setSubclassData(typename Bitfield::Type Value) {
+ Instruction::setSubclassData<Bitfield>(Value);
}
};
@@ -5283,12 +5214,12 @@ inline Value *getPointerOperand(Value *V) {
}
/// A helper function that returns the alignment of a load or store instruction.
-inline MaybeAlign getLoadStoreAlignment(Value *I) {
+inline Align getLoadStoreAlignment(Value *I) {
assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction");
if (auto *LI = dyn_cast<LoadInst>(I))
- return MaybeAlign(LI->getAlignment());
- return MaybeAlign(cast<StoreInst>(I)->getAlignment());
+ return LI->getAlign();
+ return cast<StoreInst>(I)->getAlign();
}
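
Callers feel this signature change directly: with the underlying getAlign() accessors returning a plain Align, there is no optional left to unwrap. A sketch, with I a hypothetical pointer to a load or store instruction:

    Align A = getLoadStoreAlignment(I);
    uint64_t Bytes = A.value();  // always a valid power of two; no MaybeAlign dance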
/// A helper function that returns the address space of the pointer operand of
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 42a5564a4488..7a8898464e66 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -38,829 +38,904 @@
namespace llvm {
- /// A wrapper class for inspecting calls to intrinsic functions.
- /// This allows the standard isa/dyncast/cast functionality to work with calls
- /// to intrinsic functions.
- class IntrinsicInst : public CallInst {
- public:
- IntrinsicInst() = delete;
- IntrinsicInst(const IntrinsicInst &) = delete;
- IntrinsicInst &operator=(const IntrinsicInst &) = delete;
-
- /// Return the intrinsic ID of this intrinsic.
- Intrinsic::ID getIntrinsicID() const {
- return getCalledFunction()->getIntrinsicID();
- }
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const CallInst *I) {
- if (const Function *CF = I->getCalledFunction())
- return CF->isIntrinsic();
+/// A wrapper class for inspecting calls to intrinsic functions.
+/// This allows the standard isa/dyn_cast/cast functionality to work with calls
+/// to intrinsic functions.
+class IntrinsicInst : public CallInst {
+public:
+ IntrinsicInst() = delete;
+ IntrinsicInst(const IntrinsicInst &) = delete;
+ IntrinsicInst &operator=(const IntrinsicInst &) = delete;
+
+ /// Return the intrinsic ID of this intrinsic.
+ Intrinsic::ID getIntrinsicID() const {
+ return getCalledFunction()->getIntrinsicID();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const CallInst *I) {
+ if (const Function *CF = I->getCalledFunction())
+ return CF->isIntrinsic();
+ return false;
+ }
+ static bool classof(const Value *V) {
+ return isa<CallInst>(V) && classof(cast<CallInst>(V));
+ }
+};
+
+/// Check if \p ID corresponds to a debug info intrinsic.
+static inline bool isDbgInfoIntrinsic(Intrinsic::ID ID) {
+ switch (ID) {
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::dbg_addr:
+ case Intrinsic::dbg_label:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// This is the common base class for debug info intrinsics.
+class DbgInfoIntrinsic : public IntrinsicInst {
+public:
+ /// \name Casting methods
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ return isDbgInfoIntrinsic(I->getIntrinsicID());
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ /// @}
+};
+
+/// This is the common base class for debug info intrinsics for variables.
+class DbgVariableIntrinsic : public DbgInfoIntrinsic {
+public:
+ /// Get the location corresponding to the variable referenced by the debug
+ /// info intrinsic. Depending on the intrinsic, this could be the
+ /// variable's value or its address.
+ Value *getVariableLocation(bool AllowNullOp = true) const;
+
+  /// Does this describe the address of a local variable? True for dbg.addr
+ /// and dbg.declare, but not dbg.value, which describes its value.
+ bool isAddressOfVariable() const {
+ return getIntrinsicID() != Intrinsic::dbg_value;
+ }
+
+ DILocalVariable *getVariable() const {
+ return cast<DILocalVariable>(getRawVariable());
+ }
+
+ DIExpression *getExpression() const {
+ return cast<DIExpression>(getRawExpression());
+ }
+
+ Metadata *getRawVariable() const {
+ return cast<MetadataAsValue>(getArgOperand(1))->getMetadata();
+ }
+
+ Metadata *getRawExpression() const {
+ return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
+ }
+
+ /// Get the size (in bits) of the variable, or fragment of the variable that
+ /// is described.
+ Optional<uint64_t> getFragmentSizeInBits() const;
+
+ /// \name Casting methods
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::dbg_addr:
+ return true;
+ default:
return false;
}
- static bool classof(const Value *V) {
- return isa<CallInst>(V) && classof(cast<CallInst>(V));
- }
- };
-
- /// This is the common base class for debug info intrinsics.
- class DbgInfoIntrinsic : public IntrinsicInst {
- public:
- /// \name Casting methods
- /// @{
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::dbg_declare:
- case Intrinsic::dbg_value:
- case Intrinsic::dbg_addr:
- case Intrinsic::dbg_label:
- return true;
- default: return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- /// @}
- };
-
- /// This is the common base class for debug info intrinsics for variables.
- class DbgVariableIntrinsic : public DbgInfoIntrinsic {
- public:
- /// Get the location corresponding to the variable referenced by the debug
- /// info intrinsic. Depending on the intrinsic, this could be the
- /// variable's value or its address.
- Value *getVariableLocation(bool AllowNullOp = true) const;
-
- /// Does this describe the address of a local variable. True for dbg.addr
- /// and dbg.declare, but not dbg.value, which describes its value.
- bool isAddressOfVariable() const {
- return getIntrinsicID() != Intrinsic::dbg_value;
- }
-
- DILocalVariable *getVariable() const {
- return cast<DILocalVariable>(getRawVariable());
- }
-
- DIExpression *getExpression() const {
- return cast<DIExpression>(getRawExpression());
- }
-
- Metadata *getRawVariable() const {
- return cast<MetadataAsValue>(getArgOperand(1))->getMetadata();
- }
-
- Metadata *getRawExpression() const {
- return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
- }
-
- /// Get the size (in bits) of the variable, or fragment of the variable that
- /// is described.
- Optional<uint64_t> getFragmentSizeInBits() const;
-
- /// \name Casting methods
- /// @{
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::dbg_declare:
- case Intrinsic::dbg_value:
- case Intrinsic::dbg_addr:
- return true;
- default: return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- /// @}
- };
-
- /// This represents the llvm.dbg.declare instruction.
- class DbgDeclareInst : public DbgVariableIntrinsic {
- public:
- Value *getAddress() const { return getVariableLocation(); }
-
- /// \name Casting methods
- /// @{
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::dbg_declare;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- /// @}
- };
-
- /// This represents the llvm.dbg.addr instruction.
- class DbgAddrIntrinsic : public DbgVariableIntrinsic {
- public:
- Value *getAddress() const { return getVariableLocation(); }
-
- /// \name Casting methods
- /// @{
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::dbg_addr;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This represents the llvm.dbg.value instruction.
- class DbgValueInst : public DbgVariableIntrinsic {
- public:
- Value *getValue() const {
- return getVariableLocation(/* AllowNullOp = */ false);
- }
-
- /// \name Casting methods
- /// @{
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::dbg_value;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- /// @}
- };
-
- /// This represents the llvm.dbg.label instruction.
- class DbgLabelInst : public DbgInfoIntrinsic {
- public:
- DILabel *getLabel() const {
- return cast<DILabel>(getRawLabel());
- }
-
- Metadata *getRawLabel() const {
- return cast<MetadataAsValue>(getArgOperand(0))->getMetadata();
- }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- /// @{
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::dbg_label;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- /// @}
- };
-
- /// This is the common base class for constrained floating point intrinsics.
- class ConstrainedFPIntrinsic : public IntrinsicInst {
- public:
- bool isUnaryOp() const;
- bool isTernaryOp() const;
- Optional<fp::RoundingMode> getRoundingMode() const;
- Optional<fp::ExceptionBehavior> getExceptionBehavior() const;
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I);
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// Constrained floating point compare intrinsics.
- class ConstrainedFPCmpIntrinsic : public ConstrainedFPIntrinsic {
- public:
- FCmpInst::Predicate getPredicate() const;
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::experimental_constrained_fcmp:
- case Intrinsic::experimental_constrained_fcmps:
- return true;
- default: return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents an intrinsic that is based on a binary operation.
- /// This includes op.with.overflow and saturating add/sub intrinsics.
- class BinaryOpIntrinsic : public IntrinsicInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::usub_with_overflow:
- case Intrinsic::ssub_with_overflow:
- case Intrinsic::umul_with_overflow:
- case Intrinsic::smul_with_overflow:
- case Intrinsic::uadd_sat:
- case Intrinsic::sadd_sat:
- case Intrinsic::usub_sat:
- case Intrinsic::ssub_sat:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-
- Value *getLHS() const { return const_cast<Value*>(getArgOperand(0)); }
- Value *getRHS() const { return const_cast<Value*>(getArgOperand(1)); }
-
- /// Returns the binary operation underlying the intrinsic.
- Instruction::BinaryOps getBinaryOp() const;
-
- /// Whether the intrinsic is signed or unsigned.
- bool isSigned() const;
-
- /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
- unsigned getNoWrapKind() const;
- };
-
- /// Represents an op.with.overflow intrinsic.
- class WithOverflowInst : public BinaryOpIntrinsic {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::usub_with_overflow:
- case Intrinsic::ssub_with_overflow:
- case Intrinsic::umul_with_overflow:
- case Intrinsic::smul_with_overflow:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// Represents a saturating add/sub intrinsic.
- class SaturatingInst : public BinaryOpIntrinsic {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::uadd_sat:
- case Intrinsic::sadd_sat:
- case Intrinsic::usub_sat:
- case Intrinsic::ssub_sat:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// Common base class for all memory intrinsics. Simply provides
- /// common methods.
- /// Written as CRTP to avoid a common base class amongst the
- /// three atomicity hierarchies.
- template <typename Derived> class MemIntrinsicBase : public IntrinsicInst {
- private:
- enum { ARG_DEST = 0, ARG_LENGTH = 2 };
-
- public:
- Value *getRawDest() const {
- return const_cast<Value *>(getArgOperand(ARG_DEST));
- }
- const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
- Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
-
- Value *getLength() const {
- return const_cast<Value *>(getArgOperand(ARG_LENGTH));
- }
- const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
- Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
-
- /// This is just like getRawDest, but it strips off any cast
- /// instructions (including addrspacecast) that feed it, giving the
- /// original input. The returned value is guaranteed to be a pointer.
- Value *getDest() const { return getRawDest()->stripPointerCasts(); }
-
- unsigned getDestAddressSpace() const {
- return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ /// @}
+};
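
A usage sketch for the accessors above, with I a hypothetical Instruction reference:

    if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I)) {
      DILocalVariable *Var = DVI->getVariable();   // the source-level variable
      DIExpression *Expr = DVI->getExpression();   // how the location is computed
      if (DVI->isAddressOfVariable())
        ; // dbg.declare / dbg.addr: the location is the variable's address
      else
        ; // dbg.value: the location is the variable's value
      (void)Var; (void)Expr;
    }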
+
+/// This represents the llvm.dbg.declare instruction.
+class DbgDeclareInst : public DbgVariableIntrinsic {
+public:
+ Value *getAddress() const { return getVariableLocation(); }
+
+ /// \name Casting methods
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::dbg_declare;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ /// @}
+};
+
+/// This represents the llvm.dbg.addr instruction.
+class DbgAddrIntrinsic : public DbgVariableIntrinsic {
+public:
+ Value *getAddress() const { return getVariableLocation(); }
+
+ /// \name Casting methods
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::dbg_addr;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.dbg.value instruction.
+class DbgValueInst : public DbgVariableIntrinsic {
+public:
+ Value *getValue() const {
+ return getVariableLocation(/* AllowNullOp = */ false);
+ }
+
+ /// \name Casting methods
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::dbg_value;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ /// @}
+};
+
+/// This represents the llvm.dbg.label instruction.
+class DbgLabelInst : public DbgInfoIntrinsic {
+public:
+ DILabel *getLabel() const { return cast<DILabel>(getRawLabel()); }
+
+ Metadata *getRawLabel() const {
+ return cast<MetadataAsValue>(getArgOperand(0))->getMetadata();
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::dbg_label;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ /// @}
+};
+
+/// This is the common base class for vector predication intrinsics.
+class VPIntrinsic : public IntrinsicInst {
+public:
+ static Optional<int> GetMaskParamPos(Intrinsic::ID IntrinsicID);
+ static Optional<int> GetVectorLengthParamPos(Intrinsic::ID IntrinsicID);
+
+  /// The llvm.vp.* intrinsic ID for the given instruction opcode.
+ static Intrinsic::ID GetForOpcode(unsigned OC);
+
+ // Whether \p ID is a VP intrinsic ID.
+ static bool IsVPIntrinsic(Intrinsic::ID);
+
+ /// \return the mask parameter or nullptr.
+ Value *getMaskParam() const;
+
+ /// \return the vector length parameter or nullptr.
+ Value *getVectorLengthParam() const;
+
+ /// \return whether the vector length param can be ignored.
+ bool canIgnoreVectorLengthParam() const;
+
+ /// \return the static element count (vector number of elements) the vector
+ /// length parameter applies to.
+ ElementCount getStaticVectorLength() const;
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return IsVPIntrinsic(I->getIntrinsicID());
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ // Equivalent non-predicated opcode
+ unsigned getFunctionalOpcode() const {
+ return GetFunctionalOpcodeForVP(getIntrinsicID());
+ }
+
+ // Equivalent non-predicated opcode
+ static unsigned GetFunctionalOpcodeForVP(Intrinsic::ID ID);
+};
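
A sketch of how a consumer might query the new class, with I a hypothetical Instruction reference:

    if (auto *VPI = dyn_cast<VPIntrinsic>(&I)) {
      Value *Mask = VPI->getMaskParam();         // per-lane predicate, or nullptr
      Value *EVL = VPI->getVectorLengthParam();  // explicit vector length, or nullptr
      if (VPI->canIgnoreVectorLengthParam())
        ; // EVL covers the whole vector; it can be treated as absent
      unsigned OC = VPI->getFunctionalOpcode();  // e.g. Instruction::Add for llvm.vp.add
      (void)Mask; (void)EVL; (void)OC;
    }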
+
+/// This is the common base class for constrained floating point intrinsics.
+class ConstrainedFPIntrinsic : public IntrinsicInst {
+public:
+ bool isUnaryOp() const;
+ bool isTernaryOp() const;
+ Optional<RoundingMode> getRoundingMode() const;
+ Optional<fp::ExceptionBehavior> getExceptionBehavior() const;
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I);
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// Constrained floating point compare intrinsics.
+class ConstrainedFPCmpIntrinsic : public ConstrainedFPIntrinsic {
+public:
+ FCmpInst::Predicate getPredicate() const;
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::experimental_constrained_fcmp:
+ case Intrinsic::experimental_constrained_fcmps:
+ return true;
+ default:
+ return false;
}
-
- /// FIXME: Remove this function once transition to Align is over.
- /// Use getDestAlign() instead.
- unsigned getDestAlignment() const { return getParamAlignment(ARG_DEST); }
- MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }
-
- /// Set the specified arguments of the instruction.
- void setDest(Value *Ptr) {
- assert(getRawDest()->getType() == Ptr->getType() &&
- "setDest called with pointer of wrong type!");
- setArgOperand(ARG_DEST, Ptr);
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class represents an intrinsic that is based on a binary operation.
+/// This includes op.with.overflow and saturating add/sub intrinsics.
+class BinaryOpIntrinsic : public IntrinsicInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::usub_sat:
+ case Intrinsic::ssub_sat:
+ return true;
+ default:
+ return false;
}
-
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- void setDestAlignment(unsigned Alignment) {
- setDestAlignment(MaybeAlign(Alignment));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); }
+ Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); }
+
+ /// Returns the binary operation underlying the intrinsic.
+ Instruction::BinaryOps getBinaryOp() const;
+
+ /// Whether the intrinsic is signed or unsigned.
+ bool isSigned() const;
+
+ /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
+ unsigned getNoWrapKind() const;
+};
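
A usage sketch, with I a hypothetical Instruction reference:

    if (auto *WO = dyn_cast<WithOverflowInst>(&I)) {
      Value *L = WO->getLHS(), *R = WO->getRHS();
      Instruction::BinaryOps Op = WO->getBinaryOp(); // e.g. Instruction::Add
      bool Signed = WO->isSigned();                  // sadd/ssub/smul vs. uadd/usub/umul
      (void)L; (void)R; (void)Op; (void)Signed;
    }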
+
+/// Represents an op.with.overflow intrinsic.
+class WithOverflowInst : public BinaryOpIntrinsic {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ return true;
+ default:
+ return false;
}
- void setDestAlignment(MaybeAlign Alignment) {
- removeParamAttr(ARG_DEST, Attribute::Alignment);
- if (Alignment)
- addParamAttr(ARG_DEST,
- Attribute::getWithAlignment(getContext(), *Alignment));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// Represents a saturating add/sub intrinsic.
+class SaturatingInst : public BinaryOpIntrinsic {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::uadd_sat:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::usub_sat:
+ case Intrinsic::ssub_sat:
+ return true;
+ default:
+ return false;
}
- void setDestAlignment(Align Alignment) {
- removeParamAttr(ARG_DEST, Attribute::Alignment);
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// Common base class for all memory intrinsics. Simply provides
+/// common methods.
+/// Written as CRTP to avoid a common base class amongst the
+/// three atomicity hierarchies.
+template <typename Derived> class MemIntrinsicBase : public IntrinsicInst {
+private:
+ enum { ARG_DEST = 0, ARG_LENGTH = 2 };
+
+public:
+ Value *getRawDest() const {
+ return const_cast<Value *>(getArgOperand(ARG_DEST));
+ }
+ const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+ Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+ Value *getLength() const {
+ return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+ }
+ const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+ Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+ /// This is just like getRawDest, but it strips off any cast
+ /// instructions (including addrspacecast) that feed it, giving the
+ /// original input. The returned value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
+ /// FIXME: Remove this function once transition to Align is over.
+ /// Use getDestAlign() instead.
+ unsigned getDestAlignment() const {
+ if (auto MA = getParamAlign(ARG_DEST))
+ return MA->value();
+ return 0;
+ }
+ MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }
+
+ /// Set the specified arguments of the instruction.
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(ARG_DEST, Ptr);
+ }
+
+ /// FIXME: Remove this function once transition to Align is over.
+ /// Use the version that takes MaybeAlign instead of this one.
+ void setDestAlignment(unsigned Alignment) {
+ setDestAlignment(MaybeAlign(Alignment));
+ }
+ void setDestAlignment(MaybeAlign Alignment) {
+ removeParamAttr(ARG_DEST, Attribute::Alignment);
+ if (Alignment)
addParamAttr(ARG_DEST,
- Attribute::getWithAlignment(getContext(), Alignment));
- }
-
- void setLength(Value *L) {
- assert(getLength()->getType() == L->getType() &&
- "setLength called with value of wrong type!");
- setArgOperand(ARG_LENGTH, L);
- }
- };
-
- /// Common base class for all memory transfer intrinsics. Simply provides
- /// common methods.
- template <class BaseCL> class MemTransferBase : public BaseCL {
- private:
- enum { ARG_SOURCE = 1 };
-
- public:
- /// Return the arguments to the instruction.
- Value *getRawSource() const {
- return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE));
- }
- const Use &getRawSourceUse() const {
- return BaseCL::getArgOperandUse(ARG_SOURCE);
- }
- Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); }
-
- /// This is just like getRawSource, but it strips off any cast
- /// instructions that feed it, giving the original input. The returned
- /// value is guaranteed to be a pointer.
- Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-
- unsigned getSourceAddressSpace() const {
- return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
- }
-
- /// FIXME: Remove this function once transition to Align is over.
- /// Use getSourceAlign() instead.
- unsigned getSourceAlignment() const {
- return BaseCL::getParamAlignment(ARG_SOURCE);
- }
-
- MaybeAlign getSourceAlign() const {
- return BaseCL::getParamAlign(ARG_SOURCE);
- }
-
- void setSource(Value *Ptr) {
- assert(getRawSource()->getType() == Ptr->getType() &&
- "setSource called with pointer of wrong type!");
- BaseCL::setArgOperand(ARG_SOURCE, Ptr);
- }
-
- /// FIXME: Remove this function once transition to Align is over.
- /// Use the version that takes MaybeAlign instead of this one.
- void setSourceAlignment(unsigned Alignment) {
- setSourceAlignment(MaybeAlign(Alignment));
- }
- void setSourceAlignment(MaybeAlign Alignment) {
- BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
- if (Alignment)
- BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
- BaseCL::getContext(), *Alignment));
- }
- void setSourceAlignment(Align Alignment) {
- BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
+ Attribute::getWithAlignment(getContext(), *Alignment));
+ }
+ void setDestAlignment(Align Alignment) {
+ removeParamAttr(ARG_DEST, Attribute::Alignment);
+ addParamAttr(ARG_DEST,
+ Attribute::getWithAlignment(getContext(), Alignment));
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(ARG_LENGTH, L);
+ }
+};
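
The alignment setters above rewrite a parameter attribute rather than a bitfield, so clearing works by passing an empty MaybeAlign. A sketch, with MI a hypothetical pointer to a MemIntrinsic-derived instruction:

    MI->setDestAlignment(Align(16));     // installs align 16 on the dest argument
    MaybeAlign DA = MI->getDestAlign();  // Align(16)
    MI->setDestAlignment(MaybeAlign());  // removes the attribute; getDestAlign() -> None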
+
+/// Common base class for all memory transfer intrinsics. Simply provides
+/// common methods.
+template <class BaseCL> class MemTransferBase : public BaseCL {
+private:
+ enum { ARG_SOURCE = 1 };
+
+public:
+ /// Return the arguments to the instruction.
+ Value *getRawSource() const {
+ return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE));
+ }
+ const Use &getRawSourceUse() const {
+ return BaseCL::getArgOperandUse(ARG_SOURCE);
+ }
+ Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); }
+
+ /// This is just like getRawSource, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+ unsigned getSourceAddressSpace() const {
+ return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+ }
+
+ /// FIXME: Remove this function once transition to Align is over.
+ /// Use getSourceAlign() instead.
+ unsigned getSourceAlignment() const {
+ if (auto MA = BaseCL::getParamAlign(ARG_SOURCE))
+ return MA->value();
+ return 0;
+ }
+
+ MaybeAlign getSourceAlign() const {
+ return BaseCL::getParamAlign(ARG_SOURCE);
+ }
+
+ void setSource(Value *Ptr) {
+ assert(getRawSource()->getType() == Ptr->getType() &&
+ "setSource called with pointer of wrong type!");
+ BaseCL::setArgOperand(ARG_SOURCE, Ptr);
+ }
+
+ /// FIXME: Remove this function once transition to Align is over.
+ /// Use the version that takes MaybeAlign instead of this one.
+ void setSourceAlignment(unsigned Alignment) {
+ setSourceAlignment(MaybeAlign(Alignment));
+ }
+ void setSourceAlignment(MaybeAlign Alignment) {
+ BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
+ if (Alignment)
BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
- BaseCL::getContext(), Alignment));
- }
- };
-
- /// Common base class for all memset intrinsics. Simply provides
- /// common methods.
- template <class BaseCL> class MemSetBase : public BaseCL {
- private:
- enum { ARG_VALUE = 1 };
-
- public:
- Value *getValue() const {
- return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE));
- }
- const Use &getValueUse() const {
- return BaseCL::getArgOperandUse(ARG_VALUE);
- }
- Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); }
-
- void setValue(Value *Val) {
- assert(getValue()->getType() == Val->getType() &&
- "setValue called with value of wrong type!");
- BaseCL::setArgOperand(ARG_VALUE, Val);
- }
- };
-
- // The common base class for the atomic memset/memmove/memcpy intrinsics
- // i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
- class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
- private:
- enum { ARG_ELEMENTSIZE = 3 };
-
- public:
- Value *getRawElementSizeInBytes() const {
- return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
- }
-
- ConstantInt *getElementSizeInBytesCst() const {
- return cast<ConstantInt>(getRawElementSizeInBytes());
- }
-
- uint32_t getElementSizeInBytes() const {
- return getElementSizeInBytesCst()->getZExtValue();
- }
-
- void setElementSizeInBytes(Constant *V) {
- assert(V->getType() == Type::getInt8Ty(getContext()) &&
- "setElementSizeInBytes called with value of wrong type!");
- setArgOperand(ARG_ELEMENTSIZE, V);
- }
-
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy_element_unordered_atomic:
- case Intrinsic::memmove_element_unordered_atomic:
- case Intrinsic::memset_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents atomic memset intrinsic
- // i.e. llvm.element.unordered.atomic.memset
- class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- // This class wraps the atomic memcpy/memmove intrinsics
- // i.e. llvm.element.unordered.atomic.memcpy/memmove
- class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy_element_unordered_atomic:
- case Intrinsic::memmove_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents the atomic memcpy intrinsic
- /// i.e. llvm.element.unordered.atomic.memcpy
- class AtomicMemCpyInst : public AtomicMemTransferInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents the atomic memmove intrinsic
- /// i.e. llvm.element.unordered.atomic.memmove
- class AtomicMemMoveInst : public AtomicMemTransferInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This is the common base class for memset/memcpy/memmove.
- class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
- private:
- enum { ARG_VOLATILE = 3 };
-
- public:
- ConstantInt *getVolatileCst() const {
- return cast<ConstantInt>(
- const_cast<Value *>(getArgOperand(ARG_VOLATILE)));
- }
-
- bool isVolatile() const {
- return !getVolatileCst()->isZero();
- }
-
- void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memmove:
- case Intrinsic::memset:
- return true;
- default: return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class wraps the llvm.memset intrinsic.
- class MemSetInst : public MemSetBase<MemIntrinsic> {
- public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class wraps the llvm.memcpy/memmove intrinsics.
- class MemTransferInst : public MemTransferBase<MemIntrinsic> {
- public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy ||
- I->getIntrinsicID() == Intrinsic::memmove;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class wraps the llvm.memcpy intrinsic.
- class MemCpyInst : public MemTransferInst {
- public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class wraps the llvm.memmove intrinsic.
- class MemMoveInst : public MemTransferInst {
- public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memmove;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- // The common base class for any memset/memmove/memcpy intrinsics;
- // whether they be atomic or non-atomic.
- // i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
- // and llvm.memset/memcpy/memmove
- class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
- public:
- bool isVolatile() const {
- // Only the non-atomic intrinsics can be volatile
- if (auto *MI = dyn_cast<MemIntrinsic>(this))
- return MI->isVolatile();
+ BaseCL::getContext(), *Alignment));
+ }
+ void setSourceAlignment(Align Alignment) {
+ BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
+ BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
+ BaseCL::getContext(), Alignment));
+ }
+};
+
+/// Common base class for all memset intrinsics. Simply provides
+/// common methods.
+template <class BaseCL> class MemSetBase : public BaseCL {
+private:
+ enum { ARG_VALUE = 1 };
+
+public:
+ Value *getValue() const {
+ return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE));
+ }
+ const Use &getValueUse() const { return BaseCL::getArgOperandUse(ARG_VALUE); }
+ Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); }
+
+ void setValue(Value *Val) {
+ assert(getValue()->getType() == Val->getType() &&
+ "setValue called with value of wrong type!");
+ BaseCL::setArgOperand(ARG_VALUE, Val);
+ }
+};
+
+// The common base class for the atomic memset/memmove/memcpy intrinsics
+// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
+class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
+private:
+ enum { ARG_ELEMENTSIZE = 3 };
+
+public:
+ Value *getRawElementSizeInBytes() const {
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ ConstantInt *getElementSizeInBytesCst() const {
+ return cast<ConstantInt>(getRawElementSizeInBytes());
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ return getElementSizeInBytesCst()->getZExtValue();
+ }
+
+ void setElementSizeInBytes(Constant *V) {
+ assert(V->getType() == Type::getInt8Ty(getContext()) &&
+ "setElementSizeInBytes called with value of wrong type!");
+ setArgOperand(ARG_ELEMENTSIZE, V);
+ }
+
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy_element_unordered_atomic:
+ case Intrinsic::memmove_element_unordered_atomic:
+ case Intrinsic::memset_element_unordered_atomic:
+ return true;
+ default:
return false;
}
-
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memmove:
- case Intrinsic::memset:
- case Intrinsic::memcpy_element_unordered_atomic:
- case Intrinsic::memmove_element_unordered_atomic:
- case Intrinsic::memset_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents any memset intrinsic
- // i.e. llvm.element.unordered.atomic.memset
- // and llvm.memset
- class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memset:
- case Intrinsic::memset_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- // This class wraps any memcpy/memmove intrinsics
- // i.e. llvm.element.unordered.atomic.memcpy/memmove
- // and llvm.memcpy/memmove
- class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memmove:
- case Intrinsic::memcpy_element_unordered_atomic:
- case Intrinsic::memmove_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents any memcpy intrinsic
- /// i.e. llvm.element.unordered.atomic.memcpy
- /// and llvm.memcpy
- class AnyMemCpyInst : public AnyMemTransferInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memcpy_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This class represents any memmove intrinsic
- /// i.e. llvm.element.unordered.atomic.memmove
- /// and llvm.memmove
- class AnyMemMoveInst : public AnyMemTransferInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memmove:
- case Intrinsic::memmove_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// This represents the llvm.va_start intrinsic.
- class VAStartInst : public IntrinsicInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::vastart;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-
- Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
- };
-
- /// This represents the llvm.va_end intrinsic.
- class VAEndInst : public IntrinsicInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::vaend;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-
- Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
- };
-
- /// This represents the llvm.va_copy intrinsic.
- class VACopyInst : public IntrinsicInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::vacopy;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-
- Value *getDest() const { return const_cast<Value*>(getArgOperand(0)); }
- Value *getSrc() const { return const_cast<Value*>(getArgOperand(1)); }
- };
-
- /// This represents the llvm.instrprof_increment intrinsic.
- class InstrProfIncrementInst : public IntrinsicInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::instrprof_increment;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-
- GlobalVariable *getName() const {
- return cast<GlobalVariable>(
- const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
- }
-
- ConstantInt *getHash() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
- }
-
- ConstantInt *getNumCounters() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
- }
-
- ConstantInt *getIndex() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
- }
-
- Value *getStep() const;
- };
-
- class InstrProfIncrementInstStep : public InstrProfIncrementInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::instrprof_increment_step;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
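A minimal usage sketch (assuming II is an IntrinsicInst * under inspection; not taken from this patch). AtomicMemIntrinsic guarantees the element-size operand is a ConstantInt, so getElementSizeInBytes() is safe to call:

    if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II)) {
      uint32_t ElemSize = AMI->getElementSizeInBytes();
      // getLength() comes from MemIntrinsicBase; per the LangRef, a constant
      // length must be a multiple of the element size.
      if (auto *Len = dyn_cast<ConstantInt>(AMI->getLength()))
        assert(Len->getZExtValue() % ElemSize == 0);
    }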
+
+/// This class represents the atomic memset intrinsic
+/// i.e. llvm.element.unordered.atomic.memset
+class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+// This class wraps the atomic memcpy/memmove intrinsics
+// i.e. llvm.element.unordered.atomic.memcpy/memmove
+class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy_element_unordered_atomic:
+ case Intrinsic::memmove_element_unordered_atomic:
+ return true;
+ default:
+ return false;
}
- };
-
- /// This represents the llvm.instrprof_value_profile intrinsic.
- class InstrProfValueProfileInst : public IntrinsicInst {
- public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::instrprof_value_profile;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class represents the atomic memcpy intrinsic
+/// i.e. llvm.element.unordered.atomic.memcpy
+class AtomicMemCpyInst : public AtomicMemTransferInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class represents the atomic memmove intrinsic
+/// i.e. llvm.element.unordered.atomic.memmove
+class AtomicMemMoveInst : public AtomicMemTransferInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This is the common base class for memset/memcpy/memmove.
+class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
+private:
+ enum { ARG_VOLATILE = 3 };
+
+public:
+ ConstantInt *getVolatileCst() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(ARG_VOLATILE)));
+ }
+
+ bool isVolatile() const { return !getVolatileCst()->isZero(); }
+
+ void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ case Intrinsic::memcpy_inline:
+ return true;
+ default:
+ return false;
}
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class wraps the llvm.memset intrinsic.
+class MemSetInst : public MemSetBase<MemIntrinsic> {
+public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memset;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
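A short sketch of the inherited accessors in use (assuming II is an IntrinsicInst *; illustrative only):

    if (auto *MSI = dyn_cast<MemSetInst>(II)) {
      Value *Dest = MSI->getDest();  // from MemIntrinsicBase
      auto *Byte = dyn_cast<ConstantInt>(MSI->getValue()); // from MemSetBase
      bool IsBzero = Byte && Byte->isZero(); // e.g. recognize zero-initialization of Dest
      (void)Dest; (void)IsBzero;
    }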
+
+/// This class wraps the llvm.memcpy/memmove intrinsics.
+class MemTransferInst : public MemTransferBase<MemIntrinsic> {
+public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy_inline:
+ return true;
+ default:
+ return false;
}
-
- GlobalVariable *getName() const {
- return cast<GlobalVariable>(
- const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class wraps the llvm.memcpy intrinsic.
+class MemCpyInst : public MemTransferInst {
+public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memcpy;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class wraps the llvm.memmove intrinsic.
+class MemMoveInst : public MemTransferInst {
+public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memmove;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class wraps the llvm.memcpy.inline intrinsic.
+class MemCpyInlineInst : public MemTransferInst {
+public:
+ ConstantInt *getLength() const {
+ return cast<ConstantInt>(MemTransferInst::getLength());
+ }
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memcpy_inline;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
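Because the size operand of llvm.memcpy.inline carries ImmArg, the covariant getLength() above removes a cast at every use; a sketch:

    if (auto *MCI = dyn_cast<MemCpyInlineInst>(II)) {
      // Cannot fail at runtime: the length operand is required to be immediate.
      uint64_t Bytes = MCI->getLength()->getZExtValue();
      (void)Bytes;
    }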
+
+// The common base class for any memset/memmove/memcpy intrinsics;
+// whether they be atomic or non-atomic.
+// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
+// and llvm.memset/memcpy/memmove
+class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
+public:
+ bool isVolatile() const {
+ // Only the non-atomic intrinsics can be volatile
+ if (auto *MI = dyn_cast<MemIntrinsic>(this))
+ return MI->isVolatile();
+ return false;
+ }
+
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memcpy_inline:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ case Intrinsic::memcpy_element_unordered_atomic:
+ case Intrinsic::memmove_element_unordered_atomic:
+ case Intrinsic::memset_element_unordered_atomic:
+ return true;
+ default:
+ return false;
}
-
- ConstantInt *getHash() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
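An illustrative helper (assumption, not from the patch) showing why isVolatile() lives here: only the non-atomic half of the hierarchy carries a volatile flag.

    static bool transferIsVolatile(const IntrinsicInst *II) {
      if (auto *AMI = dyn_cast<AnyMemIntrinsic>(II))
        return AMI->isVolatile(); // false for all *_element_unordered_atomic forms
      return false;
    }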
+
+/// This class represents any memset intrinsic
+/// i.e. llvm.element.unordered.atomic.memset
+/// and llvm.memset
+class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memset:
+ case Intrinsic::memset_element_unordered_atomic:
+ return true;
+ default:
+ return false;
}
-
- Value *getTargetValue() const {
- return cast<Value>(const_cast<Value *>(getArgOperand(2)));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+// This class wraps any memcpy/memmove intrinsics
+// i.e. llvm.element.unordered.atomic.memcpy/memmove
+// and llvm.memcpy/memmove
+class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memcpy_inline:
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy_element_unordered_atomic:
+ case Intrinsic::memmove_element_unordered_atomic:
+ return true;
+ default:
+ return false;
}
-
- ConstantInt *getValueKind() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class represents any memcpy intrinsic
+/// i.e. llvm.element.unordered.atomic.memcpy
+/// and llvm.memcpy
+class AnyMemCpyInst : public AnyMemTransferInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memcpy_inline:
+ case Intrinsic::memcpy_element_unordered_atomic:
+ return true;
+ default:
+ return false;
}
-
- // Returns the value site index.
- ConstantInt *getIndex() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4)));
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class represents any memmove intrinsic
+/// i.e. llvm.element.unordered.atomic.memmove
+/// and llvm.memmove
+class AnyMemMoveInst : public AnyMemTransferInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memmove:
+ case Intrinsic::memmove_element_unordered_atomic:
+ return true;
+ default:
+ return false;
}
- };
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
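A sketch of the uniform dispatch the Any* family is designed for (assuming II is an IntrinsicInst *): plain, inline, and element-wise-atomic transfers flow through one path.

    if (auto *MT = dyn_cast<AnyMemTransferInst>(II)) {
      Value *Dst = MT->getDest();    // cast-stripped destination pointer
      Value *Src = MT->getSource();  // cast-stripped source pointer
      bool MayOverlap = isa<AnyMemMoveInst>(MT); // memmove permits overlap
      (void)Dst; (void)Src; (void)MayOverlap;
    }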
+
+/// This represents the llvm.va_start intrinsic.
+class VAStartInst : public IntrinsicInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vastart;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); }
+};
+
+/// This represents the llvm.va_end intrinsic.
+class VAEndInst : public IntrinsicInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vaend;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); }
+};
+
+/// This represents the llvm.va_copy intrinsic.
+class VACopyInst : public IntrinsicInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vacopy;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getDest() const { return const_cast<Value *>(getArgOperand(0)); }
+ Value *getSrc() const { return const_cast<Value *>(getArgOperand(1)); }
+};
+
+/// This represents the llvm.instrprof_increment intrinsic.
+class InstrProfIncrementInst : public IntrinsicInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_increment;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ GlobalVariable *getName() const {
+ return cast<GlobalVariable>(
+ const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+ }
+
+ ConstantInt *getHash() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+ }
+
+ ConstantInt *getNumCounters() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
+ }
+
+ ConstantInt *getIndex() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+ }
+
+ Value *getStep() const;
+};
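A hedged sketch of reading the operands, e.g. from a PGO lowering pass (II is an assumed IntrinsicInst *):

    if (auto *Inc = dyn_cast<InstrProfIncrementInst>(II)) {
      GlobalVariable *NamePtr = Inc->getName();                     // profiled function's name data
      uint64_t NumCounters = Inc->getNumCounters()->getZExtValue(); // counters in this function
      uint64_t Idx = Inc->getIndex()->getZExtValue();               // which counter to bump
      (void)NamePtr; (void)NumCounters; (void)Idx;
    }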
+
+class InstrProfIncrementInstStep : public InstrProfIncrementInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_increment_step;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.instrprof_value_profile intrinsic.
+class InstrProfValueProfileInst : public IntrinsicInst {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_value_profile;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ GlobalVariable *getName() const {
+ return cast<GlobalVariable>(
+ const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+ }
+
+ ConstantInt *getHash() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+ }
+
+ Value *getTargetValue() const {
+ return cast<Value>(const_cast<Value *>(getArgOperand(2)));
+ }
+
+ ConstantInt *getValueKind() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+ }
+
+ // Returns the value site index.
+ ConstantInt *getIndex() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4)));
+ }
+};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h
index 58e7725fc0df..a9e6525e2f3d 100644
--- a/llvm/include/llvm/IR/Intrinsics.h
+++ b/llvm/include/llvm/IR/Intrinsics.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/Support/TypeSize.h"
#include <string>
namespace llvm {
@@ -99,21 +100,41 @@ namespace Intrinsic {
/// intrinsic. This is returned by getIntrinsicInfoTableEntries.
struct IITDescriptor {
enum IITDescriptorKind {
- Void, VarArg, MMX, Token, Metadata, Half, Float, Double, Quad,
- Integer, Vector, Pointer, Struct,
- Argument, ExtendArgument, TruncArgument, HalfVecArgument,
- SameVecWidthArgument, PtrToArgument, PtrToElt, VecOfAnyPtrsToElt,
- VecElementArgument, ScalableVecArgument, Subdivide2Argument,
- Subdivide4Argument, VecOfBitcastsToInt
+ Void,
+ VarArg,
+ MMX,
+ Token,
+ Metadata,
+ Half,
+ BFloat,
+ Float,
+ Double,
+ Quad,
+ Integer,
+ Vector,
+ Pointer,
+ Struct,
+ Argument,
+ ExtendArgument,
+ TruncArgument,
+ HalfVecArgument,
+ SameVecWidthArgument,
+ PtrToArgument,
+ PtrToElt,
+ VecOfAnyPtrsToElt,
+ VecElementArgument,
+ Subdivide2Argument,
+ Subdivide4Argument,
+ VecOfBitcastsToInt
} Kind;
union {
unsigned Integer_Width;
unsigned Float_Width;
- unsigned Vector_Width;
unsigned Pointer_AddressSpace;
unsigned Struct_NumElements;
unsigned Argument_Info;
+ ElementCount Vector_Width;
};
enum ArgKind {
@@ -165,6 +186,14 @@ namespace Intrinsic {
IITDescriptor Result = {K, {Field}};
return Result;
}
+
+ static IITDescriptor getVector(unsigned Width, bool IsScalable) {
+ IITDescriptor Result;
+ Result.Kind = Vector;
+ Result.Vector_Width.Min = Width;
+ Result.Vector_Width.Scalable = IsScalable;
+ return Result;
+ }
};
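For example (sketch), a scalable vector operand such as <vscale x 4 x ...> is now described by a single descriptor rather than a separate ScalableVecArgument prefix:

    IITDescriptor D = IITDescriptor::getVector(/*Width=*/4, /*IsScalable=*/true);
    // D.Kind == Vector; D.Vector_Width.Min == 4; D.Vector_Width.Scalable == true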
/// Return the IIT table descriptor for the specified intrinsic into an array
@@ -193,6 +222,13 @@ namespace Intrinsic {
/// This method returns true on error.
bool matchIntrinsicVarArg(bool isVarArg, ArrayRef<IITDescriptor> &Infos);
+ /// Gets the type arguments of an intrinsic call by matching type constraints
+ /// specified by the .td file. The overloaded types are pushed into the
+ /// ArgTys vector.
+ ///
+ /// Returns false if the given function is not a valid intrinsic call.
+ bool getIntrinsicSignature(Function *F, SmallVectorImpl<Type *> &ArgTys);
+
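The intended call pattern, as a sketch (assuming F is a candidate intrinsic declaration and M its module):

    SmallVector<Type *, 4> ArgTys;
    if (Intrinsic::getIntrinsicSignature(F, ArgTys)) {
      // F matches its IIT descriptors; ArgTys holds the overloaded types, ready
      // for e.g. Intrinsic::getDeclaration(M, F->getIntrinsicID(), ArgTys).
    }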
// Checks if the intrinsic name matches with its signature and if not
// returns the declaration with the same signature and remangled name.
llvm::Optional<Function*> remangleIntrinsicFunction(Function *F);
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 865e4ccc9bc4..4918ea876df6 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -27,6 +27,10 @@ class IntrinsicProperty;
// effects. It may be CSE'd, deleted if dead, etc.
def IntrNoMem : IntrinsicProperty;
+// IntrNoSync - Threads executing the intrinsic will not synchronize using
+// memory or other means.
+def IntrNoSync : IntrinsicProperty;
+
// IntrReadMem - This intrinsic only reads from memory. It does not write to
// memory and has no other side effects. Therefore, it cannot be moved across
// potentially aliasing stores. However, it can be reordered otherwise and can
@@ -58,48 +62,63 @@ def Commutative : IntrinsicProperty;
// Throws - This intrinsic can throw.
def Throws : IntrinsicProperty;
+// Attribute index needs to match `AttrIndex` defined in `Attributes.h`.
+class AttrIndex<int idx> {
+ int Value = idx;
+}
+def FuncIndex : AttrIndex<-1>;
+def RetIndex : AttrIndex<0>;
+class ArgIndex<int argNo> : AttrIndex<!add(argNo, 1)>;
+
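Illustrative migration (the intrinsic name below is hypothetical): a property that used to take a raw operand number now takes an ArgIndex, freeing RetIndex/FuncIndex for return- and function-level attributes.

    // Before: ImmArg<2>           (raw operand number)
    // After:  ImmArg<ArgIndex<2>> (stored as !add(2, 1) == 3 in attribute-index space)
    def int_example_hypothetical
        : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<2>>]>;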
// NoCapture - The specified argument pointer is not captured by the intrinsic.
-class NoCapture<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class NoCapture<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
}
// NoAlias - The specified argument pointer is not aliasing other "noalias" pointer
// arguments of the intrinsic wrt. the intrinsic scope.
-class NoAlias<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class NoAlias<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
+}
+
+class Align<AttrIndex idx, int align> : IntrinsicProperty {
+ int ArgNo = idx.Value;
+ int Align = align;
}
// Returned - The specified argument is always the return value of the
// intrinsic.
-class Returned<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class Returned<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
}
// ImmArg - The specified argument must be an immediate.
-class ImmArg<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class ImmArg<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
}
// ReadOnly - The specified argument pointer is not written to through the
// pointer by the intrinsic.
-class ReadOnly<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class ReadOnly<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
}
// WriteOnly - The intrinsic does not read memory through the specified
// argument pointer.
-class WriteOnly<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class WriteOnly<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
}
// ReadNone - The specified argument pointer is not dereferenced by the
// intrinsic.
-class ReadNone<int argNo> : IntrinsicProperty {
- int ArgNo = argNo;
+class ReadNone<AttrIndex idx> : IntrinsicProperty {
+ int ArgNo = idx.Value;
}
def IntrNoReturn : IntrinsicProperty;
+def IntrNoFree : IntrinsicProperty;
+
def IntrWillReturn : IntrinsicProperty;
// IntrCold - Calls to this intrinsic are cold.
@@ -210,6 +229,7 @@ def llvm_i16_ty : LLVMType<i16>;
def llvm_i32_ty : LLVMType<i32>;
def llvm_i64_ty : LLVMType<i64>;
def llvm_half_ty : LLVMType<f16>;
+def llvm_bfloat_ty : LLVMType<bf16>;
def llvm_float_ty : LLVMType<f32>;
def llvm_double_ty : LLVMType<f64>;
def llvm_f80_ty : LLVMType<f80>;
@@ -232,6 +252,7 @@ def llvm_v8i1_ty : LLVMType<v8i1>; // 8 x i1
def llvm_v16i1_ty : LLVMType<v16i1>; // 16 x i1
def llvm_v32i1_ty : LLVMType<v32i1>; // 32 x i1
def llvm_v64i1_ty : LLVMType<v64i1>; // 64 x i1
+def llvm_v128i1_ty : LLVMType<v128i1>; // 128 x i1
def llvm_v512i1_ty : LLVMType<v512i1>; // 512 x i1
def llvm_v1024i1_ty : LLVMType<v1024i1>; //1024 x i1
@@ -274,6 +295,9 @@ def llvm_v1i128_ty : LLVMType<v1i128>; // 1 x i128
def llvm_v2f16_ty : LLVMType<v2f16>; // 2 x half (__fp16)
def llvm_v4f16_ty : LLVMType<v4f16>; // 4 x half (__fp16)
def llvm_v8f16_ty : LLVMType<v8f16>; // 8 x half (__fp16)
+def llvm_v2bf16_ty : LLVMType<v2bf16>; // 2 x bfloat (__bf16)
+def llvm_v4bf16_ty : LLVMType<v4bf16>; // 4 x bfloat (__bf16)
+def llvm_v8bf16_ty : LLVMType<v8bf16>; // 8 x bfloat (__bf16)
def llvm_v1f32_ty : LLVMType<v1f32>; // 1 x float
def llvm_v2f32_ty : LLVMType<v2f32>; // 2 x float
def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float
@@ -284,6 +308,7 @@ def llvm_v1f64_ty : LLVMType<v1f64>; // 1 x double
def llvm_v2f64_ty : LLVMType<v2f64>; // 2 x double
def llvm_v4f64_ty : LLVMType<v4f64>; // 4 x double
def llvm_v8f64_ty : LLVMType<v8f64>; // 8 x double
+def llvm_v16f64_ty : LLVMType<v16f64>; // 16 x double
def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here
@@ -346,7 +371,8 @@ def int_gcread : Intrinsic<[llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_gcwrite : Intrinsic<[],
[llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
- [IntrArgMemOnly, NoCapture<1>, NoCapture<2>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
+ NoCapture<ArgIndex<2>>]>;
//===------------------- ObjC ARC runtime Intrinsics --------------------===//
//
@@ -422,14 +448,19 @@ def int_objc_arc_annotation_bottomup_bbend : Intrinsic<[],
//===--------------------- Code Generator Intrinsics ----------------------===//
//
-def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_addressofreturnaddress : Intrinsic<[llvm_anyptr_ty], [], [IntrNoMem]>;
-def int_frameaddress : Intrinsic<[llvm_anyptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+def int_frameaddress : Intrinsic<[llvm_anyptr_ty], [llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_sponentry : Intrinsic<[llvm_anyptr_ty], [], [IntrNoMem]>;
def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
[IntrReadMem], "llvm.read_register">;
def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
[], "llvm.write_register">;
+def int_read_volatile_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
+ [IntrHasSideEffects],
+ "llvm.read_volatile_register">;
// Gets the address of the local variable area. This is typically a copy of the
// stack, frame, or base pointer depending on the type of prologue.
@@ -442,7 +473,7 @@ def int_localescape : Intrinsic<[], [llvm_vararg_ty]>;
// to an escaped allocation indicated by the index.
def int_localrecover : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
// Given the frame pointer passed into an SEH filter function, returns a
// pointer to the local variable area suitable for use with llvm.localrecover.
@@ -468,8 +499,9 @@ def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
// memory while not impeding optimization.
def int_prefetch
: Intrinsic<[], [ llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ],
- [ IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, ReadOnly<0>, NoCapture<0>,
- ImmArg<1>, ImmArg<2>]>;
+ [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn,
+ ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>;
def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
@@ -503,24 +535,47 @@ def int_instrprof_value_profile : Intrinsic<[],
llvm_i32_ty],
[]>;
+def int_call_preallocated_setup : Intrinsic<[llvm_token_ty], [llvm_i32_ty]>;
+def int_call_preallocated_arg : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_i32_ty]>;
+def int_call_preallocated_teardown : Intrinsic<[], [llvm_token_ty]>;
+
//===------------------- Standard C Library Intrinsics --------------------===//
//
def int_memcpy : Intrinsic<[],
- [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
- llvm_i1_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>,
- NoAlias<0>, NoAlias<1>, WriteOnly<0>, ReadOnly<1>, ImmArg<3>]>;
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+ llvm_i1_ty],
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
+ NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
+ WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
+ ImmArg<ArgIndex<3>>]>;
+
+// A memcpy variant that is guaranteed to be inlined.
+// In particular, this means that the generated code is not allowed to call any
+// external function.
+// The third argument (specifying the size) must be a constant.
+def int_memcpy_inline
+ : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i1_ty],
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
+ NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
+ WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
def int_memmove : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
llvm_i1_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>,
- ReadOnly<1>, ImmArg<3>]>;
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
+ ReadOnly<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;
def int_memset : Intrinsic<[],
[llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
llvm_i1_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<0>, WriteOnly<0>,
- ImmArg<3>]>;
+ [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
+ ImmArg<ArgIndex<3>>]>;
// FIXME: Add version of these floating point intrinsics which allow non-default
// rounding modes and FP exception handling.
@@ -556,6 +611,7 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
def int_rint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_nearbyint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_roundeven : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
[IntrNoMem]>;
@@ -586,9 +642,18 @@ def int_maximum : Intrinsic<[llvm_anyfloat_ty],
def int_objectsize : Intrinsic<[llvm_anyint_ty],
[llvm_anyptr_ty, llvm_i1_ty,
llvm_i1_ty, llvm_i1_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<1>, ImmArg<2>, ImmArg<3>]>,
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
+ ImmArg<ArgIndex<3>>]>,
GCCBuiltin<"__builtin_object_size">;
+//===--------------- Access to Floating Point Environment -----------------===//
+//
+
+let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
+ def int_flt_rounds : Intrinsic<[llvm_i32_ty], []>;
+}
+
//===--------------- Constrained Floating Point Intrinsics ----------------===//
//
@@ -626,6 +691,13 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_fmuladd : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+
def int_experimental_constrained_fptosi : Intrinsic<[ llvm_anyint_ty ],
[ llvm_anyfloat_ty,
llvm_metadata_ty ]>;
@@ -746,6 +818,9 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
def int_experimental_constrained_round : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_roundeven : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_trunc : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty ]>;
@@ -768,6 +843,10 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
def int_expect : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, IntrWillReturn]>;
+def int_expect_with_probability : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_double_ty],
+ [IntrNoMem, IntrWillReturn]>;
+
//===-------------------- Bit Manipulation Intrinsics ---------------------===//
//
@@ -782,7 +861,8 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>;
}
-let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<1>] in {
+let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<1>>] in {
def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
}
@@ -872,12 +952,12 @@ def int_codeview_annotation : Intrinsic<[], [llvm_metadata_ty],
//
def int_init_trampoline : Intrinsic<[],
[llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<0>]>,
- GCCBuiltin<"__builtin_init_trampoline">;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
+ GCCBuiltin<"__builtin_init_trampoline">;
def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly]>,
- GCCBuiltin<"__builtin_adjust_trampoline">;
+ GCCBuiltin<"__builtin_adjust_trampoline">;
//===------------------------ Overflow Intrinsics -------------------------===//
//
@@ -924,44 +1004,64 @@ def int_usub_sat : Intrinsic<[llvm_anyint_ty],
//
def int_smul_fix : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ Commutative, ImmArg<ArgIndex<2>>]>;
def int_umul_fix : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ Commutative, ImmArg<ArgIndex<2>>]>;
def int_sdiv_fix : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_udiv_fix : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
//===------------------- Fixed Point Saturation Arithmetic Intrinsics ----------------===//
//
def int_smul_fix_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ Commutative, ImmArg<ArgIndex<2>>]>;
def int_umul_fix_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ Commutative, ImmArg<ArgIndex<2>>]>;
+
+def int_sdiv_fix_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_udiv_fix_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
//===------------------------- Memory Use Markers -------------------------===//
//
def int_lifetime_start : Intrinsic<[],
[llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<1>, ImmArg<0>]>;
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<1>>,
+ ImmArg<ArgIndex<0>>]>;
def int_lifetime_end : Intrinsic<[],
[llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<1>, ImmArg<0>]>;
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<1>>,
+ ImmArg<ArgIndex<0>>]>;
def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
[llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<1>, ImmArg<0>]>;
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<1>>,
+ ImmArg<ArgIndex<0>>]>;
def int_invariant_end : Intrinsic<[],
[llvm_descriptor_ty, llvm_i64_ty,
llvm_anyptr_ty],
- [IntrArgMemOnly, IntrWillReturn, NoCapture<2>, ImmArg<1>]>;
+ [IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<2>>,
+ ImmArg<ArgIndex<1>>]>;
// launder.invariant.group can't be marked with 'readnone' (IntrNoMem),
// because it would cause CSE of two barriers with the same argument.
@@ -1008,13 +1108,17 @@ def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty],
[llvm_i64_ty, llvm_i32_ty,
llvm_anyptr_ty, llvm_i32_ty,
llvm_i32_ty, llvm_vararg_ty],
- [Throws, ImmArg<0>, ImmArg<1>, ImmArg<3>, ImmArg<4>]>;
+ [Throws, ImmArg<ArgIndex<0>>,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<3>>,
+ ImmArg<ArgIndex<4>>]>;
def int_experimental_gc_result : Intrinsic<[llvm_any_ty], [llvm_token_ty],
[IntrReadMem]>;
def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty],
- [llvm_token_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<1>, ImmArg<2>]>;
+ [llvm_token_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrReadMem, ImmArg<ArgIndex<1>>,
+ ImmArg<ArgIndex<2>>]>;
//===------------------------ Coroutine Intrinsics ---------------===//
// These are documented in docs/Coroutines.rst
@@ -1024,7 +1128,8 @@ def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty],
def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty,
llvm_ptr_ty, llvm_ptr_ty],
[IntrArgMemOnly, IntrReadMem,
- ReadNone<1>, ReadOnly<2>, NoCapture<2>]>;
+ ReadNone<ArgIndex<1>>, ReadOnly<ArgIndex<2>>,
+ NoCapture<ArgIndex<2>>]>;
def int_coro_id_retcon : Intrinsic<[llvm_token_ty],
[llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty,
llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
@@ -1035,11 +1140,12 @@ def int_coro_id_retcon_once : Intrinsic<[llvm_token_ty],
[]>;
def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
- [WriteOnly<1>]>;
+ [WriteOnly<ArgIndex<1>>]>;
def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
- [IntrReadMem, IntrArgMemOnly, ReadOnly<1>,
- NoCapture<1>]>;
+ [IntrReadMem, IntrArgMemOnly,
+ ReadOnly<ArgIndex<1>>,
+ NoCapture<ArgIndex<1>>]>;
def int_coro_end : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty], []>;
def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
@@ -1057,28 +1163,29 @@ def int_coro_alloca_get : Intrinsic<[llvm_ptr_ty], [llvm_token_ty], []>;
def int_coro_alloca_free : Intrinsic<[], [llvm_token_ty], []>;
def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
- [IntrNoMem, ReadNone<0>, ReadNone<1>]>;
+ [IntrNoMem, ReadNone<ArgIndex<0>>,
+ ReadNone<ArgIndex<1>>]>;
// Coroutine Manipulation Intrinsics.
def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
def int_coro_destroy : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
def int_coro_done : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
- [IntrArgMemOnly, ReadOnly<0>, NoCapture<0>]>;
+ [IntrArgMemOnly, ReadOnly<ArgIndex<0>>,
+ NoCapture<ArgIndex<0>>]>;
def int_coro_promise : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_i1_ty],
- [IntrNoMem, NoCapture<0>]>;
+ [IntrNoMem, NoCapture<ArgIndex<0>>]>;
// Coroutine Lowering Intrinsics. Used internally by coroutine passes.
def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
- [IntrReadMem, IntrArgMemOnly, ReadOnly<0>,
- NoCapture<0>]>;
+ [IntrReadMem, IntrArgMemOnly,
+ ReadOnly<ArgIndex<0>>,
+ NoCapture<ArgIndex<0>>]>;
///===-------------------------- Other Intrinsics --------------------------===//
//
-def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
- GCCBuiltin<"__builtin_flt_rounds">;
def int_trap : Intrinsic<[], [], [IntrNoReturn, IntrCold]>,
GCCBuiltin<"__builtin_trap">;
def int_debugtrap : Intrinsic<[]>,
@@ -1117,36 +1224,117 @@ def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
[], "llvm.clear_cache">;
// Intrinsic to detect whether its argument is a constant.
-def int_is_constant : Intrinsic<[llvm_i1_ty], [llvm_any_ty], [IntrNoMem, IntrWillReturn], "llvm.is.constant">;
+def int_is_constant : Intrinsic<[llvm_i1_ty], [llvm_any_ty],
+ [IntrNoMem, IntrWillReturn, IntrConvergent],
+ "llvm.is.constant">;
// Intrinsic to mask out bits of a pointer.
-def int_ptrmask: Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty, llvm_anyint_ty],
+def int_ptrmask: Intrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_anyint_ty],
[IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
+//===---------------- Vector Predication Intrinsics --------------===//
+
+// Binary operators
+let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
+ def int_vp_add : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_sub : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_mul : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_sdiv : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_udiv : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_srem : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_urem : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_ashr : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_lshr : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_shl : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_or : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_and : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_xor : Intrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+
+}
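For reference, a sketch of the call form these definitions produce (value names assumed): two vector operands, a mask, and an explicit vector length.

    %r = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y,
                                           <8 x i1> %mask, i32 %evl)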
+
+def int_get_active_lane_mask:
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyint_ty, LLVMMatchType<1>],
+ [IntrNoMem, IntrNoSync, IntrWillReturn]>;
+
//===-------------------------- Masked Intrinsics -------------------------===//
//
def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
LLVMAnyPointerType<LLVMMatchType<0>>,
llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [IntrArgMemOnly, IntrWillReturn, ImmArg<2>]>;
+ [IntrArgMemOnly, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
def int_masked_load : Intrinsic<[llvm_anyvector_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
- [IntrReadMem, IntrArgMemOnly, IntrWillReturn, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly, IntrWillReturn,
+ ImmArg<ArgIndex<1>>]>;
def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
[LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<0>],
- [IntrReadMem, IntrWillReturn, ImmArg<1>]>;
+ [IntrReadMem, IntrWillReturn,
+ ImmArg<ArgIndex<1>>]>;
def int_masked_scatter: Intrinsic<[],
[llvm_anyvector_ty,
LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [IntrWillReturn, ImmArg<2>]>;
+ [IntrWillReturn, ImmArg<ArgIndex<2>>]>;
def int_masked_expandload: Intrinsic<[llvm_anyvector_ty],
[LLVMPointerToElt<0>,
@@ -1177,20 +1365,24 @@ def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_hwasan_check_memaccess :
- Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [IntrInaccessibleMemOnly, ImmArg<2>]>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrInaccessibleMemOnly, ImmArg<ArgIndex<2>>]>;
def int_hwasan_check_memaccess_shortgranules :
- Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [IntrInaccessibleMemOnly, ImmArg<2>]>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrInaccessibleMemOnly, ImmArg<ArgIndex<2>>]>;
// Xray intrinsics
//===----------------------------------------------------------------------===//
// Custom event logging for x-ray.
// Takes a pointer to a string and the length of the string.
def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
- [NoCapture<0>, ReadOnly<0>, IntrWriteMem]>;
+ [IntrWriteMem, NoCapture<ArgIndex<0>>,
+ ReadOnly<ArgIndex<0>>]>;
// Typed event logging for x-ray.
// Takes a numeric type tag, a pointer to a string and the length of the string.
def int_xray_typedevent : Intrinsic<[], [llvm_i16_ty, llvm_ptr_ty, llvm_i32_ty],
- [NoCapture<1>, ReadOnly<1>, IntrWriteMem]>;
+ [IntrWriteMem, NoCapture<ArgIndex<1>>,
+ ReadOnly<ArgIndex<1>>]>;
//===----------------------------------------------------------------------===//
//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
@@ -1199,29 +1391,25 @@ def int_xray_typedevent : Intrinsic<[], [llvm_i16_ty, llvm_ptr_ty, llvm_i32_ty],
// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memcpy_element_unordered_atomic
: Intrinsic<[],
- [
- llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
- ],
- [
- IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
- ReadOnly<1>, ImmArg<3>
- ]>;
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty],
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
+ NoCapture<ArgIndex<1>>, WriteOnly<ArgIndex<0>>,
+ ReadOnly<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;
// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memmove_element_unordered_atomic
: Intrinsic<[],
- [
- llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
- ],
- [
- IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
- ReadOnly<1>, ImmArg<3>
- ]>;
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty],
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
+ NoCapture<ArgIndex<1>>, WriteOnly<ArgIndex<0>>,
+ ReadOnly<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;
// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
def int_memset_element_unordered_atomic
- : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ],
- [ IntrArgMemOnly, IntrWillReturn, NoCapture<0>, WriteOnly<0>, ImmArg<3> ]>;
+ : Intrinsic<[], [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty],
+ [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
+ NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
+ ImmArg<ArgIndex<3>>]>;
//===------------------------ Reduction Intrinsics ------------------------===//
//
@@ -1258,39 +1446,34 @@ let IntrProperties = [IntrNoMem, IntrWillReturn] in {
//===----- Matrix intrinsics ---------------------------------------------===//
-def int_matrix_transpose : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable,
- IntrWillReturn, ImmArg<1>, ImmArg<2>]>;
-
-def int_matrix_multiply : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty,
- llvm_anyvector_ty,
- llvm_i32_ty,
- llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable,
- IntrWillReturn, ImmArg<2>, ImmArg<3>,
- ImmArg<4>]>;
-
-def int_matrix_columnwise_load : Intrinsic<[llvm_anyvector_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>,
- llvm_i32_ty,
- llvm_i32_ty,
- llvm_i32_ty],
- [IntrReadMem, IntrWillReturn,
- ImmArg<2>, ImmArg<3>]>;
-
-def int_matrix_columnwise_store : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMAnyPointerType<LLVMMatchType<0>>,
- llvm_i32_ty,
- llvm_i32_ty,
- llvm_i32_ty],
- [WriteOnly<1>, IntrWillReturn,
- ImmArg<3>, ImmArg<4>]>;
+def int_matrix_transpose
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoSync, IntrWillReturn, IntrNoMem, IntrSpeculatable,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+def int_matrix_multiply
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, llvm_anyvector_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoSync, IntrWillReturn, IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
+
+def int_matrix_column_major_load
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMPointerToElt<0>, llvm_i64_ty, llvm_i1_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoSync, IntrWillReturn, IntrArgMemOnly, IntrReadMem,
+ NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
+ ImmArg<ArgIndex<4>>]>;
+
+def int_matrix_column_major_store
+ : Intrinsic<[],
+ [llvm_anyvector_ty, LLVMPointerToElt<0>,
+ llvm_i64_ty, llvm_i1_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoSync, IntrWillReturn, IntrArgMemOnly, IntrWriteMem,
+ WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
//===---------- Intrinsics to control hardware supported loops ----------===//
@@ -1319,27 +1502,36 @@ def int_loop_decrement :
// may be optimised.
def int_loop_decrement_reg :
Intrinsic<[llvm_anyint_ty],
- [llvm_anyint_ty, llvm_anyint_ty], [IntrNoDuplicate]>;
+ [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoDuplicate]>;
//===----- Intrinsics that are used to provide predicate information -----===//
def int_ssa_copy : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
- [IntrNoMem, Returned<0>]>;
+ [IntrNoMem, Returned<ArgIndex<0>>]>;
//===------- Intrinsics that are used to preserve debug information -------===//
def int_preserve_array_access_index : Intrinsic<[llvm_anyptr_ty],
[llvm_anyptr_ty, llvm_i32_ty,
llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem,
+ ImmArg<ArgIndex<1>>,
+ ImmArg<ArgIndex<2>>]>;
def int_preserve_union_access_index : Intrinsic<[llvm_anyptr_ty],
[llvm_anyptr_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem,
+ ImmArg<ArgIndex<1>>]>;
def int_preserve_struct_access_index : Intrinsic<[llvm_anyptr_ty],
[llvm_anyptr_ty, llvm_i32_ty,
llvm_i32_ty],
- [IntrNoMem, ImmArg<1>,
- ImmArg<2>]>;
+ [IntrNoMem,
+ ImmArg<ArgIndex<1>>,
+ ImmArg<ArgIndex<2>>]>;
+
+//===---------- Intrinsics to query properties of scalable vectors --------===//
+def int_vscale : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Target-specific intrinsics
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 27a2550d1857..3f71f644f9a1 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -133,6 +133,10 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: Intrinsic<[llvm_anyvector_ty],
[LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
[IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Lane_Intrinsic
+ : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
+ [IntrNoMem]>;
class AdvSIMD_3VectorArg_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -169,6 +173,17 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem]>;
+
+ class AdvSIMD_MatMul_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_FML_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+
}
// Arithmetic ops
@@ -207,9 +222,13 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
// Vector Saturating Doubling Multiply High
def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_sqdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
+ def int_aarch64_neon_sqdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;
// Vector Saturating Rounding Doubling Multiply High
def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_sqrdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
+ def int_aarch64_neon_sqrdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;
// Vector Polynomial Multiply
def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
@@ -441,6 +460,27 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;
+// v8.6-A Matrix Multiply Intrinsics
+ def int_aarch64_neon_ummla : AdvSIMD_MatMul_Intrinsic;
+ def int_aarch64_neon_smmla : AdvSIMD_MatMul_Intrinsic;
+ def int_aarch64_neon_usmmla : AdvSIMD_MatMul_Intrinsic;
+ def int_aarch64_neon_usdot : AdvSIMD_Dot_Intrinsic;
+ def int_aarch64_neon_bfdot : AdvSIMD_Dot_Intrinsic;
+ def int_aarch64_neon_bfmmla : AdvSIMD_MatMul_Intrinsic;
+ def int_aarch64_neon_bfmlalb : AdvSIMD_FML_Intrinsic;
+ def int_aarch64_neon_bfmlalt : AdvSIMD_FML_Intrinsic;
+
+
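A hedged sketch of how the new 8-bit matrix multiply intrinsics above appear in IR (the overload suffix and value names are assumptions): each multiplies 2x8 by 8x2 sub-matrices of the i8 operands and accumulates into the 2x2 i32 result:

  %acc = call <4 x i32> @llvm.aarch64.neon.smmla.v4i32.v16i8(
             <4 x i32> %r, <16 x i8> %a, <16 x i8> %b)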
+ // v8.6-A Bfloat Intrinsics
+ def int_aarch64_neon_bfcvt
+ : Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_aarch64_neon_bfcvtn
+ : Intrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_aarch64_neon_bfcvtn2
+ : Intrinsic<[llvm_v8bf16_ty],
+ [llvm_v8bf16_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+
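For illustration (value names assumed), the bfloat conversions are not overloaded, so they appear with fixed types:

  %b = call bfloat @llvm.aarch64.neon.bfcvt(float %x)
  %v = call <8 x bfloat> @llvm.aarch64.neon.bfcvtn(<4 x float> %lo)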
// v8.2-A FP16 Fused Multiply-Add Long
def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
@@ -468,7 +508,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_1Vec_Store_Lane_Intrinsic
: Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<2>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
class AdvSIMD_2Vec_Load_Intrinsic
: Intrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
@@ -482,11 +522,11 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_2Vec_Store_Intrinsic
: Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrArgMemOnly, NoCapture<2>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
class AdvSIMD_2Vec_Store_Lane_Intrinsic
: Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<3>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
class AdvSIMD_3Vec_Load_Intrinsic
: Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
@@ -500,12 +540,12 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_3Vec_Store_Intrinsic
: Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrArgMemOnly, NoCapture<3>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
class AdvSIMD_3Vec_Store_Lane_Intrinsic
: Intrinsic<[], [llvm_anyvector_ty,
LLVMMatchType<0>, LLVMMatchType<0>,
llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<4>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
class AdvSIMD_4Vec_Load_Intrinsic
: Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
@@ -523,12 +563,12 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>,
LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrArgMemOnly, NoCapture<4>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
class AdvSIMD_4Vec_Store_Lane_Intrinsic
: Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>,
llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<5>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;
}
// Memory ops
@@ -611,7 +651,7 @@ def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
let TargetPrefix = "aarch64" in {
class FPCR_Get_Intrinsic
- : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>;
+ : Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;
}
// FPCR
@@ -725,20 +765,20 @@ def int_aarch64_irg_sp : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty],
// ADDG ptr1, baseptr, (ptr0 - baseptr), tag_offset
// It is intended that ptr0 is an alloca address, and baseptr is the direct output of llvm.aarch64.irg.sp.
def int_aarch64_tagp : Intrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_ptr_ty, llvm_i64_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
// Update allocation tags for the memory range to match the tag in the pointer argument.
def int_aarch64_settag : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
- [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
+ [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
// Update allocation tags for the memory range to match the tag in the pointer argument,
// and set memory contents to zero.
def int_aarch64_settag_zero : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
- [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
+ [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
// Update allocation tags for a 16-byte aligned, 16-byte sized memory region, and store a pair of 8-byte values.
def int_aarch64_stgp : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
- [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
+ [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
}
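A minimal sketch of the MTE tagging intrinsics above (pointer and size values assumed): the tag written is the one carried in the pointer argument, and sizes are multiples of the 16-byte tag granule:

  call void @llvm.aarch64.settag(i8* %p, i64 64)         ; retag 64 bytes at %p
  call void @llvm.aarch64.settag.zero(i8* %p, i64 64)    ; retag and zero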
// Transactional Memory Extension (TME) Intrinsics
@@ -749,7 +789,7 @@ def int_aarch64_tstart : GCCBuiltin<"__builtin_arm_tstart">,
def int_aarch64_tcommit : GCCBuiltin<"__builtin_arm_tcommit">, Intrinsic<[]>;
def int_aarch64_tcancel : GCCBuiltin<"__builtin_arm_tcancel">,
- Intrinsic<[], [llvm_i64_ty], [ImmArg<0>]>;
+ Intrinsic<[], [llvm_i64_ty], [ImmArg<ArgIndex<0>>]>;
def int_aarch64_ttest : GCCBuiltin<"__builtin_arm_ttest">,
Intrinsic<[llvm_i64_ty], [],
@@ -764,23 +804,78 @@ def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
+def llvm_nxv8bf16_ty : LLVMType<nxv8bf16>;
def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_SVE_Create_2Vector_Tuple
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrReadMem]>;
+
+ class AdvSIMD_SVE_Create_3Vector_Tuple
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>],
+ [IntrReadMem]>;
+
+ class AdvSIMD_SVE_Create_4Vector_Tuple
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
+ LLVMMatchType<1>],
+ [IntrReadMem]>;
+
+ class AdvSIMD_SVE_Set_Vector_Tuple
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_anyvector_ty],
+ [IntrReadMem, ImmArg<ArgIndex<1>>]>;
+
+ class AdvSIMD_SVE_Get_Vector_Tuple
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly, ImmArg<ArgIndex<1>>]>;
+
+ class AdvSIMD_ManyVec_PredLoad_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMPointerToElt<0>],
+ [IntrReadMem, IntrArgMemOnly]>;
+
class AdvSIMD_1Vec_PredLoad_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMPointerTo<0>],
+ LLVMPointerToElt<0>],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_1Vec_PredStore_Intrinsic
: Intrinsic<[],
[llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMPointerTo<0>],
- [IntrArgMemOnly, NoCapture<2>]>;
+ LLVMPointerToElt<0>],
+ [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
+
+ class AdvSIMD_2Vec_PredStore_Intrinsic
+ : Intrinsic<[],
+ [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
+ [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
+
+ class AdvSIMD_3Vec_PredStore_Intrinsic
+ : Intrinsic<[],
+ [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
+ [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
+
+ class AdvSIMD_4Vec_PredStore_Intrinsic
+ : Intrinsic<[],
+ [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
+ [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;
+
+ class AdvSIMD_SVE_Index_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMVectorElementType<0>,
+ LLVMVectorElementType<0>],
+ [IntrNoMem]>;
class AdvSIMD_Merged1VectorArg_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -794,7 +889,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[LLVMMatchType<0>,
LLVMMatchType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
class AdvSIMD_3VectorArgIndexed_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -802,7 +897,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMMatchType<0>,
LLVMMatchType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
class AdvSIMD_Pred1VectorArg_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -850,7 +945,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[LLVMMatchType<0>,
llvm_i32_ty,
llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
class AdvSIMD_SVE_Saturating_N_Intrinsic<LLVMType T>
: Intrinsic<[T],
@@ -860,7 +955,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<LLVMType T>
: Intrinsic<[T],
[T, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
class AdvSIMD_SVE_CNT_Intrinsic
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
@@ -869,12 +964,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
llvm_anyvector_ty],
[IntrNoMem]>;
- class AdvSIMD_SVE_FP_Reduce_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty],
- [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
- llvm_anyvector_ty],
- [IntrNoMem]>;
-
class AdvSIMD_SVE_ReduceWithInit_Intrinsic
: Intrinsic<[LLVMVectorElementType<0>],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -882,19 +971,12 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
llvm_anyvector_ty],
[IntrNoMem]>;
- class AdvSIMD_SVE_FP_ReduceWithInit_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty],
- [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
- LLVMMatchType<0>,
- llvm_anyvector_ty],
- [IntrNoMem]>;
-
class AdvSIMD_SVE_ShiftByImm_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
class AdvSIMD_SVE_ShiftWide_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -914,7 +996,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMMatchType<0>,
LLVMMatchType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
class AdvSIMD_SVE_CMLA_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -923,7 +1005,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMMatchType<0>,
LLVMMatchType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
class AdvSIMD_SVE_CMLA_LANE_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -932,6 +1014,23 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMMatchType<0>,
llvm_i32_ty,
llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
+
+ class AdvSIMD_SVE_DUP_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ LLVMVectorElementType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_DUP_Unpred_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMVectorElementType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_DUPQ_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ llvm_i64_ty],
[IntrNoMem]>;
class AdvSIMD_SVE_EXPA_Intrinsic
@@ -962,7 +1061,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_SVE_PTRUE_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[llvm_i32_ty],
- [IntrNoMem, ImmArg<0>]>;
+ [IntrNoMem, ImmArg<ArgIndex<0>>]>;
class AdvSIMD_SVE_PUNPKHI_Intrinsic
: Intrinsic<[LLVMHalfElementsVectorType<0>],
@@ -992,7 +1091,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_SVE_CNTB_Intrinsic
: Intrinsic<[llvm_i64_ty],
[llvm_i32_ty],
- [IntrNoMem, ImmArg<0>]>;
+ [IntrNoMem, ImmArg<ArgIndex<0>>]>;
class AdvSIMD_SVE_CNTP_Intrinsic
: Intrinsic<[llvm_i64_ty],
@@ -1012,7 +1111,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMSubdivide4VectorType<0>,
LLVMSubdivide4VectorType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
class AdvSIMD_SVE_PTEST_Intrinsic
: Intrinsic<[llvm_i1_ty],
@@ -1026,6 +1125,45 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMVectorOfBitcastsToInt<0>],
[IntrNoMem]>;
+ class AdvSIMD_SVE2_TBX_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMVectorOfBitcastsToInt<0>],
+ [IntrNoMem]>;
+
+ class SVE2_1VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMSubdivide2VectorType<0>,
+ llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+ class SVE2_2VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMSubdivide2VectorType<0>,
+ LLVMSubdivide2VectorType<0>],
+ [IntrNoMem]>;
+
+ class SVE2_2VectorArgIndexed_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMSubdivide2VectorType<0>,
+ LLVMSubdivide2VectorType<0>,
+ llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+ class SVE2_2VectorArg_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMSubdivide2VectorType<0>],
+ [IntrNoMem]>;
+
+ class SVE2_2VectorArg_Pred_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ LLVMMatchType<0>,
+ LLVMSubdivide2VectorType<0>],
+ [IntrNoMem]>;
+
class SVE2_3VectorArg_Long_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>,
@@ -1039,7 +1177,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMSubdivide2VectorType<0>,
LLVMSubdivide2VectorType<0>,
llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
class SVE2_1VectorArg_Narrowing_Intrinsic
: Intrinsic<[LLVMSubdivide2VectorType<0>],
@@ -1066,23 +1204,46 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class SVE2_1VectorArg_Imm_Narrowing_Intrinsic
: Intrinsic<[LLVMSubdivide2VectorType<0>],
[llvm_anyvector_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
class SVE2_2VectorArg_Imm_Narrowing_Intrinsic
: Intrinsic<[LLVMSubdivide2VectorType<0>],
[LLVMSubdivide2VectorType<0>, llvm_anyvector_ty,
llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+ class SVE2_CONFLICT_DETECT_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMAnyPointerType<llvm_any_ty>,
+ LLVMMatchType<1>]>;
+
+ class SVE2_3VectorArg_Indexed_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMSubdivide2VectorType<0>,
+ LLVMSubdivide2VectorType<0>,
+ llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+ class AdvSIMD_SVE_CDOT_LANE_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMSubdivide4VectorType<0>,
+ LLVMSubdivide4VectorType<0>,
+ llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
// NOTE: There is no relationship between these intrinsics beyond an attempt
// to reuse currently identical class definitions.
class AdvSIMD_SVE_LOGB_Intrinsic : AdvSIMD_SVE_CNT_Intrinsic;
+ class AdvSIMD_SVE2_CADD_Intrinsic : AdvSIMD_2VectorArgIndexed_Intrinsic;
+ class AdvSIMD_SVE2_CMLA_Intrinsic : AdvSIMD_3VectorArgIndexed_Intrinsic;
// This class of intrinsics is not intended to be useful within LLVM IR but
// is instead here to support some of the more rigid parts of the ACLE.
- class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
- : GCCBuiltin<"__builtin_sve_" # name>,
- Intrinsic<[OUT], [OUT, llvm_nxv16i1_ty, IN], [IntrNoMem]>;
+ class Builtin_SVCVT<string name, LLVMType OUT, LLVMType PRED, LLVMType IN>
+ : Intrinsic<[OUT], [OUT, PRED, IN], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
@@ -1107,7 +1268,7 @@ class AdvSIMD_SVE_WHILE_Intrinsic
[llvm_anyint_ty, LLVMMatchType<1>],
[IntrNoMem]>;
-class AdvSIMD_GatherLoad_64bitOffset_Intrinsic
+class AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1116,7 +1277,7 @@ class AdvSIMD_GatherLoad_64bitOffset_Intrinsic
],
[IntrReadMem, IntrArgMemOnly]>;
-class AdvSIMD_GatherLoad_32bitOffset_Intrinsic
+class AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1125,7 +1286,7 @@ class AdvSIMD_GatherLoad_32bitOffset_Intrinsic
],
[IntrReadMem, IntrArgMemOnly]>;
-class AdvSIMD_GatherLoad_VecTorBase_Intrinsic
+class AdvSIMD_GatherLoad_VS_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1134,7 +1295,7 @@ class AdvSIMD_GatherLoad_VecTorBase_Intrinsic
],
[IntrReadMem, IntrArgMemOnly]>;
-class AdvSIMD_ScatterStore_64bitOffset_Intrinsic
+class AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic
: Intrinsic<[],
[
llvm_anyvector_ty,
@@ -1144,7 +1305,7 @@ class AdvSIMD_ScatterStore_64bitOffset_Intrinsic
],
[IntrWriteMem, IntrArgMemOnly]>;
-class AdvSIMD_ScatterStore_32bitOffset_Intrinsic
+class AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic
: Intrinsic<[],
[
llvm_anyvector_ty,
@@ -1154,28 +1315,148 @@ class AdvSIMD_ScatterStore_32bitOffset_Intrinsic
],
[IntrWriteMem, IntrArgMemOnly]>;
-class AdvSIMD_ScatterStore_VectorBase_Intrinsic
+class AdvSIMD_ScatterStore_VS_Intrinsic
: Intrinsic<[],
[
llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyvector_ty, llvm_i64_ty
],
- [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;
+ [IntrWriteMem, IntrArgMemOnly]>;
+
+
+class SVE_gather_prf_SV
+ : Intrinsic<[],
+ [
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
+ llvm_ptr_ty, // Base address
+ llvm_anyvector_ty, // Offsets
+ llvm_i32_ty // Prfop
+ ],
+ [IntrInaccessibleMemOrArgMemOnly, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;
+
+class SVE_gather_prf_VS
+ : Intrinsic<[],
+ [
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
+ llvm_anyvector_ty, // Base addresses
+ llvm_i64_ty, // Scalar offset
+ llvm_i32_ty // Prfop
+ ],
+ [IntrInaccessibleMemOrArgMemOnly, ImmArg<ArgIndex<3>>]>;
+
+class SVE_MatMul_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMSubdivide4VectorType<0>, LLVMSubdivide4VectorType<0>],
+ [IntrNoMem]>;
+
+class SVE_4Vec_BF16
+ : Intrinsic<[llvm_nxv4f32_ty],
+ [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty],
+ [IntrNoMem]>;
+
+class SVE_4Vec_BF16_Indexed
+ : Intrinsic<[llvm_nxv4f32_ty],
+ [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty, llvm_i64_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+//
+// Vector tuple creation intrinsics (ACLE)
+//
+
+def int_aarch64_sve_tuple_create2 : AdvSIMD_SVE_Create_2Vector_Tuple;
+def int_aarch64_sve_tuple_create3 : AdvSIMD_SVE_Create_3Vector_Tuple;
+def int_aarch64_sve_tuple_create4 : AdvSIMD_SVE_Create_4Vector_Tuple;
+
+//
+// Vector tuple insertion/extraction intrinsics (ACLE)
+//
+
+def int_aarch64_sve_tuple_get : AdvSIMD_SVE_Get_Vector_Tuple;
+def int_aarch64_sve_tuple_set : AdvSIMD_SVE_Set_Vector_Tuple;
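For illustration, a plausible IR use of the tuple intrinsics (overload suffixes and value names are assumptions following the usual mangling); the extraction index must be an immediate:

  %t = call <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2.nxv32i8.nxv16i8(
           <vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1)
  %e = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8(
           <vscale x 32 x i8> %t, i32 1)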
//
// Loads
//
+def int_aarch64_sve_ld1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
+
+def int_aarch64_sve_ld2 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld3 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld4 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
+
def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
+
+def int_aarch64_sve_ld1rq : AdvSIMD_1Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld1ro : AdvSIMD_1Vec_PredLoad_Intrinsic;
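As a hedged example of the predicated load form (value names assumed): the predicate width matches the result element count and the pointer is to the element type:

  %v = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(
           <vscale x 16 x i1> %pg, i8* %addr)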
//
// Stores
//
+def int_aarch64_sve_st1 : AdvSIMD_1Vec_PredStore_Intrinsic;
+def int_aarch64_sve_st2 : AdvSIMD_2Vec_PredStore_Intrinsic;
+def int_aarch64_sve_st3 : AdvSIMD_3Vec_PredStore_Intrinsic;
+def int_aarch64_sve_st4 : AdvSIMD_4Vec_PredStore_Intrinsic;
+
def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
//
+// Prefetches
+//
+
+def int_aarch64_sve_prf
+ : Intrinsic<[], [llvm_anyvector_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrArgMemOnly, ImmArg<ArgIndex<2>>]>;
+
+// Scalar + 32-bit scaled offset vector, zero extend, packed and
+// unpacked.
+def int_aarch64_sve_prfb_gather_uxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfh_gather_uxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfw_gather_uxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfd_gather_uxtw_index : SVE_gather_prf_SV;
+
+// Scalar + 32-bit scaled offset vector, sign extend, packed and
+// unpacked.
+def int_aarch64_sve_prfb_gather_sxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfw_gather_sxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfh_gather_sxtw_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfd_gather_sxtw_index : SVE_gather_prf_SV;
+
+// Scalar + 64-bit scaled offset vector.
+def int_aarch64_sve_prfb_gather_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfh_gather_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfw_gather_index : SVE_gather_prf_SV;
+def int_aarch64_sve_prfd_gather_index : SVE_gather_prf_SV;
+
+// Vector + scalar.
+def int_aarch64_sve_prfb_gather_scalar_offset : SVE_gather_prf_VS;
+def int_aarch64_sve_prfh_gather_scalar_offset : SVE_gather_prf_VS;
+def int_aarch64_sve_prfw_gather_scalar_offset : SVE_gather_prf_VS;
+def int_aarch64_sve_prfd_gather_scalar_offset : SVE_gather_prf_VS;
+
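For illustration, a plausible call to one of the gather prefetches (the overload suffix, value names, and prfop value are assumptions); the final operand selects the prefetch operation and must be an immediate:

  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32(
      <vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets, i32 1)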
+//
+// Scalar to vector operations
+//
+
+def int_aarch64_sve_dup : AdvSIMD_SVE_DUP_Intrinsic;
+def int_aarch64_sve_dup_x : AdvSIMD_SVE_DUP_Unpred_Intrinsic;
+
+def int_aarch64_sve_index : AdvSIMD_SVE_Index_Intrinsic;
+
+//
+// Address calculation
+//
+
+def int_aarch64_sve_adrb : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_adrh : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_adrw : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_adrd : AdvSIMD_2VectorArg_Intrinsic;
+
+//
// Integer arithmetic
//
@@ -1183,7 +1464,10 @@ def int_aarch64_sve_add : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sub : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_subr : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
def int_aarch64_sve_mul : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_mul_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_smulh : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umulh : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1202,7 +1486,9 @@ def int_aarch64_sve_uabd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mad : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_msb : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla : AdvSIMD_Pred3VectorArg_Intrinsic;
+def int_aarch64_sve_mla_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_mls : AdvSIMD_Pred3VectorArg_Intrinsic;
+def int_aarch64_sve_mls_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_saddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
def int_aarch64_sve_uaddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
@@ -1225,6 +1511,11 @@ def int_aarch64_sve_sdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
def int_aarch64_sve_udot : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
+def int_aarch64_sve_sqadd_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqsub_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqadd_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsub_x : AdvSIMD_2VectorArg_Intrinsic;
+
// Shifts
def int_aarch64_sve_asr : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1278,6 +1569,15 @@ def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic;
//
+// FFR manipulation
+//
+
+def int_aarch64_sve_rdffr : GCCBuiltin<"__builtin_sve_svrdffr">, Intrinsic<[llvm_nxv16i1_ty], []>;
+def int_aarch64_sve_rdffr_z : GCCBuiltin<"__builtin_sve_svrdffr_z">, Intrinsic<[llvm_nxv16i1_ty], [llvm_nxv16i1_ty]>;
+def int_aarch64_sve_setffr : GCCBuiltin<"__builtin_sve_svsetffr">, Intrinsic<[], []>;
+def int_aarch64_sve_wrffr : GCCBuiltin<"__builtin_sve_svwrffr">, Intrinsic<[], [llvm_nxv16i1_ty]>;
+
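A minimal sketch of FFR use around a first-faulting load (value names assumed): set the FFR, load, then read back which lanes did not fault:

  call void @llvm.aarch64.sve.setffr()
  %v   = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(
             <vscale x 16 x i1> %pg, i8* %addr)
  %ffr = call <vscale x 16 x i1> @llvm.aarch64.sve.rdffr()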
+//
// Saturating scalar arithmetic
//
@@ -1363,7 +1663,9 @@ def int_aarch64_sve_clasta_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_clastb : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_clastb_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_compact : AdvSIMD_Pred1VectorArg_Intrinsic;
+def int_aarch64_sve_dupq_lane : AdvSIMD_SVE_DUPQ_Intrinsic;
def int_aarch64_sve_ext : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_sel : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lasta : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_lastb : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_rev : AdvSIMD_1VectorArg_Intrinsic;
@@ -1373,12 +1675,18 @@ def int_aarch64_sve_sunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_tbl : AdvSIMD_SVE_TBL_Intrinsic;
def int_aarch64_sve_trn1 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_trn1q : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_trn2q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uzp1 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uzp1q : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uzp2q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip1 : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_zip1q : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_zip2q : AdvSIMD_2VectorArg_Intrinsic;
//
// Logical operations
@@ -1470,12 +1778,12 @@ def int_aarch64_sve_ftssel_x : AdvSIMD_SVE_TSMUL_Intrinsic;
// Floating-point reductions
//
-def int_aarch64_sve_fadda : AdvSIMD_SVE_FP_ReduceWithInit_Intrinsic;
-def int_aarch64_sve_faddv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
-def int_aarch64_sve_fmaxv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
-def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
-def int_aarch64_sve_fminv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
-def int_aarch64_sve_fminnmv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
+def int_aarch64_sve_fadda : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
+def int_aarch64_sve_faddv : AdvSIMD_SVE_Reduce_Intrinsic;
+def int_aarch64_sve_fmaxv : AdvSIMD_SVE_Reduce_Intrinsic;
+def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_Reduce_Intrinsic;
+def int_aarch64_sve_fminv : AdvSIMD_SVE_Reduce_Intrinsic;
+def int_aarch64_sve_fminnmv : AdvSIMD_SVE_Reduce_Intrinsic;
//
// Floating-point conversions
@@ -1500,41 +1808,44 @@ def int_aarch64_sve_fcmpgt : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpne : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpuo : AdvSIMD_SVE_Compare_Intrinsic;
-def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvtzs_i32f64 : Builtin_SVCVT<"svcvt_s32_f64_m", llvm_nxv4i32_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_fcvtzs_i64f16 : Builtin_SVCVT<"svcvt_s64_f16_m", llvm_nxv2i64_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvtzs_i64f32 : Builtin_SVCVT<"svcvt_s64_f32_m", llvm_nxv2i64_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvtzs_i32f64 : Builtin_SVCVT<"svcvt_s32_f64_m", llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvtzs_i64f16 : Builtin_SVCVT<"svcvt_s64_f16_m", llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvtzs_i64f32 : Builtin_SVCVT<"svcvt_s64_f32_m", llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
+
+def int_aarch64_sve_fcvt_bf16f32 : Builtin_SVCVT<"svcvt_bf16_f32_m", llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvtnt_bf16f32 : Builtin_SVCVT<"svcvtnt_bf16_f32_m", llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;
-def int_aarch64_sve_fcvtzu_i32f16 : Builtin_SVCVT<"svcvt_u32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvtzu_i32f64 : Builtin_SVCVT<"svcvt_u32_f64_m", llvm_nxv4i32_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_fcvtzu_i64f16 : Builtin_SVCVT<"svcvt_u64_f16_m", llvm_nxv2i64_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvtzu_i64f32 : Builtin_SVCVT<"svcvt_u64_f32_m", llvm_nxv2i64_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvtzu_i32f16 : Builtin_SVCVT<"svcvt_u32_f16_m", llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvtzu_i32f64 : Builtin_SVCVT<"svcvt_u32_f64_m", llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvtzu_i64f16 : Builtin_SVCVT<"svcvt_u64_f16_m", llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvtzu_i64f32 : Builtin_SVCVT<"svcvt_u64_f32_m", llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
-def int_aarch64_sve_fcvt_f16f32 : Builtin_SVCVT<"svcvt_f16_f32_m", llvm_nxv8f16_ty, llvm_nxv4f32_ty>;
-def int_aarch64_sve_fcvt_f16f64 : Builtin_SVCVT<"svcvt_f16_f64_m", llvm_nxv8f16_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_fcvt_f32f64 : Builtin_SVCVT<"svcvt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvt_f16f32 : Builtin_SVCVT<"svcvt_f16_f32_m", llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvt_f16f64 : Builtin_SVCVT<"svcvt_f16_f64_m", llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvt_f32f64 : Builtin_SVCVT<"svcvt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_fcvt_f32f16 : Builtin_SVCVT<"svcvt_f32_f16_m", llvm_nxv4f32_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvt_f64f16 : Builtin_SVCVT<"svcvt_f64_f16_m", llvm_nxv2f64_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvt_f64f32 : Builtin_SVCVT<"svcvt_f64_f32_m", llvm_nxv2f64_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvt_f32f16 : Builtin_SVCVT<"svcvt_f32_f16_m", llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvt_f64f16 : Builtin_SVCVT<"svcvt_f64_f16_m", llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvt_f64f32 : Builtin_SVCVT<"svcvt_f64_f32_m", llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
-def int_aarch64_sve_fcvtlt_f32f16 : Builtin_SVCVT<"svcvtlt_f32_f16_m", llvm_nxv4f32_ty, llvm_nxv8f16_ty>;
-def int_aarch64_sve_fcvtlt_f64f32 : Builtin_SVCVT<"svcvtlt_f64_f32_m", llvm_nxv2f64_ty, llvm_nxv4f32_ty>;
-def int_aarch64_sve_fcvtnt_f16f32 : Builtin_SVCVT<"svcvtnt_f16_f32_m", llvm_nxv8f16_ty, llvm_nxv4f32_ty>;
-def int_aarch64_sve_fcvtnt_f32f64 : Builtin_SVCVT<"svcvtnt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvtlt_f32f16 : Builtin_SVCVT<"svcvtlt_f32_f16_m", llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
+def int_aarch64_sve_fcvtlt_f64f32 : Builtin_SVCVT<"svcvtlt_f64_f32_m", llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvtnt_f16f32 : Builtin_SVCVT<"svcvtnt_f16_f32_m", llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
+def int_aarch64_sve_fcvtnt_f32f64 : Builtin_SVCVT<"svcvtnt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_fcvtx_f32f64 : Builtin_SVCVT<"svcvtx_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_fcvtxnt_f32f64 : Builtin_SVCVT<"svcvtxnt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvtx_f32f64 : Builtin_SVCVT<"svcvtx_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
+def int_aarch64_sve_fcvtxnt_f32f64 : Builtin_SVCVT<"svcvtxnt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
-def int_aarch64_sve_scvtf_f16i32 : Builtin_SVCVT<"svcvt_f16_s32_m", llvm_nxv8f16_ty, llvm_nxv4i32_ty>;
-def int_aarch64_sve_scvtf_f16i64 : Builtin_SVCVT<"svcvt_f16_s64_m", llvm_nxv8f16_ty, llvm_nxv2i64_ty>;
-def int_aarch64_sve_scvtf_f32i64 : Builtin_SVCVT<"svcvt_f32_s64_m", llvm_nxv4f32_ty, llvm_nxv2i64_ty>;
-def int_aarch64_sve_scvtf_f64i32 : Builtin_SVCVT<"svcvt_f64_s32_m", llvm_nxv2f64_ty, llvm_nxv4i32_ty>;
+def int_aarch64_sve_scvtf_f16i32 : Builtin_SVCVT<"svcvt_f16_s32_m", llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
+def int_aarch64_sve_scvtf_f16i64 : Builtin_SVCVT<"svcvt_f16_s64_m", llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
+def int_aarch64_sve_scvtf_f32i64 : Builtin_SVCVT<"svcvt_f32_s64_m", llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
+def int_aarch64_sve_scvtf_f64i32 : Builtin_SVCVT<"svcvt_f64_s32_m", llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;
-def int_aarch64_sve_ucvtf_f16i32 : Builtin_SVCVT<"svcvt_f16_u32_m", llvm_nxv8f16_ty, llvm_nxv4i32_ty>;
-def int_aarch64_sve_ucvtf_f16i64 : Builtin_SVCVT<"svcvt_f16_u64_m", llvm_nxv8f16_ty, llvm_nxv2i64_ty>;
-def int_aarch64_sve_ucvtf_f32i64 : Builtin_SVCVT<"svcvt_f32_u64_m", llvm_nxv4f32_ty, llvm_nxv2i64_ty>;
-def int_aarch64_sve_ucvtf_f64i32 : Builtin_SVCVT<"svcvt_f64_u32_m", llvm_nxv2f64_ty, llvm_nxv4i32_ty>;
+def int_aarch64_sve_ucvtf_f16i32 : Builtin_SVCVT<"svcvt_f16_u32_m", llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
+def int_aarch64_sve_ucvtf_f16i64 : Builtin_SVCVT<"svcvt_f16_u64_m", llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
+def int_aarch64_sve_ucvtf_f32i64 : Builtin_SVCVT<"svcvt_f32_u64_m", llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
+def int_aarch64_sve_ucvtf_f64i32 : Builtin_SVCVT<"svcvt_f64_u32_m", llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;
//
// Predicate creation
@@ -1548,6 +1859,13 @@ def int_aarch64_sve_ptrue : AdvSIMD_SVE_PTRUE_Intrinsic;
def int_aarch64_sve_and_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic_z : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_brka : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_brka_z : AdvSIMD_Pred1VectorArg_Intrinsic;
+def int_aarch64_sve_brkb : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_brkb_z : AdvSIMD_Pred1VectorArg_Intrinsic;
+def int_aarch64_sve_brkn_z : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_brkpa_z : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_brkpb_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eor_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nand_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nor_z : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1567,67 +1885,267 @@ def int_aarch64_sve_ptest_first : AdvSIMD_SVE_PTEST_Intrinsic;
def int_aarch64_sve_ptest_last : AdvSIMD_SVE_PTEST_Intrinsic;
//
-// Gather loads:
+// Reinterpreting data
+//
+
+def int_aarch64_sve_convert_from_svbool : Intrinsic<[llvm_anyvector_ty],
+ [llvm_nxv16i1_ty],
+ [IntrNoMem]>;
+
+def int_aarch64_sve_convert_to_svbool : Intrinsic<[llvm_nxv16i1_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem]>;
+
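For illustration (value names assumed), narrower SVE predicates round-trip through the common <vscale x 16 x i1> svbool representation:

  %pg16 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(
              <vscale x 4 x i1> %pg4)
  %pg4b = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(
              <vscale x 16 x i1> %pg16)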
+//
+// Gather loads: scalar base + vector offsets
+//
+
+// 64 bit unscaled offsets
+def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
+
+// 64 bit scaled offsets
+def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
+
+// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
+def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+
+// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
+def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+
+//
+// Gather loads: vector base + scalar offset
+//
+
+def int_aarch64_sve_ld1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic;
+
+
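As a hedged example of the scalar-base/vector-index form above (overload suffix and value names are assumptions), loading i64 elements from %base plus %indices scaled by the element size:

  %v = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(
           <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %indices)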
+//
+// First-faulting gather loads: scalar base + vector offsets
+//
+
+// 64 bit unscaled offsets
+def int_aarch64_sve_ldff1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
+
+// 64 bit scaled offsets
+def int_aarch64_sve_ldff1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
+
+// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
+def int_aarch64_sve_ldff1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+def int_aarch64_sve_ldff1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+
+// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
+def int_aarch64_sve_ldff1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+def int_aarch64_sve_ldff1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
+
+//
+// First-faulting gather loads: vector base + scalar offset
//
-// scalar + vector, 64 bit unscaled offsets
-def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;
+def int_aarch64_sve_ldff1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic;
-// scalar + vector, 64 bit scaled offsets
-def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;
-// scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (zxtw)
-// extended to 64 bits
-def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
-def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
+//
+// Non-temporal gather loads: scalar base + vector offsets
+//
+
+// 64 bit unscaled offsets
+def int_aarch64_sve_ldnt1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
-// scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended
-// to 64 bits
-def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
-def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
+// 64 bit indices
+def int_aarch64_sve_ldnt1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
-// vector base + immediate index
-def int_aarch64_sve_ld1_gather_imm : AdvSIMD_GatherLoad_VecTorBase_Intrinsic;
+// 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
+def int_aarch64_sve_ldnt1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
//
-// Scatter stores:
+// Non-temporal gather loads: vector base + scalar offset
//
-// scalar + vector, 64 bit unscaled offsets
-def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_64bitOffset_Intrinsic;
+def int_aarch64_sve_ldnt1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic;
-// scalar + vector, 64 bit scaled offsets
+//
+// Scatter stores: scalar base + vector offsets
+//
+
+// 64 bit unscaled offsets
+def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
+
+// 64 bit scaled offsets
def int_aarch64_sve_st1_scatter_index
- : AdvSIMD_ScatterStore_64bitOffset_Intrinsic;
+ : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
-// scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (zxtw)
-// extended to 64 bits
+// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw
- : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
+ : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
def int_aarch64_sve_st1_scatter_uxtw
- : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
+ : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
-// scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended
-// to 64 bits
+// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw_index
- : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
+ : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
def int_aarch64_sve_st1_scatter_uxtw_index
- : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
+ : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
+
+//
+// Scatter stores: vector base + scalar offset
+//
-// vector base + immediate index
-def int_aarch64_sve_st1_scatter_imm : AdvSIMD_ScatterStore_VectorBase_Intrinsic;
+def int_aarch64_sve_st1_scatter_scalar_offset : AdvSIMD_ScatterStore_VS_Intrinsic;
+
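Correspondingly, a plausible scatter store in the scalar-base/vector-index form (suffix and value names assumed):

  call void @llvm.aarch64.sve.st1.scatter.index.nxv2i64(
      <vscale x 2 x i64> %data, <vscale x 2 x i1> %pg,
      i64* %base, <vscale x 2 x i64> %indices)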
+//
+// Non-temporal scatter stores: scalar base + vector offsets
+//
+
+// 64 bit unscaled offsets
+def int_aarch64_sve_stnt1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
+
+// 64 bit indices
+def int_aarch64_sve_stnt1_scatter_index
+ : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
+
+// 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
+def int_aarch64_sve_stnt1_scatter_uxtw : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
+
+//
+// Non-temporal scatter stores: vector base + scalar offset
+//
+
+def int_aarch64_sve_stnt1_scatter_scalar_offset : AdvSIMD_ScatterStore_VS_Intrinsic;
+
+//
+// SVE2 - Uniform DSP operations
+//
+
+def int_aarch64_sve_saba : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_shadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_shsub : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_shsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sli : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_sqabs : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_sqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sqdmulh : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqdmulh_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_sqneg : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_sqrdmlah : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_sqrdmlah_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_sqrdmlsh : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_sqrdmlsh_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_sqrdmulh : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqrdmulh_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_sqrshl : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sqshl : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sqshlu : AdvSIMD_SVE_ShiftByImm_Intrinsic;
+def int_aarch64_sve_sqsub : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sqsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_srhadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sri : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_srshl : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_srshr : AdvSIMD_SVE_ShiftByImm_Intrinsic;
+def int_aarch64_sve_srsra : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_ssra : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_suqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uaba : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_uhadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uhsub : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uhsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uqrshl : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uqshl : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsub : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsubr : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_urecpe : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_urhadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_urshl : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_urshr : AdvSIMD_SVE_ShiftByImm_Intrinsic;
+def int_aarch64_sve_ursqrte : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_ursra : AdvSIMD_2VectorArgIndexed_Intrinsic;
+def int_aarch64_sve_usqadd : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_usra : AdvSIMD_2VectorArgIndexed_Intrinsic;
+
+//
+// SVE2 - Widening DSP operations
+//
+
+def int_aarch64_sve_sabalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sabalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sabdlb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sabdlt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_saddlb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_saddlt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_saddwb : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_saddwt : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_sshllb : SVE2_1VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sshllt : SVE2_1VectorArg_Long_Intrinsic;
+def int_aarch64_sve_ssublb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_ssublt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_ssubwb : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_ssubwt : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_uabalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_uabalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_uabdlb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_uabdlt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_uaddlb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_uaddlt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_uaddwb : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_uaddwt : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_ushllb : SVE2_1VectorArg_Long_Intrinsic;
+def int_aarch64_sve_ushllt : SVE2_1VectorArg_Long_Intrinsic;
+def int_aarch64_sve_usublb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_usublt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_usubwb : SVE2_2VectorArg_Wide_Intrinsic;
+def int_aarch64_sve_usubwt : SVE2_2VectorArg_Wide_Intrinsic;
//
// SVE2 - Non-widening pairwise arithmetic
//
+def int_aarch64_sve_addp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_faddp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_smaxp : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_sminp : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_umaxp : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_uminp : AdvSIMD_Pred2VectorArg_Intrinsic;
+
+//
+// SVE2 - Widening pairwise arithmetic
+//
+
+def int_aarch64_sve_sadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;
+def int_aarch64_sve_uadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;
+
+//
+// SVE2 - Uniform complex integer arithmetic
+//
+
+def int_aarch64_sve_cadd_x : AdvSIMD_SVE2_CADD_Intrinsic;
+def int_aarch64_sve_sqcadd_x : AdvSIMD_SVE2_CADD_Intrinsic;
+def int_aarch64_sve_cmla_x : AdvSIMD_SVE2_CMLA_Intrinsic;
+def int_aarch64_sve_cmla_lane_x : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
+def int_aarch64_sve_sqrdcmlah_x : AdvSIMD_SVE2_CMLA_Intrinsic;
+def int_aarch64_sve_sqrdcmlah_lane_x : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
+
+//
+// SVE2 - Widening complex integer arithmetic
+//
+
+def int_aarch64_sve_saddlbt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_ssublbt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_ssubltb : SVE2_2VectorArg_Long_Intrinsic;
+
+//
+// SVE2 - Widening complex integer dot product
+//
+
+def int_aarch64_sve_cdot : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
+def int_aarch64_sve_cdot_lane : AdvSIMD_SVE_CDOT_LANE_Intrinsic;
//
// SVE2 - Floating-point widening multiply-accumulate
@@ -1649,6 +2167,20 @@ def int_aarch64_sve_fmlslt_lane : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_flogb : AdvSIMD_SVE_LOGB_Intrinsic;
//
+// SVE2 - Vector histogram count
+//
+
+def int_aarch64_sve_histcnt : AdvSIMD_Pred2VectorArg_Intrinsic;
+def int_aarch64_sve_histseg : AdvSIMD_2VectorArg_Intrinsic;
+
+//
+// SVE2 - Character match
+//
+
+def int_aarch64_sve_match : AdvSIMD_SVE_Compare_Intrinsic;
+def int_aarch64_sve_nmatch : AdvSIMD_SVE_Compare_Intrinsic;
+
+//
// SVE2 - Unary narrowing operations
//
@@ -1701,4 +2233,163 @@ def int_aarch64_sve_sqshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
+
+// SVE2 MLA LANE.
+def int_aarch64_sve_smlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
+def int_aarch64_sve_smullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
+def int_aarch64_sve_umullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
+def int_aarch64_sve_umullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
+def int_aarch64_sve_sqdmlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
+def int_aarch64_sve_sqdmullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
+
+// SVE2 MLA Unpredicated.
+def int_aarch64_sve_smlalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smlalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smlslb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smlslt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlslb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlslt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smullb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smullt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umullb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umullt : SVE2_2VectorArg_Long_Intrinsic;
+
+def int_aarch64_sve_sqdmlalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlslb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlslt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmullb : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmullt : SVE2_2VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlalbt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlslbt : SVE2_3VectorArg_Long_Intrinsic;
+
+// SVE2 ADDSUB Long Unpredicated.
+def int_aarch64_sve_adclb : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_adclt : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_sbclb : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_sbclt : AdvSIMD_3VectorArg_Intrinsic;
+
+//
+// SVE2 - Polynomial arithmetic
+//
+def int_aarch64_sve_eorbt : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_eortb : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_pmullb_pair : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_pmullt_pair : AdvSIMD_2VectorArg_Intrinsic;
+
+//
+// SVE2 - Bitwise ternary operations
+//
+def int_aarch64_sve_eor3 : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_bcax : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_bsl : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_bsl1n : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_bsl2n : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_nbsl : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_xar : AdvSIMD_2VectorArgIndexed_Intrinsic;
+
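For illustration (overload suffix and value names assumed), the three-way EOR computes a ^ b ^ c in a single operation:

  %r = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(
           <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)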
+//
+// SVE2 - Optional AES, SHA-3 and SM4
+//
+
+def int_aarch64_sve_aesd : GCCBuiltin<"__builtin_sve_svaesd_u8">,
+ Intrinsic<[llvm_nxv16i8_ty],
+ [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
+ [IntrNoMem]>;
+def int_aarch64_sve_aesimc : GCCBuiltin<"__builtin_sve_svaesimc_u8">,
+ Intrinsic<[llvm_nxv16i8_ty],
+ [llvm_nxv16i8_ty],
+ [IntrNoMem]>;
+def int_aarch64_sve_aese : GCCBuiltin<"__builtin_sve_svaese_u8">,
+ Intrinsic<[llvm_nxv16i8_ty],
+ [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
+ [IntrNoMem]>;
+def int_aarch64_sve_aesmc : GCCBuiltin<"__builtin_sve_svaesmc_u8">,
+ Intrinsic<[llvm_nxv16i8_ty],
+ [llvm_nxv16i8_ty],
+ [IntrNoMem]>;
+def int_aarch64_sve_rax1 : GCCBuiltin<"__builtin_sve_svrax1_u64">,
+ Intrinsic<[llvm_nxv2i64_ty],
+ [llvm_nxv2i64_ty, llvm_nxv2i64_ty],
+ [IntrNoMem]>;
+def int_aarch64_sve_sm4e : GCCBuiltin<"__builtin_sve_svsm4e_u32">,
+ Intrinsic<[llvm_nxv4i32_ty],
+ [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_sve_sm4ekey : GCCBuiltin<"__builtin_sve_svsm4ekey_u32">,
+ Intrinsic<[llvm_nxv4i32_ty],
+ [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
+ [IntrNoMem]>;
+//
+// SVE2 - Extended table lookup/permute
+//
+
+def int_aarch64_sve_tbl2 : AdvSIMD_SVE2_TBX_Intrinsic;
+def int_aarch64_sve_tbx : AdvSIMD_SVE2_TBX_Intrinsic;
+
+//
+// SVE2 - Optional bit permutation
+//
+
+def int_aarch64_sve_bdep_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_bext_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_bgrp_x : AdvSIMD_2VectorArg_Intrinsic;
+
+//
+// SVE ACLE: 7.3. INT8 matrix multiply extensions
+//
+def int_aarch64_sve_ummla : SVE_MatMul_Intrinsic;
+def int_aarch64_sve_smmla : SVE_MatMul_Intrinsic;
+def int_aarch64_sve_usmmla : SVE_MatMul_Intrinsic;
+
+def int_aarch64_sve_usdot : AdvSIMD_SVE_DOT_Intrinsic;
+def int_aarch64_sve_usdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
+def int_aarch64_sve_sudot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
+
+//
+// SVE ACLE: 7.4/5. FP64/FP32 matrix multiply extensions
+//
+def int_aarch64_sve_fmmla : AdvSIMD_3VectorArg_Intrinsic;
+
+//
+// SVE ACLE: 7.2. BFloat16 extensions
+//
+
+def int_aarch64_sve_bfdot : SVE_4Vec_BF16;
+def int_aarch64_sve_bfmlalb : SVE_4Vec_BF16;
+def int_aarch64_sve_bfmlalt : SVE_4Vec_BF16;
+
+def int_aarch64_sve_bfmmla : SVE_4Vec_BF16;
+
+def int_aarch64_sve_bfdot_lane : SVE_4Vec_BF16_Indexed;
+def int_aarch64_sve_bfmlalb_lane : SVE_4Vec_BF16_Indexed;
+def int_aarch64_sve_bfmlalt_lane : SVE_4Vec_BF16_Indexed;
}
+
+//
+// SVE2 - Contiguous conflict detection
+//
+
+def int_aarch64_sve_whilerw_b : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilerw_h : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilerw_s : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilerw_d : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilewr_b : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilewr_h : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilewr_s : SVE2_CONFLICT_DETECT_Intrinsic;
+def int_aarch64_sve_whilewr_d : SVE2_CONFLICT_DETECT_Intrinsic;
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 07ca3a9229d6..01380afae006 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -11,10 +11,10 @@
//===----------------------------------------------------------------------===//
class AMDGPUReadPreloadRegisterIntrinsic
- : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
- : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin<name>;
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>, GCCBuiltin<name>;
// Used to tag image and resource intrinsics with information used to generate
// mem operands.
@@ -48,35 +48,35 @@ defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;
def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
- Intrinsic<[], [], [IntrConvergent]>;
+ Intrinsic<[], [], [IntrConvergent, IntrWillReturn]>;
// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
GCCBuiltin<"__builtin_r600_implicitarg_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
- [IntrNoMem, IntrSpeculatable]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_r600_rat_store_typed :
// 1st parameter: Data
// 2nd parameter: Index
// 3rd parameter: Constant RAT ID
- Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
+ Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrWillReturn]>,
GCCBuiltin<"__builtin_r600_rat_store_typed">;
def int_r600_recipsqrt_ieee : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_r600_recipsqrt_clamped : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_r600_cube : Intrinsic<
- [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
+ [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_r600_store_stream_output : Intrinsic<
- [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []
+ [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn]
>;
class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [
@@ -90,7 +90,7 @@ class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [
llvm_i32_ty, // coord_type_y
llvm_i32_ty, // coord_type_z
llvm_i32_ty], // coord_type_w
- [IntrNoMem]
+ [IntrNoMem, IntrWillReturn]
>;
class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [
@@ -104,11 +104,11 @@ class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [
llvm_i32_ty, // coord_type_y
llvm_i32_ty, // coord_type_z
llvm_i32_ty], // coord_type_w
- [IntrNoMem]
+ [IntrNoMem, IntrWillReturn]
>;
def int_r600_store_swizzle :
- Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []
+ Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn]
>;
def int_r600_tex : TextureIntrinsicFloatInput;
@@ -123,10 +123,10 @@ def int_r600_ddx : TextureIntrinsicFloatInput;
def int_r600_ddy : TextureIntrinsicFloatInput;
def int_r600_dot4 : Intrinsic<[llvm_float_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
+ [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
-def int_r600_kill : Intrinsic<[], [llvm_float_ty], []>;
+def int_r600_kill : Intrinsic<[], [llvm_float_ty], [IntrWillReturn]>;
} // End TargetPrefix = "r600"
@@ -141,44 +141,43 @@ defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
<"__builtin_amdgcn_workgroup_id">;
def int_amdgcn_dispatch_ptr :
- GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
- [IntrNoMem, IntrSpeculatable]>;
+ [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_queue_ptr :
GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
- [IntrNoMem, IntrSpeculatable]>;
+ [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_kernarg_segment_ptr :
GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
- [IntrNoMem, IntrSpeculatable]>;
+ [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_implicitarg_ptr :
GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
- [IntrNoMem, IntrSpeculatable]>;
+ [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_groupstaticsize :
GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_dispatch_id :
GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
- Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_implicit_buffer_ptr :
GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
- [IntrNoMem, IntrSpeculatable]>;
+ [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
// FIXME: Should be mangled for wave size.
def int_amdgcn_init_exec : Intrinsic<[],
[llvm_i64_ty], // 64-bit literal constant
- [IntrConvergent, ImmArg<0>]>;
+ [IntrConvergent, ImmArg<ArgIndex<0>>]>;
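Throughout this patch, raw attribute indices such as ImmArg<0> and
NoCapture<0> are rewritten to the typed form ImmArg<ArgIndex<0>>, and
return-value attributes use RetIndex (e.g. Align<RetIndex, 4> above). For
reference, the index helpers in Intrinsics.td look approximately like this
(shape per this LLVM revision; treat as illustrative):

    // Attribute indices: -1 = function, 0 = return value, N+1 = argument N.
    class AttrIndex<int idx> {
      int Value = idx;
    }
    def FuncIndex : AttrIndex<-1>;
    def RetIndex  : AttrIndex<0>;
    class ArgIndex<int argNo> : AttrIndex<!add(argNo, 1)>;

The typed wrappers let TableGen tell argument attributes apart from return
and function attributes instead of overloading a bare integer.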
// Set EXEC according to a thread count packed in an SGPR input:
// thread_count = (input >> bitoffset) & 0x7f;
@@ -186,11 +185,11 @@ def int_amdgcn_init_exec : Intrinsic<[],
def int_amdgcn_init_exec_from_input : Intrinsic<[],
[llvm_i32_ty, // 32-bit SGPR input
llvm_i32_ty], // bit offset of the thread count
- [IntrConvergent, ImmArg<1>]>;
+ [IntrConvergent, ImmArg<ArgIndex<1>>]>;
def int_amdgcn_wavefrontsize :
GCCBuiltin<"__builtin_amdgcn_wavefrontsize">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
//===----------------------------------------------------------------------===//
@@ -201,180 +200,186 @@ def int_amdgcn_wavefrontsize :
// the second one is copied to m0
def int_amdgcn_s_sendmsg : GCCBuiltin<"__builtin_amdgcn_s_sendmsg">,
Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
- [ImmArg<0>, IntrNoMem, IntrHasSideEffects]>;
+ [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_sendmsghalt : GCCBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
- [ImmArg<0>, IntrNoMem, IntrHasSideEffects]>;
+ [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
- Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent]>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn]>;
def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
- Intrinsic<[], [], [IntrConvergent]>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn]>;
def int_amdgcn_s_waitcnt : GCCBuiltin<"__builtin_amdgcn_s_waitcnt">,
- Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]>;
+ Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_div_scale : Intrinsic<
// 1st parameter: Numerator
// 2nd parameter: Denominator
- // 3rd parameter: Constant to select between first and
- // second. (0 = first, 1 = second).
+ // 3rd parameter: Constant select of which input is scaled and returned
+ // (0 = Denominator, 1 = Numerator).
[llvm_anyfloat_ty, llvm_i1_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<2>]
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>, IntrWillReturn]
>;
def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
+// Look up 2.0/pi for src0, with segment select src1[4:0].
def int_amdgcn_trig_preop : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_sin : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cos : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_log_clamp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_rcp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
Intrinsic<[llvm_float_ty], [llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
+>;
+
+def int_amdgcn_sqrt : Intrinsic<
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_rsq : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_rsq_legacy : GCCBuiltin<"__builtin_amdgcn_rsq_legacy">,
Intrinsic<
- [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+ [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
+// out = 1.0 / sqrt(a), with the result clamped to +/- max_float.
def int_amdgcn_rsq_clamp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_amdgcn_ldexp : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_frexp_mant : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_frexp_exp : Intrinsic<
- [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cvt_pkrtz : GCCBuiltin<"__builtin_amdgcn_cvt_pkrtz">,
Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cvt_pknorm_i16 :
GCCBuiltin<"__builtin_amdgcn_cvt_pknorm_i16">,
Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cvt_pknorm_u16 :
GCCBuiltin<"__builtin_amdgcn_cvt_pknorm_u16">,
Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cvt_pk_i16 :
GCCBuiltin<"__builtin_amdgcn_cvt_pk_i16">,
Intrinsic<
[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cvt_pk_u16 : GCCBuiltin<"__builtin_amdgcn_cvt_pk_u16">,
Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_class : Intrinsic<
[llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">,
Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
def int_amdgcn_fmad_ftz :
Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// Fields should mirror atomicrmw
@@ -384,7 +389,8 @@ class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
llvm_i32_ty, // ordering
llvm_i32_ty, // scope
llvm_i1_ty], // isVolatile
- [IntrArgMemOnly, NoCapture<0>, ImmArg<2>, ImmArg<3>, ImmArg<4>], "",
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "",
[SDNPMemOperand]
>;
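For context, AMDGPUAtomicIncIntrin is instantiated elsewhere in this file
(outside the hunk), presumably as:

    def int_amdgcn_atomic_inc : AMDGPUAtomicIncIntrin;
    def int_amdgcn_atomic_dec : AMDGPUAtomicIncIntrin;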
@@ -399,7 +405,8 @@ class AMDGPULDSF32Intrin<string clang_builtin> :
llvm_i32_ty, // ordering
llvm_i32_ty, // scope
llvm_i1_ty], // isVolatile
- [IntrArgMemOnly, NoCapture<0>, ImmArg<2>, ImmArg<3>, ImmArg<4>]
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]
>;
// FIXME: The m0 argument should be moved after the normal arguments
@@ -416,9 +423,9 @@ class AMDGPUDSOrderedIntrinsic : Intrinsic<
// gfx10: bits 24-27 indicate the number of active threads/dwords
llvm_i1_ty, // wave release, usually set to 1
llvm_i1_ty], // wave done, set to 1 for the last ordered instruction
- [NoCapture<0>,
- ImmArg<2>, ImmArg<3>, ImmArg<4>,
- ImmArg<5>, ImmArg<6>, ImmArg<7>
+ [IntrWillReturn, NoCapture<ArgIndex<0>>,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>,
+ ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>
]
>;
@@ -426,7 +433,8 @@ class AMDGPUDSAppendConsumedIntrinsic : Intrinsic<
[llvm_i32_ty],
[llvm_anyptr_ty, // LDS or GDS ptr
llvm_i1_ty], // isVolatile
- [IntrConvergent, IntrArgMemOnly, NoCapture<0>, ImmArg<1>],
+ [IntrConvergent, IntrWillReturn, IntrArgMemOnly,
+ NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>],
"",
[SDNPMemOperand]
>;
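Likewise, AMDGPUDSAppendConsumedIntrinsic presumably backs the ds.append and
ds.consume intrinsics defined outside this hunk:

    def int_amdgcn_ds_append  : AMDGPUDSAppendConsumedIntrinsic;
    def int_amdgcn_ds_consume : AMDGPUDSAppendConsumedIntrinsic;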
@@ -591,7 +599,7 @@ class AMDGPUDimProfile<string opmod,
AMDGPUDimProps Dim = dim;
string OpMod = opmod; // the corresponding instruction is named IMAGE_OpMod
- // These are entended to be overwritten by subclasses
+ // These are intended to be overwritten by subclasses
bit IsSample = 0;
bit IsAtomic = 0;
list<LLVMType> RetTypes = [];
@@ -697,11 +705,15 @@ class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
llvm_i1_ty], []), // unorm(imm)
[llvm_i32_ty, // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
llvm_i32_ty]), // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc)
+
!listconcat(props,
- !if(P_.IsAtomic, [], [ImmArg<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>]),
- !if(P_.IsSample, [ImmArg<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>], []),
- [ImmArg<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>,
- ImmArg<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>]),
+ !if(P_.IsAtomic, [], [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>>]),
+ !if(P_.IsSample, [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>>], []),
+ [IntrWillReturn],
+ [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>>,
+ ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>>]),
"", sdnodeprops>,
AMDGPURsrcIntrinsic<!add(!size(P_.DataArgs), !size(P_.AddrTypes),
!if(P_.IsAtomic, 0, 1)), 1> {
@@ -755,15 +767,20 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
AMDGPUImageDMaskIntrinsic;
defm int_amdgcn_image_load_mip
: AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
- [IntrReadMem], [SDNPMemOperand], 1>,
+ [IntrReadMem, IntrWillReturn], [SDNPMemOperand], 1>,
AMDGPUImageDMaskIntrinsic;
defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
"STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
- [IntrWriteMem], [SDNPMemOperand]>;
+ [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>;
defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
"STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
- [IntrWriteMem], [SDNPMemOperand], 1>;
+ [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>;
+
+ defm int_amdgcn_image_msaa_load
+ : AMDGPUImageDimIntrinsicsAll<"MSAA_LOAD", [llvm_any_ty], [], [IntrReadMem],
+ [SDNPMemOperand]>,
+ AMDGPUImageDMaskIntrinsic;
//////////////////////////////////////////////////////////////////////////
// sample and getlod intrinsics
@@ -861,7 +878,8 @@ class AMDGPUBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- [IntrReadMem, ImmArg<3>, ImmArg<4>], "", [SDNPMemOperand]>,
+ [IntrReadMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;
@@ -871,7 +889,7 @@ def int_amdgcn_s_buffer_load : Intrinsic <
[llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // byte offset(SGPR/imm)
llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 2 = dlc)
- [IntrNoMem, ImmArg<2>]>,
+ [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<2>>]>,
AMDGPURsrcIntrinsic<0>;
class AMDGPUBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
@@ -882,7 +900,8 @@ class AMDGPUBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- [IntrWriteMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
+ [IntrWriteMem, IntrWillReturn,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
def int_amdgcn_buffer_store_format : AMDGPUBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_buffer_store : AMDGPUBufferStore;
@@ -903,7 +922,7 @@ class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrReadMem, ImmArg<3>], "", [SDNPMemOperand]>,
+ [IntrReadMem, IntrWillReturn, ImmArg<ArgIndex<3>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
@@ -918,9 +937,9 @@ class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrReadMem, ImmArg<4>], "", [SDNPMemOperand]>,
+ [IntrReadMem, IntrWillReturn, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad<llvm_anyfloat_ty>;
+def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
@@ -933,7 +952,7 @@ class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrWriteMem, ImmArg<4>], "", [SDNPMemOperand]>,
+ [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;
@@ -949,9 +968,9 @@ class AMDGPUStructBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrWriteMem, ImmArg<5>], "", [SDNPMemOperand]>,
+ [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
-def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore<llvm_anyfloat_ty>;
+def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;
class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
@@ -961,7 +980,7 @@ class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
- [ImmArg<4>], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<4>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic;
@@ -983,7 +1002,7 @@ def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
- [ImmArg<5>], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<5>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<2, 0>;
class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
@@ -994,7 +1013,7 @@ class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
- [ImmArg<5>], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<5>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic;
@@ -1017,7 +1036,7 @@ def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
- [ImmArg<6>], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<6>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<2, 0>;
// Obsolescent tbuffer intrinsics.
@@ -1032,8 +1051,9 @@ def int_amdgcn_tbuffer_load : Intrinsic <
llvm_i32_ty, // nfmt(imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- [IntrReadMem, ImmArg<4>, ImmArg<5>, ImmArg<6>,
- ImmArg<7>, ImmArg<8>], "", [SDNPMemOperand]>,
+ [IntrReadMem, IntrWillReturn,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
+ ImmArg<ArgIndex<7>>, ImmArg<ArgIndex<8>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
def int_amdgcn_tbuffer_store : Intrinsic <
@@ -1048,8 +1068,9 @@ def int_amdgcn_tbuffer_store : Intrinsic <
llvm_i32_ty, // nfmt(imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- [IntrWriteMem, ImmArg<5>, ImmArg<6>, ImmArg<7>,
- ImmArg<8>, ImmArg<9>], "", [SDNPMemOperand]>,
+ [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<5>>,
+ ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>,
+ ImmArg<ArgIndex<8>>, ImmArg<ArgIndex<9>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
// New tbuffer intrinsics, with:
@@ -1066,7 +1087,8 @@ def int_amdgcn_raw_tbuffer_load : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrReadMem, ImmArg<3>, ImmArg<4>], "", [SDNPMemOperand]>,
+ [IntrReadMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_tbuffer_store : Intrinsic <
@@ -1080,7 +1102,8 @@ def int_amdgcn_raw_tbuffer_store : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrWriteMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
+ [IntrWriteMem, IntrWillReturn,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
def int_amdgcn_struct_tbuffer_load : Intrinsic <
@@ -1094,7 +1117,8 @@ def int_amdgcn_struct_tbuffer_load : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrReadMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
+ [IntrReadMem, IntrWillReturn,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_tbuffer_store : Intrinsic <
@@ -1109,7 +1133,8 @@ def int_amdgcn_struct_tbuffer_store : Intrinsic <
// bit 1 = slc,
// bit 2 = dlc on gfx10+),
// swizzled buffer (bit 3 = swz))
- [IntrWriteMem, ImmArg<5>, ImmArg<6>], "", [SDNPMemOperand]>,
+ [IntrWriteMem, IntrWillReturn,
+ ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
class AMDGPUBufferAtomic : Intrinsic <
@@ -1119,7 +1144,7 @@ class AMDGPUBufferAtomic : Intrinsic <
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty], // slc(imm)
- [ImmArg<4>], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<4>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
@@ -1139,9 +1164,10 @@ def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty], // slc(imm)
- [ImmArg<5>], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<5>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<2, 0>;
+def int_amdgcn_buffer_atomic_csub : AMDGPUBufferAtomic;
} // defset AMDGPUBufferIntrinsics
// Uses that do not set the done bit should set IntrWriteMem on the
@@ -1156,7 +1182,9 @@ def int_amdgcn_exp : Intrinsic <[], [
llvm_i1_ty, // done
llvm_i1_ty // vm
],
- [ImmArg<0>, ImmArg<1>, ImmArg<6>, ImmArg<7>, IntrInaccessibleMemOnly]
+ [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
+ ImmArg<ArgIndex<7>>, IntrWriteMem, IntrInaccessibleMemOnly,
+ IntrWillReturn]
>;
// exp with compr bit set.
@@ -1167,44 +1195,60 @@ def int_amdgcn_exp_compr : Intrinsic <[], [
LLVMMatchType<0>, // src1
llvm_i1_ty, // done
llvm_i1_ty], // vm
- [ImmArg<0>, ImmArg<1>, ImmArg<4>, ImmArg<5>, IntrInaccessibleMemOnly]
+ [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>,
+ ImmArg<ArgIndex<5>>, IntrWriteMem, IntrInaccessibleMemOnly,
+ IntrWillReturn]
>;
def int_amdgcn_buffer_wbinvl1_sc :
GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_buffer_wbinvl1 :
GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_s_dcache_inv :
GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_s_memtime :
GCCBuiltin<"__builtin_amdgcn_s_memtime">,
- Intrinsic<[llvm_i64_ty], []>;
+ Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;
def int_amdgcn_s_sleep :
GCCBuiltin<"__builtin_amdgcn_s_sleep">,
- Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]> {
+ Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
+ IntrHasSideEffects, IntrWillReturn]> {
}
def int_amdgcn_s_incperflevel :
GCCBuiltin<"__builtin_amdgcn_s_incperflevel">,
- Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]> {
+ Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
+ IntrHasSideEffects, IntrWillReturn]> {
}
def int_amdgcn_s_decperflevel :
GCCBuiltin<"__builtin_amdgcn_s_decperflevel">,
- Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]> {
+ Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
+ IntrHasSideEffects, IntrWillReturn]> {
}
def int_amdgcn_s_getreg :
GCCBuiltin<"__builtin_amdgcn_s_getreg">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrReadMem, IntrSpeculatable, ImmArg<0>]
+ [IntrInaccessibleMemOnly, IntrReadMem, IntrSpeculatable,
+ IntrWillReturn, ImmArg<ArgIndex<0>>]
+>;
+
+// Note this can be used to set FP environment properties that are
+// unsafe to change in non-strictfp functions. The register properties
+// available (and value required to access them) may differ per
+// subtarget. llvm.amdgcn.s.setreg(hwmode, value)
+def int_amdgcn_s_setreg :
+ GCCBuiltin<"__builtin_amdgcn_s_setreg">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>]
>;
// int_amdgcn_s_getpc is provided to allow a specific style of position
@@ -1215,7 +1259,8 @@ def int_amdgcn_s_getreg :
// especially as we explicitly use IntrNoMem to allow optimizations.
def int_amdgcn_s_getpc :
GCCBuiltin<"__builtin_amdgcn_s_getpc">,
- Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable,
+ IntrWillReturn]>;
// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
@@ -1223,7 +1268,8 @@ def int_amdgcn_interp_mov :
GCCBuiltin<"__builtin_amdgcn_interp_mov">,
Intrinsic<[llvm_float_ty],
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
// This intrinsic reads from lds, but the memory values are constant,
@@ -1232,14 +1278,16 @@ def int_amdgcn_interp_p1 :
GCCBuiltin<"__builtin_amdgcn_interp_p1">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
GCCBuiltin<"__builtin_amdgcn_interp_p2">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
// See int_amdgcn_v_interp_p1 for why this is IntrNoMem.
// __builtin_amdgcn_interp_p1_f16 <i>, <attr_chan>, <attr>, <high>, <m0>
@@ -1247,117 +1295,130 @@ def int_amdgcn_interp_p1_f16 :
GCCBuiltin<"__builtin_amdgcn_interp_p1_f16">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
// __builtin_amdgcn_interp_p2_f16 <p1>, <j>, <attr_chan>, <attr>, <high>, <m0>
def int_amdgcn_interp_p2_f16 :
GCCBuiltin<"__builtin_amdgcn_interp_p2_f16">,
Intrinsic<[llvm_half_ty],
[llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<2>, ImmArg<3>, ImmArg<4>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
// Pixel shaders only: whether the current pixel is live (i.e. not a helper
// invocation for derivative computation).
def int_amdgcn_ps_live : Intrinsic <
[llvm_i1_ty],
[],
- [IntrNoMem]>;
+ [IntrNoMem, IntrWillReturn]>;
def int_amdgcn_mbcnt_lo :
GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrWillReturn]>;
def int_amdgcn_mbcnt_hi :
GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrWillReturn]>;
// llvm.amdgcn.ds.swizzle src offset
def int_amdgcn_ds_swizzle :
GCCBuiltin<"__builtin_amdgcn_ds_swizzle">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrConvergent, ImmArg<1>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<1>>]>;
def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_lerp :
GCCBuiltin<"__builtin_amdgcn_lerp">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_sad_u8 :
GCCBuiltin<"__builtin_amdgcn_sad_u8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_msad_u8 :
GCCBuiltin<"__builtin_amdgcn_msad_u8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_sad_hi_u8 :
GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_sad_u16 :
GCCBuiltin<"__builtin_amdgcn_sad_u16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_qsad_pk_u16_u8 :
GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_mqsad_pk_u16_u8 :
GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_mqsad_u32_u8 :
GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_cvt_pk_u8_f32 :
GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_icmp :
Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty],
- [IntrNoMem, IntrConvergent, ImmArg<2>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<2>>]>;
def int_amdgcn_fcmp :
Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty],
- [IntrNoMem, IntrConvergent, ImmArg<2>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<2>>]>;
+
+def int_amdgcn_ballot :
+ Intrinsic<[llvm_anyint_ty], [llvm_i1_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn]>;
def int_amdgcn_readfirstlane :
GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn]>;
// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
GCCBuiltin<"__builtin_amdgcn_readlane">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn]>;
// The value to write and lane select arguments must be uniform across the
// currently active threads of the current wave. Otherwise, the result is
@@ -1369,28 +1430,28 @@ def int_amdgcn_writelane :
llvm_i32_ty, // uniform lane select
llvm_i32_ty // returned by all lanes other than the selected one
],
- [IntrNoMem, IntrConvergent]
+ [IntrNoMem, IntrConvergent, IntrWillReturn]
>;
-def int_amdgcn_alignbit :
- GCCBuiltin<"__builtin_amdgcn_alignbit">, Intrinsic<[llvm_i32_ty],
+// FIXME: Deprecated. This is equivalent to llvm.fshr
+def int_amdgcn_alignbit : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_alignbyte : GCCBuiltin<"__builtin_amdgcn_alignbyte">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_mul_i24 : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_mul_u24 : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// llvm.amdgcn.ds.gws.init(i32 bar_val, i32 resource_id)
@@ -1401,7 +1462,8 @@ def int_amdgcn_ds_gws_init :
GCCBuiltin<"__builtin_amdgcn_ds_gws_init">,
Intrinsic<[],
[llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrWriteMem, IntrInaccessibleMemOnly], "",
+ [IntrConvergent, IntrWriteMem,
+ IntrInaccessibleMemOnly, IntrWillReturn], "",
[SDNPMemOperand]
>;
@@ -1412,7 +1474,7 @@ def int_amdgcn_ds_gws_barrier :
GCCBuiltin<"__builtin_amdgcn_ds_gws_barrier">,
Intrinsic<[],
[llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrInaccessibleMemOnly], "",
+ [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
[SDNPMemOperand]
>;
@@ -1421,7 +1483,7 @@ def int_amdgcn_ds_gws_sema_v :
GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_v">,
Intrinsic<[],
[llvm_i32_ty],
- [IntrConvergent, IntrInaccessibleMemOnly], "",
+ [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
[SDNPMemOperand]
>;
@@ -1430,7 +1492,7 @@ def int_amdgcn_ds_gws_sema_br :
GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_br">,
Intrinsic<[],
[llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrInaccessibleMemOnly], "",
+ [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
[SDNPMemOperand]
>;
@@ -1439,7 +1501,7 @@ def int_amdgcn_ds_gws_sema_p :
GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_p">,
Intrinsic<[],
[llvm_i32_ty],
- [IntrConvergent, IntrInaccessibleMemOnly], "",
+ [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
[SDNPMemOperand]
>;
@@ -1448,7 +1510,7 @@ def int_amdgcn_ds_gws_sema_release_all :
GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_release_all">,
Intrinsic<[],
[llvm_i32_ty],
- [IntrConvergent, IntrInaccessibleMemOnly], "",
+ [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
[SDNPMemOperand]
>;
@@ -1456,23 +1518,24 @@ def int_amdgcn_ds_gws_sema_release_all :
// Copies the source value to the destination value, with the guarantee that
// the source value is computed as if the entire program were executed in WQM.
def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
- [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// Copies the source value to the destination value, such that the source
// is computed as if the entire program were executed in WQM if any other
// program code executes in WQM.
def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty],
- [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+ [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
// Return true if at least one thread within the pixel quad passes true into
// the function.
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
- [llvm_i1_ty], [IntrNoMem, IntrConvergent]
+ [llvm_i1_ty], [IntrNoMem, IntrConvergent, IntrWillReturn]
>;
// If false, set EXEC=0 for the current thread until the end of program.
+// FIXME: Should this be IntrNoMem, IntrHasSideEffects, or IntrWillReturn?
def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>;
// Copies the active channels of the source value to the destination value,
@@ -1481,7 +1544,8 @@ def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>;
// enabled, with a few exceptions: - Phi nodes which require WWM return an
// undefined value.
def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
- [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrConvergent]
+ [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
+ IntrConvergent, IntrWillReturn]
>;
// Given a value, copies it while setting all the inactive lanes to a given
@@ -1492,18 +1556,18 @@ def int_amdgcn_set_inactive :
Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, // value to be copied
LLVMMatchType<0>], // value for the inactive lanes to take
- [IntrNoMem, IntrConvergent]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn]>;
// Return whether the given flat pointer points to a local memory address.
def int_amdgcn_is_shared : GCCBuiltin<"__builtin_amdgcn_is_shared">,
Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
- [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+ [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>, IntrWillReturn]
>;
// Return whether the given flat pointer points to a private memory address.
def int_amdgcn_is_private : GCCBuiltin<"__builtin_amdgcn_is_private">,
Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
- [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+ [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>, IntrWillReturn]
>;
//===----------------------------------------------------------------------===//
@@ -1512,11 +1576,11 @@ def int_amdgcn_is_private : GCCBuiltin<"__builtin_amdgcn_is_private">,
def int_amdgcn_s_dcache_inv_vol :
GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_buffer_wbinvl1_vol :
GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
//===----------------------------------------------------------------------===//
// VI Intrinsics
@@ -1526,8 +1590,10 @@ def int_amdgcn_buffer_wbinvl1_vol :
def int_amdgcn_mov_dpp :
Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i1_ty], [IntrNoMem, IntrConvergent, ImmArg<1>,
- ImmArg<2>, ImmArg<3>, ImmArg<4>]>;
+ llvm_i1_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
// llvm.amdgcn.update.dpp.i32 <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
// Should be equivalent to:
@@ -1537,30 +1603,33 @@ def int_amdgcn_update_dpp :
Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i1_ty],
- [IntrNoMem, IntrConvergent,
- ImmArg<2>, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_amdgcn_s_dcache_wb :
GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_s_dcache_wb_vol :
GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_s_memrealtime :
GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
- Intrinsic<[llvm_i64_ty]>;
+ Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;
// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
GCCBuiltin<"__builtin_amdgcn_ds_permute">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn]>;
// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
GCCBuiltin<"__builtin_amdgcn_ds_bpermute">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn]>;
//===----------------------------------------------------------------------===//
// GFX10 Intrinsics
@@ -1570,13 +1639,15 @@ def int_amdgcn_ds_bpermute :
def int_amdgcn_permlane16 : GCCBuiltin<"__builtin_amdgcn_permlane16">,
Intrinsic<[llvm_i32_ty],
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
- [IntrNoMem, IntrConvergent, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
// llvm.amdgcn.permlanex16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlanex16 : GCCBuiltin<"__builtin_amdgcn_permlanex16">,
Intrinsic<[llvm_i32_ty],
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
- [IntrNoMem, IntrConvergent, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
// llvm.amdgcn.mov.dpp8.i32 <src> <sel>
// <sel> is a 32-bit constant whose high 8 bits must be zero which selects
@@ -1584,11 +1655,21 @@ def int_amdgcn_permlanex16 : GCCBuiltin<"__builtin_amdgcn_permlanex16">,
def int_amdgcn_mov_dpp8 :
Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrConvergent, ImmArg<1>]>;
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<1>>]>;
def int_amdgcn_s_get_waveid_in_workgroup :
GCCBuiltin<"__builtin_amdgcn_s_get_waveid_in_workgroup">,
- Intrinsic<[llvm_i32_ty], [], [IntrReadMem, IntrInaccessibleMemOnly]>;
+ Intrinsic<[llvm_i32_ty], [],
+ [IntrReadMem, IntrInaccessibleMemOnly, IntrWillReturn]>;
+
+class AMDGPUGlobalAtomicRtn<LLVMType vt> : Intrinsic <
+ [vt],
+ [llvm_anyptr_ty, // vaddr
+ vt], // vdata(VGPR)
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>], "", [SDNPMemOperand]>;
+
+def int_amdgcn_global_atomic_csub : AMDGPUGlobalAtomicRtn<llvm_i32_ty>;
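With vt = llvm_i32_ty substituted, the csub definition above expands to the
equivalent of (illustration only):

    def int_amdgcn_global_atomic_csub : Intrinsic<
        [llvm_i32_ty],
        [llvm_anyptr_ty,   // vaddr
         llvm_i32_ty],     // vdata(VGPR)
        [IntrArgMemOnly, NoCapture<ArgIndex<0>>], "", [SDNPMemOperand]>;

Note that, unlike most definitions touched by this patch, this class does not
carry IntrWillReturn.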
//===----------------------------------------------------------------------===//
// Deep learning intrinsics.
@@ -1606,7 +1687,7 @@ def int_amdgcn_fdot2 :
llvm_float_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
// i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp)
@@ -1621,7 +1702,7 @@ def int_amdgcn_sdot2 :
llvm_i32_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
// u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp)
@@ -1636,7 +1717,7 @@ def int_amdgcn_udot2 :
llvm_i32_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
// i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp)
@@ -1651,7 +1732,7 @@ def int_amdgcn_sdot4 :
llvm_i32_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
// u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp)
@@ -1666,7 +1747,7 @@ def int_amdgcn_udot4 :
llvm_i32_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
// i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp)
@@ -1682,7 +1763,7 @@ def int_amdgcn_sdot8 :
llvm_i32_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
// u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp)
@@ -1698,7 +1779,7 @@ def int_amdgcn_udot8 :
llvm_i32_ty, // %c
llvm_i1_ty // %clamp
],
- [IntrNoMem, IntrSpeculatable, ImmArg<3>]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;
//===----------------------------------------------------------------------===//
@@ -1712,140 +1793,183 @@ class AMDGPUBufferAtomicNoRtn : Intrinsic <
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty], // slc(imm)
- [], "", [SDNPMemOperand]>,
+ [ImmArg<ArgIndex<4>>, IntrWillReturn], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1, 0>;
class AMDGPUGlobalAtomicNoRtn : Intrinsic <
[],
[llvm_anyptr_ty, // vaddr
llvm_anyfloat_ty], // vdata(VGPR)
- [IntrArgMemOnly, NoCapture<0>], "", [SDNPMemOperand]>;
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>], "",
+ [SDNPMemOperand]>;
def int_amdgcn_buffer_atomic_fadd : AMDGPUBufferAtomicNoRtn;
def int_amdgcn_global_atomic_fadd : AMDGPUGlobalAtomicNoRtn;
// llvm.amdgcn.mfma.f32.* vdst, srcA, srcB, srcC, cbsz, abid, blgp
-def int_amdgcn_mfma_f32_32x32x1f32 : Intrinsic<[llvm_v32f32_ty],
- [llvm_float_ty, llvm_float_ty, llvm_v32f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_16x16x1f32 : Intrinsic<[llvm_v16f32_ty],
- [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_4x4x1f32 : Intrinsic<[llvm_v4f32_ty],
- [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_32x32x2f32 : Intrinsic<[llvm_v16f32_ty],
- [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_16x16x4f32 : Intrinsic<[llvm_v4f32_ty],
- [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_32x32x4f16 : Intrinsic<[llvm_v32f32_ty],
- [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v32f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_16x16x4f16 : Intrinsic<[llvm_v16f32_ty],
- [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_4x4x4f16 : Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_32x32x8f16 : Intrinsic<[llvm_v16f32_ty],
- [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_16x16x16f16 : Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_i32_32x32x4i8 : Intrinsic<[llvm_v32i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_v32i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_i32_16x16x4i8 : Intrinsic<[llvm_v16i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_i32_4x4x4i8 : Intrinsic<[llvm_v4i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_i32_32x32x8i8 : Intrinsic<[llvm_v16i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_i32_16x16x16i8 : Intrinsic<[llvm_v4i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_32x32x2bf16 : Intrinsic<[llvm_v32f32_ty],
- [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v32f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_16x16x2bf16 : Intrinsic<[llvm_v16f32_ty],
- [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_4x4x2bf16 : Intrinsic<[llvm_v4f32_ty],
- [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_32x32x4bf16 : Intrinsic<[llvm_v16f32_ty],
- [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
-
-def int_amdgcn_mfma_f32_16x16x8bf16 : Intrinsic<[llvm_v4f32_ty],
- [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
+def int_amdgcn_mfma_f32_32x32x1f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x1f32">,
+ Intrinsic<[llvm_v32f32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_v32f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_16x16x1f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x1f32">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_4x4x1f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_4x4x1f32">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_32x32x2f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x2f32">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_16x16x4f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x4f32">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_32x32x4f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x4f16">,
+ Intrinsic<[llvm_v32f32_ty],
+ [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v32f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_16x16x4f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x4f16">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_4x4x4f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_4x4x4f16">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_32x32x8f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x8f16">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_16x16x16f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x16f16">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_i32_32x32x4i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_32x32x4i8">,
+ Intrinsic<[llvm_v32i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_v32i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_i32_16x16x4i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_16x16x4i8">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_i32_4x4x4i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_4x4x4i8">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_i32_32x32x8i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_32x32x8i8">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_i32_16x16x16i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_16x16x16i8">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_32x32x2bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x2bf16">,
+ Intrinsic<[llvm_v32f32_ty],
+ [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v32f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_16x16x2bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x2bf16">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_4x4x2bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_4x4x2bf16">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_32x32x4bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x4bf16">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+
+def int_amdgcn_mfma_f32_16x16x8bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x8bf16">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrConvergent, IntrNoMem, IntrWillReturn,
+ ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
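
// --- Illustrative sketch (editorial, not part of this patch) ---
// Every MFMA definition above repeats the same shape: two source operands, an
// accumulator of the destination type, and three trailing i32 modifier
// operands that must be compile-time immediates (hence ImmArg<ArgIndex<3>>
// through ImmArg<ArgIndex<5>>). Assuming the surrounding Intrinsics.td classes
// are in scope, the repetition could be factored as below; the class name is
// hypothetical.
class AMDGPUMfmaIntrinsic<string builtin, LLVMType DstTy, LLVMType SrcTy>
  : GCCBuiltin<builtin>,
    Intrinsic<[DstTy],
              [SrcTy, SrcTy, DstTy, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [IntrConvergent, IntrNoMem, IntrWillReturn,
               ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
// Equivalent to the first definition above:
// def int_amdgcn_mfma_f32_32x32x1f32
//   : AMDGPUMfmaIntrinsic<"__builtin_amdgcn_mfma_f32_32x32x1f32",
//                         llvm_v32f32_ty, llvm_float_ty>;
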
//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
//===----------------------------------------------------------------------===//
def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
- [llvm_i1_ty], [IntrConvergent]
+ [llvm_i1_ty], [IntrConvergent, IntrWillReturn]
>;
def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
- [llvm_anyint_ty], [IntrConvergent]
+ [llvm_anyint_ty], [IntrConvergent, IntrWillReturn]
>;
def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty],
- [llvm_i1_ty, llvm_anyint_ty], [IntrNoMem, IntrConvergent]
+ [llvm_i1_ty, LLVMMatchType<0>],
+ [IntrNoMem, IntrConvergent, IntrWillReturn]
>;
def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
- [llvm_anyint_ty], [IntrConvergent]
+ [llvm_anyint_ty], [IntrConvergent, IntrWillReturn]
>;
-def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty], [IntrConvergent]>;
+def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty],
+ [IntrConvergent, IntrWillReturn]>;
// Represent unreachable in a divergent region.
def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
@@ -1854,6 +1978,12 @@ def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
// pass based on !fpmath metadata.
def int_amdgcn_fdiv_fast : Intrinsic<
[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, IntrSpeculatable]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
+>;
+
+// Represent a relocation constant.
+def int_amdgcn_reloc_constant : Intrinsic<
+ [llvm_i32_ty], [llvm_metadata_ty],
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
}
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index 518ad7079225..df74e446b965 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -19,7 +19,7 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
// A space-consuming intrinsic primarily for testing ARMConstantIslands. The
// first argument is the number of bytes this "instruction" takes up; the
// second argument and the return value are essentially chains, used to force
// ordering during ISel.
-def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg<0>]>;
+def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
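
// --- Illustrative sketch (editorial, not part of this patch) ---
// The mechanical change running through the remainder of this diff is the one
// visible above: immediate-operand indices move from a bare integer
// (ImmArg<0>) to an explicit wrapper (ImmArg<ArgIndex<0>>). On a hypothetical
// intrinsic, the new spelling reads:
// def int_arm_example_imm : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
//                                     [ImmArg<ArgIndex<0>>]>;
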
// 16-bit multiplications
def int_arm_smulbb : GCCBuiltin<"__builtin_arm_smulbb">,
@@ -262,59 +262,59 @@ def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
// Coprocessor
def int_arm_ldc : GCCBuiltin<"__builtin_arm_ldc">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_ldcl : GCCBuiltin<"__builtin_arm_ldcl">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_ldc2 : GCCBuiltin<"__builtin_arm_ldc2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_ldc2l : GCCBuiltin<"__builtin_arm_ldc2l">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stc : GCCBuiltin<"__builtin_arm_stc">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stcl : GCCBuiltin<"__builtin_arm_stcl">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stc2 : GCCBuiltin<"__builtin_arm_stc2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stc2l : GCCBuiltin<"__builtin_arm_stc2l">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<0>, ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
// Move to coprocessor
def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
// Move from coprocessor
def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
MSBuiltin<"_MoveFromCoprocessor">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<2>, ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
MSBuiltin<"_MoveFromCoprocessor2">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<2>, ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
// Coprocessor data processing
def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<2>, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<2>, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
// Move from two registers to coprocessor
def int_arm_mcrr : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_arm_mcrr2 : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_arm_mrrc : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
def int_arm_mrrc2 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, ImmArg<1>, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
//===----------------------------------------------------------------------===//
// CRC32
@@ -695,16 +695,16 @@ def int_arm_neon_vst4 : Intrinsic<[],
def int_arm_neon_vst1x2 : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyvector_ty,
LLVMMatchType<1>],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_arm_neon_vst1x3 : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyvector_ty,
LLVMMatchType<1>, LLVMMatchType<1>],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_arm_neon_vst1x4 : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyvector_ty,
LLVMMatchType<1>, LLVMMatchType<1>,
LLVMMatchType<1>],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
// Vector store N-element structure from one lane.
// Source operands are: the address, the N vectors, the lane number, and
@@ -773,6 +773,33 @@ class Neon_Dot_Intrinsic
def int_arm_neon_udot : Neon_Dot_Intrinsic;
def int_arm_neon_sdot : Neon_Dot_Intrinsic;
+// v8.6-A Matrix Multiply Intrinsics
+class Neon_MatMul_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ LLVMMatchType<1>],
+ [IntrNoMem]>;
+def int_arm_neon_ummla : Neon_MatMul_Intrinsic;
+def int_arm_neon_smmla : Neon_MatMul_Intrinsic;
+def int_arm_neon_usmmla : Neon_MatMul_Intrinsic;
+def int_arm_neon_usdot : Neon_Dot_Intrinsic;
+
+// v8.6-A Bfloat Intrinsics
+def int_arm_neon_vcvtfp2bf
+ : Intrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_arm_neon_vcvtbfp2bf
+ : Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
+
+def int_arm_neon_bfdot : Neon_Dot_Intrinsic;
+def int_arm_neon_bfmmla : Neon_MatMul_Intrinsic;
+
+class Neon_FML_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+def int_arm_neon_bfmlalb : Neon_FML_Intrinsic;
+def int_arm_neon_bfmlalt : Neon_FML_Intrinsic;
+
def int_arm_cls: Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_cls64: Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
@@ -795,14 +822,8 @@ def int_arm_mve_pred_i2v : Intrinsic<
[llvm_anyvector_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_pred_v2i : Intrinsic<
[llvm_i32_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-multiclass IntrinsicSignSuffix<list<LLVMType> rets, list<LLVMType> params = [],
- list<IntrinsicProperty> props = [],
- string name = "",
- list<SDNodeProperty> sdprops = []> {
- def _s: Intrinsic<rets, params, props, name, sdprops>;
- def _u: Intrinsic<rets, params, props, name, sdprops>;
-}
+def int_arm_mve_vreinterpretq : Intrinsic<
+ [llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_mve_min_predicated: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
@@ -876,11 +897,18 @@ def int_arm_mve_qsub_predicated: Intrinsic<[llvm_anyvector_ty],
def int_arm_mve_hsub_predicated: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
-
-defm int_arm_mve_minv: IntrinsicSignSuffix<[llvm_i32_ty],
- [llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
-defm int_arm_mve_maxv: IntrinsicSignSuffix<[llvm_i32_ty],
- [llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+def int_arm_mve_vmina_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_mve_vmaxa_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_mve_vminnma_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_mve_vmaxnma_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
multiclass MVEPredicated<list<LLVMType> rets, list<LLVMType> params,
LLVMType pred = llvm_anyvector_ty,
@@ -897,8 +925,40 @@ multiclass MVEPredicatedM<list<LLVMType> rets, list<LLVMType> params,
LLVMMatchType<0>, rets[0])], props>;
}
+multiclass MVE_minmaxv {
+ defm v: MVEPredicated<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_anyvector_ty, llvm_i32_ty /* unsigned */]>;
+ defm av: MVEPredicated<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_anyvector_ty]>;
+ defm nmv: MVEPredicated<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty]>;
+ defm nmav: MVEPredicated<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty]>;
+}
+defm int_arm_mve_min: MVE_minmaxv;
+defm int_arm_mve_max: MVE_minmaxv;
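
// --- Illustrative expansion (editorial, not part of this patch) ---
// Assuming MVEPredicated emits a base def and a _predicated def (with the
// predicate appended), as elsewhere in this file, `defm int_arm_mve_min:
// MVE_minmaxv` yields int_arm_mve_minv, int_arm_mve_minav, int_arm_mve_minnmv
// and int_arm_mve_minnmav, each in both forms; approximately:
def int_arm_mve_minv : Intrinsic<
   [llvm_i32_ty],
   [llvm_i32_ty, llvm_anyvector_ty, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_minv_predicated : Intrinsic<
   [llvm_i32_ty],
   [llvm_i32_ty, llvm_anyvector_ty, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty /* predicate */],
   [IntrNoMem]>;
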
+
+defm int_arm_mve_addv: MVEPredicated<[llvm_i32_ty],
+ [llvm_anyvector_ty, llvm_i32_ty /* unsigned */]>;
+defm int_arm_mve_addlv: MVEPredicated<[llvm_i64_ty],
+ [llvm_anyvector_ty, llvm_i32_ty /* unsigned */]>;
+
+// Intrinsic with a predicated and a non-predicated case. The predicated case
+// has two additional parameters: inactive (the value for inactive lanes, can
+// be undef) and predicate.
+multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
+ list<LLVMType> params, LLVMType inactive,
+ LLVMType predicate,
+ list<IntrinsicProperty> props = [IntrNoMem]> {
+ def "": Intrinsic<rets, flags # params, props>;
+ def _predicated: Intrinsic<rets, flags # [inactive] # params # [predicate],
+ props>;
+}
+
defm int_arm_mve_vcvt_narrow: MVEPredicated<[llvm_v8f16_ty],
[llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty], llvm_v4i1_ty>;
+defm int_arm_mve_vcvt_widen: MVEMXPredicated<[llvm_v4f32_ty], [],
+ [llvm_v8f16_ty, llvm_i32_ty], llvm_v4f32_ty, llvm_v4i1_ty>;
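
// --- Illustrative expansion (editorial, not part of this patch) ---
// Per the MVEMXPredicated multiclass above, the vcvt_widen defm expands,
// approximately, to:
def int_arm_mve_vcvt_widen : Intrinsic<
   [llvm_v4f32_ty], [llvm_v8f16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vcvt_widen_predicated : Intrinsic<
   [llvm_v4f32_ty],
   [llvm_v4f32_ty /* inactive */, llvm_v8f16_ty, llvm_i32_ty,
    llvm_v4i1_ty /* predicate */],
   [IntrNoMem]>;
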
defm int_arm_mve_vldr_gather_base: MVEPredicated<
[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
@@ -992,10 +1052,25 @@ def int_arm_mve_vabd: Intrinsic<
def int_arm_mve_vadc: Intrinsic<
[llvm_anyvector_ty, llvm_i32_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vsbc: Intrinsic<
+ [llvm_anyvector_ty, llvm_i32_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vadc_predicated: Intrinsic<
[llvm_anyvector_ty, llvm_i32_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+def int_arm_mve_vsbc_predicated: Intrinsic<
+ [llvm_anyvector_ty, llvm_i32_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+def int_arm_mve_vshlc: Intrinsic<
+ [llvm_i32_ty /* bits shifted out */, llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty /* bits shifted in */,
+ llvm_i32_ty /* shift count */], [IntrNoMem]>;
+def int_arm_mve_vshlc_predicated: Intrinsic<
+ [llvm_i32_ty /* bits shifted out */, llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty /* bits shifted in */,
+ llvm_i32_ty /* shift count */, llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_mve_vmulh: Intrinsic<
[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
@@ -1030,21 +1105,9 @@ def int_arm_mve_vmull_poly: Intrinsic<
[llvm_anyvector_ty],
[llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrNoMem]>;
-// Intrinsic with a predicated and a non-predicated case. The predicated case
-// has two additional parameters: inactive (the value for inactive lanes, can
-// be undef) and predicate.
-multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
- list<LLVMType> params, LLVMType inactive,
- LLVMType predicate,
- list<IntrinsicProperty> props = [IntrNoMem]> {
- def "": Intrinsic<rets, flags # params, props>;
- def _predicated: Intrinsic<rets, flags # [inactive] # params # [predicate],
- props>;
-}
-
// The first two parameters are compile-time constants:
// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq)
-// instruction. Note: the flag is inverted to match the corresonding
+// instruction. Note: the flag is inverted to match the corresponding
// bit in the instruction encoding
// * Rotation angle: 0 means 90 deg, 1 means 180 deg
defm int_arm_mve_vcaddq : MVEMXPredicated<
@@ -1068,12 +1131,11 @@ defm int_arm_mve_vcmlaq : MVEPredicated<
[llvm_i32_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
llvm_anyvector_ty>;
-def int_arm_mve_vld2q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem]>;
-def int_arm_mve_vld4q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem]>;
+def int_arm_mve_vld2q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_arm_mve_vld4q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
-def int_arm_mve_vst2q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem]>;
-def int_arm_mve_vst4q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem]
->;
+def int_arm_mve_vst2q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem, IntrArgMemOnly]>;
+def int_arm_mve_vst4q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem, IntrArgMemOnly]>;
// MVE vector absolute difference and accumulate across vector
// The first operand is an 'unsigned' flag. The remaining operands are:
@@ -1121,4 +1183,197 @@ defm int_arm_mve_vrmlldavha: MVEPredicated<
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
llvm_anyvector_ty>;
+
+defm int_arm_mve_vidup: MVEMXPredicated<
+ [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
+ [llvm_i32_ty /* base */, llvm_i32_ty /* step */],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+defm int_arm_mve_vddup: MVEMXPredicated<
+ [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
+ [llvm_i32_ty /* base */, llvm_i32_ty /* step */],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+defm int_arm_mve_viwdup: MVEMXPredicated<
+ [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
+ [llvm_i32_ty /* base */, llvm_i32_ty /* limit */, llvm_i32_ty /* step */],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+defm int_arm_mve_vdwdup: MVEMXPredicated<
+ [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
+ [llvm_i32_ty /* base */, llvm_i32_ty /* limit */, llvm_i32_ty /* step */],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+
+// Flags:
+// * unsigned
+defm int_arm_mve_vcvt_fix: MVEMXPredicated<
+ [llvm_anyvector_ty /* output */], [llvm_i32_ty],
+ [llvm_anyvector_ty /* input vector */, llvm_i32_ty /* scale */],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+
+def int_arm_mve_vcvt_fp_int_predicated: Intrinsic<
+ [llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty /* predicate */, LLVMMatchType<0> /* inactive */],
+ [IntrNoMem]>;
+
+foreach suffix = ["a","n","p","m"] in {
+ defm "int_arm_mve_vcvt"#suffix: MVEMXPredicated<
+ [llvm_anyvector_ty /* output */], [llvm_i32_ty /* unsigned */],
+ [llvm_anyvector_ty /* input */], LLVMMatchType<0>, llvm_anyvector_ty>;
+}
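
// --- Illustrative expansion (editorial, not part of this patch) ---
// The foreach above is shorthand for four defms whose names are built by
// string concatenation; the "a" iteration, for example, is equivalent to:
// defm int_arm_mve_vcvta : MVEMXPredicated<
//    [llvm_anyvector_ty /* output */], [llvm_i32_ty /* unsigned */],
//    [llvm_anyvector_ty /* input */], LLVMMatchType<0>, llvm_anyvector_ty>;
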
+
+def int_arm_mve_vrintn: Intrinsic<
+ [llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_vcls: Intrinsic<
+ [llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+defm int_arm_mve_vbrsr: MVEMXPredicated<
+ [llvm_anyvector_ty], [],
+ [LLVMMatchType<0>, llvm_i32_ty], LLVMMatchType<0>, llvm_anyvector_ty>;
+
+def int_arm_mve_vqdmull: Intrinsic<
+ [llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_mve_vqdmull_predicated: Intrinsic<
+ [llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+class MVESimpleUnaryPredicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_arm_mve_mvn_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_abs_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_neg_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_qabs_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_qneg_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_clz_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_cls_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_vrintz_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_vrintm_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_vrintp_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_vrinta_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_vrintx_predicated: MVESimpleUnaryPredicated;
+def int_arm_mve_vrintn_predicated: MVESimpleUnaryPredicated;
+
+def int_arm_mve_vrev_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty /* size to reverse */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_arm_mve_vmovl_predicated: Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, llvm_i32_ty /* unsigned */, llvm_i32_ty /* top half */,
+ llvm_anyvector_ty /* predicate */, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_vmovn_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i32_ty /* top half */,
+ llvm_anyvector_ty /* predicate */], [IntrNoMem]>;
+
+def int_arm_mve_vqmovn: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
+ llvm_i32_ty /* top half */], [IntrNoMem]>;
+def int_arm_mve_vqmovn_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
+ llvm_i32_ty /* top half */, llvm_anyvector_ty /* pred */], [IntrNoMem]>;
+
+def int_arm_mve_fma_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
+ LLVMMatchType<0> /* addend */, llvm_anyvector_ty /* pred */], [IntrNoMem]>;
+def int_arm_mve_vmla_n_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* addend */,
+ llvm_i32_ty /* mult op #2 (scalar) */, llvm_anyvector_ty /* pred */],
+ [IntrNoMem]>;
+def int_arm_mve_vmlas_n_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
+ llvm_i32_ty /* addend (scalar) */, llvm_anyvector_ty /* pred */],
+ [IntrNoMem]>;
+
+defm int_arm_mve_vqdmlah: MVEPredicated<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* addend */,
+ llvm_i32_ty /* mult op #2 (scalar) */]>;
+defm int_arm_mve_vqrdmlah: MVEPredicated<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* addend */,
+ llvm_i32_ty /* mult op #2 (scalar) */]>;
+defm int_arm_mve_vqdmlash: MVEPredicated<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
+ llvm_i32_ty /* addend (scalar) */]>;
+defm int_arm_mve_vqrdmlash: MVEPredicated<[llvm_anyvector_ty],
+ [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
+ llvm_i32_ty /* addend (scalar) */]>;
+
+defm int_arm_mve_vqdmlad: MVEPredicated<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty /* exchange */, llvm_i32_ty /* round */,
+ llvm_i32_ty /* subtract */]>;
+
+// CDE (Custom Datapath Extension)
+
+multiclass CDEGPRIntrinsics<list<LLVMType> args> {
+ def "" : Intrinsic<
+ [llvm_i32_ty],
+ !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
+ def a : Intrinsic<
+ [llvm_i32_ty],
+ !listconcat([llvm_i32_ty /* coproc */, llvm_i32_ty /* acc */], args,
+ [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
+
+ def d: Intrinsic<
+ [llvm_i32_ty /* lo */, llvm_i32_ty /* hi */],
+ !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
+ def da: Intrinsic<
+ [llvm_i32_ty /* lo */, llvm_i32_ty /* hi */],
+ !listconcat([llvm_i32_ty /* coproc */, llvm_i32_ty /* acc_lo */,
+ llvm_i32_ty /* acc_hi */], args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 3)>>]>;
+}
+
+defm int_arm_cde_cx1: CDEGPRIntrinsics<[]>;
+defm int_arm_cde_cx2: CDEGPRIntrinsics<[llvm_i32_ty]>;
+defm int_arm_cde_cx3: CDEGPRIntrinsics<[llvm_i32_ty, llvm_i32_ty]>;
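
// --- Illustrative expansion (editorial, not part of this patch) ---
// Because the trailing immediate sits after a variable number of arguments,
// its ImmArg index is computed as !add(!size(args), ...). For the cx2 defm
// above, the accumulator variant `a` works out to:
def int_arm_cde_cx2a : Intrinsic<
   [llvm_i32_ty],
   [llvm_i32_ty /* coproc */, llvm_i32_ty /* acc */, llvm_i32_ty /* input */,
    llvm_i32_ty /* imm */],
   [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
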
+
+multiclass CDEVCXIntrinsics<list<LLVMType> args> {
+ def "" : Intrinsic<
+ [llvm_anyfloat_ty],
+ !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
+ def a : Intrinsic<
+ [llvm_anyfloat_ty],
+ !listconcat([llvm_i32_ty /* coproc */, LLVMMatchType<0> /* acc */],
+ args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
+}
+
+defm int_arm_cde_vcx1 : CDEVCXIntrinsics<[]>;
+defm int_arm_cde_vcx2 : CDEVCXIntrinsics<[LLVMMatchType<0>]>;
+defm int_arm_cde_vcx3 : CDEVCXIntrinsics<[LLVMMatchType<0>, LLVMMatchType<0>]>;
+
+multiclass CDEVCXVecIntrinsics<list<LLVMType> args> {
+ def "" : Intrinsic<
+ [llvm_v16i8_ty],
+ !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
+ def a : Intrinsic<
+ [llvm_v16i8_ty],
+ !listconcat([llvm_i32_ty /* coproc */, llvm_v16i8_ty /* acc */],
+ args, [llvm_i32_ty /* imm */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
+
+ def _predicated : Intrinsic<
+ [llvm_anyvector_ty],
+ !listconcat([llvm_i32_ty /* coproc */, LLVMMatchType<0> /* inactive */],
+ args, [llvm_i32_ty /* imm */, llvm_anyvector_ty /* mask */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
+ def a_predicated : Intrinsic<
+ [llvm_anyvector_ty],
+ !listconcat([llvm_i32_ty /* coproc */, LLVMMatchType<0> /* acc */],
+ args, [llvm_i32_ty /* imm */, llvm_anyvector_ty /* mask */]),
+ [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
+}
+
+defm int_arm_cde_vcx1q : CDEVCXVecIntrinsics<[]>;
+defm int_arm_cde_vcx2q : CDEVCXVecIntrinsics<[llvm_v16i8_ty]>;
+defm int_arm_cde_vcx3q : CDEVCXVecIntrinsics<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+
} // end TargetPrefix
diff --git a/llvm/include/llvm/IR/IntrinsicsBPF.td b/llvm/include/llvm/IR/IntrinsicsBPF.td
index 3618cc6a4128..c4d35b2a0a88 100644
--- a/llvm/include/llvm/IR/IntrinsicsBPF.td
+++ b/llvm/include/llvm/IR/IntrinsicsBPF.td
@@ -22,5 +22,8 @@ let TargetPrefix = "bpf" in { // All intrinsics start with "llvm.bpf."
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]>;
def int_bpf_preserve_field_info : GCCBuiltin<"__builtin_bpf_preserve_field_info">,
Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i64_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+ def int_bpf_btf_type_id : GCCBuiltin<"__builtin_bpf_btf_type_id">,
+ Intrinsic<[llvm_i32_ty], [llvm_any_ty, llvm_any_ty, llvm_i64_ty],
+ [IntrNoMem]>;
}
diff --git a/llvm/include/llvm/IR/IntrinsicsHexagon.td b/llvm/include/llvm/IR/IntrinsicsHexagon.td
index 2abc1dc07ebd..fe16a361ba3d 100644
--- a/llvm/include/llvm/IR/IntrinsicsHexagon.td
+++ b/llvm/include/llvm/IR/IntrinsicsHexagon.td
@@ -51,19 +51,19 @@ class Hexagon_mem_memmemsisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
llvm_i32_ty, llvm_i32_ty],
- [IntrArgMemOnly, ImmArg<3>]>;
+ [IntrArgMemOnly, ImmArg<ArgIndex<3>>]>;
class Hexagon_mem_memsisisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem, ImmArg<3>]>;
+ [IntrWriteMem, ImmArg<ArgIndex<3>>]>;
class Hexagon_mem_memdisisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem, ImmArg<3>]>;
+ [IntrWriteMem, ImmArg<ArgIndex<3>>]>;
//
// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
@@ -122,24 +122,8 @@ Hexagon_mem_memsisisi_Intrinsic<"circ_sthhi">;
def int_hexagon_circ_stb :
Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;
-//
-// BUILTIN_INFO(HEXAGON.dcfetch_A,v_ftype_DI*,1)
-//
def int_hexagon_prefetch :
Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
-def int_hexagon_Y2_dccleana :
-Hexagon_Intrinsic<"HEXAGON_Y2_dccleana", [], [llvm_ptr_ty], []>;
-def int_hexagon_Y2_dccleaninva :
-Hexagon_Intrinsic<"HEXAGON_Y2_dccleaninva", [], [llvm_ptr_ty], []>;
-def int_hexagon_Y2_dcinva :
-Hexagon_Intrinsic<"HEXAGON_Y2_dcinva", [], [llvm_ptr_ty], []>;
-def int_hexagon_Y2_dczeroa :
-Hexagon_Intrinsic<"HEXAGON_Y2_dczeroa", [], [llvm_ptr_ty],
- [IntrWriteMem, IntrArgMemOnly, IntrHasSideEffects]>;
-def int_hexagon_Y4_l2fetch :
-Hexagon_Intrinsic<"HEXAGON_Y4_l2fetch", [], [llvm_ptr_ty, llvm_i32_ty], []>;
-def int_hexagon_Y5_l2fetch :
-Hexagon_Intrinsic<"HEXAGON_Y5_l2fetch", [], [llvm_ptr_ty, llvm_i64_ty], []>;
def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
@@ -147,34 +131,34 @@ def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
// Mark locked loads as read/write to prevent any accidental reordering.
def int_hexagon_L2_loadw_locked :
Hexagon_Intrinsic<"HEXAGON_L2_loadw_locked", [llvm_i32_ty], [llvm_ptr32_ty],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_L4_loadd_locked :
Hexagon_Intrinsic<"HEXAGON_L4_loadd_locked", [llvm_i64_ty], [llvm_ptr64_ty],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_S2_storew_locked :
Hexagon_Intrinsic<"HEXAGON_S2_storew_locked", [llvm_i32_ty],
- [llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<0>]>;
+ [llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_S4_stored_locked :
Hexagon_Intrinsic<"HEXAGON_S4_stored_locked", [llvm_i32_ty],
- [llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<0>]>;
+ [llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>, ReadOnly<1>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>, WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>]>;
def int_hexagon_vmemset : Hexagon_Intrinsic<"hexagon_vmemset",
[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
multiclass Hexagon_custom_circ_ld_Intrinsic<LLVMType ElTy> {
def NAME#_pci : Hexagon_NonGCC_Intrinsic<
[ElTy, llvm_ptr_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<3>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
[ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<2>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
}
defm int_hexagon_L2_loadrub : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
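
// --- Illustrative expansion (editorial, not part of this patch) ---
// NAME#_pci and NAME#_pcr splice the defm name into each def, so the defm
// above yields int_hexagon_L2_loadrub_pci and int_hexagon_L2_loadrub_pcr;
// the _pci form is, approximately:
def int_hexagon_L2_loadrub_pci : Hexagon_NonGCC_Intrinsic<
   [llvm_i32_ty, llvm_ptr_ty],
   [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
   [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
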
@@ -188,10 +172,10 @@ multiclass Hexagon_custom_circ_st_Intrinsic<LLVMType ElTy> {
def NAME#_pci : Hexagon_NonGCC_Intrinsic<
[llvm_ptr_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<4>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<3>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
}
defm int_hexagon_S2_storerb : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
@@ -221,6157 +205,83 @@ def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;
-//
-// Masked vector stores
-//
-
-//
-// Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
-// tag: V6_vS32b_qpred_ai
-class Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v512i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
- [IntrArgMemOnly]>;
-
-//
-// Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
-// tag: V6_vS32b_qpred_ai_128B
-class Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v1024i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
- [IntrArgMemOnly]>;
-
-def int_hexagon_V6_vS32b_qpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai">;
-
-def int_hexagon_V6_vS32b_nqpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai">;
-
-def int_hexagon_V6_vS32b_nt_qpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai">;
-
-def int_hexagon_V6_vS32b_nt_nqpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai">;
-
-def int_hexagon_V6_vS32b_qpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai_128B">;
-
-def int_hexagon_V6_vS32b_nqpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai_128B">;
-
-def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai_128B">;
-
-def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai_128B">;
-
-def int_hexagon_V6_vmaskedstoreq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstoreq">;
-
-def int_hexagon_V6_vmaskedstorenq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorenq">;
-
-def int_hexagon_V6_vmaskedstorentq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentq">;
-
-def int_hexagon_V6_vmaskedstorentnq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentnq">;
-
-def int_hexagon_V6_vmaskedstoreq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstoreq_128B">;
-
-def int_hexagon_V6_vmaskedstorenq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorenq_128B">;
-
-def int_hexagon_V6_vmaskedstorentq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentq_128B">;
-
-def int_hexagon_V6_vmaskedstorentnq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentnq_128B">;
-
-class Hexagon_V65_vvmemiiv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
- llvm_v16i32_ty],
- [IntrArgMemOnly]>;
-
-class Hexagon_V65_vvmemiiv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
- llvm_v32i32_ty],
- [IntrArgMemOnly]>;
-
-class Hexagon_V65_vvmemiiv2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
- llvm_v64i32_ty],
- [IntrArgMemOnly]>;
-
-class Hexagon_V65_vvmemv64iiiv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v16i32_ty],
- [IntrArgMemOnly]>;
-
-class Hexagon_V65_vvmemv128iiiv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty],
- [IntrArgMemOnly]>;
-
-class Hexagon_V65_vvmemv64iiiv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty],
- [IntrArgMemOnly]>;
-
-class Hexagon_V65_vvmemv128iiiv2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v64i32_ty],
- [IntrArgMemOnly]>;
-
-def int_hexagon_V6_vgathermw :
-Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermw">;
-
-def int_hexagon_V6_vgathermw_128B :
-Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermw_128B">;
-
-def int_hexagon_V6_vgathermh :
-Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermh">;
-
-def int_hexagon_V6_vgathermh_128B :
-Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermh_128B">;
-
-def int_hexagon_V6_vgathermhw :
-Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermhw">;
-
-def int_hexagon_V6_vgathermhw_128B :
-Hexagon_V65_vvmemiiv2048_Intrinsic<"HEXAGON_V6_vgathermhw_128B">;
-
-def int_hexagon_V6_vgathermwq :
-Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermwq">;
-
-def int_hexagon_V6_vgathermwq_128B :
-Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermwq_128B">;
-
-def int_hexagon_V6_vgathermhq :
-Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermhq">;
-
-def int_hexagon_V6_vgathermhq_128B :
-Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhq_128B">;
-
-def int_hexagon_V6_vgathermhwq :
-Hexagon_V65_vvmemv64iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhwq">;
-
-def int_hexagon_V6_vgathermhwq_128B :
-Hexagon_V65_vvmemv128iiiv2048_Intrinsic<"HEXAGON_V6_vgathermhwq_128B">;
-
-class Hexagon_V65_viiv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_viiv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_vv64iiiv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v16i32_ty,
- llvm_v16i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_vv128iiiv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty,
- llvm_v32i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_viiv1024v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v32i32_ty,llvm_v16i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_viiv2048v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v64i32_ty,llvm_v32i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_vv64iiiv1024v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty,
- llvm_v16i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_vv128iiiv2048v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v64i32_ty,
- llvm_v32i32_ty],
- [IntrWriteMem]>;
-
-class Hexagon_V65_v2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [],
- [IntrNoMem]>;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw
-def int_hexagon_V6_vscattermw :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw_128B
-def int_hexagon_V6_vscattermw_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh
-def int_hexagon_V6_vscattermh :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh_128B
-def int_hexagon_V6_vscattermh_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw_add,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw_add
-def int_hexagon_V6_vscattermw_add :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw_add">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw_add_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw_add_128B
-def int_hexagon_V6_vscattermw_add_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_add_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh_add,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh_add
-def int_hexagon_V6_vscattermh_add :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh_add">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh_add_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh_add_128B
-def int_hexagon_V6_vscattermh_add_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_add_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermwq,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermwq
-def int_hexagon_V6_vscattermwq :
-Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermwq">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermwq_128B,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermwq_128B
-def int_hexagon_V6_vscattermwq_128B :
-Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermwq_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhq,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermhq
-def int_hexagon_V6_vscattermhq :
-Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermhq">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhq_128B,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermhq_128B
-def int_hexagon_V6_vscattermhq_128B :
-Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermhq_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw
-def int_hexagon_V6_vscattermhw :
-Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw_128B,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw_128B
-def int_hexagon_V6_vscattermhw_128B :
-Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhwq,v_ftype_QVSISIVDVI,5)
-// tag : V6_vscattermhwq
-def int_hexagon_V6_vscattermhwq :
-Hexagon_V65_vv64iiiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhwq">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhwq_128B,v_ftype_QVSISIVDVI,5)
-// tag : V6_vscattermhwq_128B
-def int_hexagon_V6_vscattermhwq_128B :
-Hexagon_V65_vv128iiiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhwq_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw_add
-def int_hexagon_V6_vscattermhw_add :
-Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw_add">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add_128B,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw_add_128B
-def int_hexagon_V6_vscattermhw_add_128B :
-Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B">;
-
-// Auto-generated intrinsics
-
-// tag : S2_vsatwh
-class Hexagon_i32_i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vrmpybusv
-class Hexagon_v16i32_v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vrmpybusv
-class Hexagon_v32i32_v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vaslw_acc
-class Hexagon_v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vaslw_acc
-class Hexagon_v32i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vmux
-class Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vmux
-class Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : S2_tableidxd_goodsyntax
-class Hexagon_i32_i32i32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<3>]>;
-
-// tag : V6_vandnqrt_acc
-class Hexagon_v16i32_v16i32v512i1i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandnqrt_acc
-class Hexagon_v32i32_v32i32v1024i1i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vrmpybusi
-class Hexagon_v32i32_v32i32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vrmpybusi
-class Hexagon_v64i32_v64i32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vsubb_dv
-class Hexagon_v64i32_v64i32v64i32_Intrinsic<string GCCIntSuffix, list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : M2_mpysu_up
-class Hexagon_i32_i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : M2_mpyud_acc_ll_s0
-class Hexagon_i64_i64i32i32_Intrinsic<string GCCIntSuffix, list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : S2_lsr_i_r_nac
-class Hexagon_i32_i32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : M2_cmpysc_s0
-class Hexagon_i64_i32i32_Intrinsic<string GCCIntSuffix, list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_lo
-class Hexagon_v16i32_v32i32_Intrinsic<string GCCIntSuffix, list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v32i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_lo
-class Hexagon_v32i32_v64i32_Intrinsic<string GCCIntSuffix, list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v64i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : S2_shuffoh
-class Hexagon_i64_i64i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : F2_sfmax
-class Hexagon_float_floatfloat_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty,llvm_float_ty],
- [IntrNoMem, Throws]>;
-
-// tag : A2_vabswsat
-class Hexagon_i64_i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag :
-class Hexagon_v32i32_v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_ldnp0
-class Hexagon_v16i32_i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_ldnp0
-class Hexagon_v32i32_i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vdmpyhb
-class Hexagon_v16i32_v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vdmpyhb
-class Hexagon_v32i32_v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : A4_vcmphgti
-class Hexagon_i32_i64i32_Intrinsic<string GCCIntSuffix, list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag :
-class Hexagon_v32i32_v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : S6_rol_i_p_or
-class Hexagon_i64_i64i64i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vgtuh_and
-class Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vgtuh_and
-class Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : A2_abssat
-class Hexagon_i32_i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : A2_vcmpwgtu
-class Hexagon_i32_i64i64_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vtmpybus_acc
-class Hexagon_v64i32_v64i32v64i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : F2_conv_df2uw_chop
-class Hexagon_i32_double_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_double_ty],
- [IntrNoMem]>;
-
-// tag : V6_pred_or
-class Hexagon_v512i1_v512i1v512i1_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
- [IntrNoMem]>;
-
-// tag : V6_pred_or
-class Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
- [IntrNoMem]>;
-
-// tag : S2_asr_i_p_rnd_goodsyntax
-class Hexagon_i64_i64i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : F2_conv_w2df
-class Hexagon_double_i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vunpackuh
-class Hexagon_v32i32_v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vunpackuh
-class Hexagon_v64i32_v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vadduhw_acc
-class Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vadduhw_acc
-class Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : M2_vdmacs_s0
-class Hexagon_i64_i64i64i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vrmpybub_rtt_acc
-class Hexagon_v32i32_v32i32v16i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vrmpybub_rtt_acc
-class Hexagon_v64i32_v64i32v32i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_ldu0
-class Hexagon_v16i32_i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_ldu0
-class Hexagon_v32i32_i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : S4_extract_rp
-class Hexagon_i32_i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vdmpyhsuisat
-class Hexagon_v16i32_v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vdmpyhsuisat
-class Hexagon_v32i32_v64i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : A2_addsp
-class Hexagon_i64_i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_extractw
-class Hexagon_i32_v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_extractw
-class Hexagon_i32_v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vlutvwhi
-class Hexagon_v32i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vlutvwhi
-class Hexagon_v64i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vgtuh
-class Hexagon_v512i1_v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vgtuh
-class Hexagon_v1024i1_v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : F2_sffma_lib
-class Hexagon_float_floatfloatfloat_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty],
- [IntrNoMem, Throws]>;
-
-// tag : F2_conv_ud2df
-class Hexagon_double_i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : S2_vzxthw
-class Hexagon_i64_i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vtmpyhb
-class Hexagon_v64i32_v64i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vshufoeh
-class Hexagon_v32i32_v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vshufoeh
-class Hexagon_v64i32_v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vlut4
-class Hexagon_v16i32_v16i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vlut4
-class Hexagon_v32i32_v32i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag :
-class Hexagon_v16i32_v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : F2_conv_uw2sf
-class Hexagon_float_i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vswap
-class Hexagon_v32i32_v512i1v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vswap
-class Hexagon_v64i32_v1024i1v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandnqrt
-class Hexagon_v16i32_v512i1i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandnqrt
-class Hexagon_v32i32_v1024i1i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vmpyub
-class Hexagon_v64i32_v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : A5_ACS
-class Hexagon_i64i32_i64i64i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty,llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vunpackob
-class Hexagon_v32i32_v32i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vunpackob
-class Hexagon_v64i32_v64i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vmpyhsat_acc
-class Hexagon_v32i32_v32i32v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vmpyhsat_acc
-class Hexagon_v64i32_v64i32v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vaddcarrysat
-class Hexagon_v16i32_v16i32v16i32v512i1_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v512i1_ty],
- [IntrNoMem]>;
-
-// tag : V6_vaddcarrysat
-class Hexagon_v32i32_v32i32v32i32v1024i1_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v1024i1_ty],
- [IntrNoMem]>;
-
-// tag : V6_vlutvvb_oracc
-class Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vlutvvb_oracc
-class Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
-      list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
// tag : V6_vrmpybub_rtt
-class Hexagon_v32i32_v16i32i64_Intrinsic<string GCCIntSuffix>
+class Hexagon_v32i32_v16i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
[IntrNoMem]>;
-// tag : V6_vrmpybub_rtt
-class Hexagon_v64i32_v32i32i64_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpybub_rtt_128B
+class Hexagon_v64i32_v32i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
[IntrNoMem]>;
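// A minimal sketch of how the renamed _rtt classes are instantiated,
// assuming the int_hexagon_*/HEXAGON_* def naming convention used
// throughout this file (illustrative def, not part of the applied diff):
def int_hexagon_V6_vrmpybub_rtt :
Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;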
-// tag : A4_addp_c
-class Hexagon_i64i32_i64i64i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty,llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vrsadubi_acc
-class Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vrsadubi_acc
-class Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : F2_conv_df2sf
-class Hexagon_float_double_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_double_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandvqv
-class Hexagon_v16i32_v512i1v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandvqv
-class Hexagon_v32i32_v1024i1v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : C2_vmux
-class Hexagon_i64_i32i64i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : F2_sfcmpeq
-class Hexagon_i32_floatfloat_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_float_ty,llvm_float_ty],
- [IntrNoMem, Throws]>;
-
-// tag : V6_vmpahhsat
-class Hexagon_v16i32_v16i32v16i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vmpahhsat
-class Hexagon_v32i32_v32i32v32i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandvrt
-class Hexagon_v512i1_v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandvrt
-class Hexagon_v1024i1_v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vsubcarry
-class Hexagon_custom_v16i32v512i1_v16i32v16i32v512i1_Intrinsic
- : Hexagon_NonGCC_Intrinsic<
- [llvm_v16i32_ty,llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v512i1_ty],
- [IntrNoMem]>;
-
-// tag : V6_vsubcarry
-class Hexagon_custom_v32i32v1024i1_v32i32v32i32v1024i1_Intrinsic_128B
- : Hexagon_NonGCC_Intrinsic<
- [llvm_v32i32_ty,llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v1024i1_ty],
- [IntrNoMem]>;
-
-// tag : F2_sffixupr
-class Hexagon_float_float_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty],
- [IntrNoMem, Throws]>;
-
-// tag : V6_vandvrt_acc
-class Hexagon_v512i1_v512i1v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vandvrt_acc
-class Hexagon_v1024i1_v1024i1v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : F2_dfsub
-class Hexagon_double_doubledouble_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_double_ty,llvm_double_ty],
- [IntrNoMem, Throws]>;
-
-// tag : V6_vmpyowh_sacc
-class Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vmpyowh_sacc
-class Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : S2_insertp
-class Hexagon_i64_i64i64i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : F2_sfinvsqrta
-class Hexagon_floati32_float_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty,llvm_i32_ty], [llvm_float_ty],
- [IntrNoMem, Throws]>;
-
-// tag : V6_vtran2x2_map
-class Hexagon_v16i32v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty,llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vtran2x2_map
-class Hexagon_v32i32v32i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty,llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vlutvwh_oracc
-class Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : V6_vlutvwh_oracc
-class Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- !listconcat([IntrNoMem], intr_properties)>;
-
-// tag : F2_dfcmpge
-class Hexagon_i32_doubledouble_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_double_ty,llvm_double_ty],
- [IntrNoMem, Throws]>;
-
-// tag : F2_conv_df2d_chop
-class Hexagon_i64_double_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_double_ty],
- [IntrNoMem]>;
-
-// tag : F2_conv_sf2w
-class Hexagon_i32_float_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_float_ty],
- [IntrNoMem]>;
-
-// tag : F2_sfclass
-class Hexagon_i32_floati32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_float_ty,llvm_i32_ty],
- [IntrNoMem, Throws, ImmArg<1>]>;
-
-// tag : F2_conv_sf2ud_chop
-class Hexagon_i64_float_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_float_ty],
- [IntrNoMem]>;
-
-// tag : V6_pred_scalar2v2
-class Hexagon_v512i1_i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_pred_scalar2v2
-class Hexagon_v1024i1_i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : F2_sfrecipa
-class Hexagon_floati32_floatfloat_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty,llvm_i32_ty], [llvm_float_ty,llvm_float_ty],
- [IntrNoMem, Throws]>;
-
-// tag : V6_vprefixqh
-class Hexagon_v16i32_v512i1_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty],
- [IntrNoMem]>;
-
-// tag : V6_vprefixqh
-class Hexagon_v32i32_v1024i1_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty],
- [IntrNoMem]>;
-
-// tag : V6_vdmpyhisat_acc
-class Hexagon_v16i32_v16i32v32i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_vdmpyhisat_acc
-class Hexagon_v32i32_v32i32v64i32i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
-// tag : F2_conv_ud2sf
-class Hexagon_float_i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : F2_conv_sf2df
-class Hexagon_double_float_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_float_ty],
- [IntrNoMem]>;
-
-// tag : F2_sffma_sc
-class Hexagon_float_floatfloatfloati32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty,llvm_i32_ty],
- [IntrNoMem, Throws]>;
-
-// tag : F2_dfclass
-class Hexagon_i32_doublei32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = []>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_double_ty,llvm_i32_ty],
- !listconcat([IntrNoMem, Throws], intr_properties)>;
-
-// tag : V6_vd0
-class Hexagon_v16i32__Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [],
- [IntrNoMem]>;
-
-// tag : V6_vd0
-class Hexagon_v32i32__Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [],
- [IntrNoMem]>;
-
-// tag : V6_vdd0
-class Hexagon_v64i32__Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [],
- [IntrNoMem]>;
-
-// tag : S2_insert_rp
-class Hexagon_i32_i32i32i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_vassignp
-class Hexagon_v64i32_v64i32_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty],
- [IntrNoMem]>;
-
-// tag : A6_vminub_RdP
-class Hexagon_i64i32_i64i64_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty,llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
-
-// tag : V6_pred_not
-class Hexagon_v512i1_v512i1_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpybub_rtt_acc
+class Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty],
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
[IntrNoMem]>;
-// tag : V6_pred_not
-class Hexagon_v1024i1_v1024i1_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpybub_rtt_acc_128B
+class Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty],
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
[IntrNoMem]>;
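// Likewise, a sketch for the accumulating _rtt variants (illustrative def,
// assuming the same naming convention as above):
def int_hexagon_V6_vrmpybub_rtt_acc :
Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;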
-// V5 Scalar Instructions.
-
-def int_hexagon_S2_asr_r_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
-
-def int_hexagon_S2_vsatwh :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwh">;
-
-def int_hexagon_S2_tableidxd_goodsyntax :
-Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax">;
-
-def int_hexagon_M2_mpysu_up :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysu_up">;
-
-def int_hexagon_M2_mpyud_acc_ll_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
-
-def int_hexagon_M2_mpyud_acc_ll_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
-
-def int_hexagon_M2_cmpysc_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
-
-def int_hexagon_M2_cmpysc_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
-
-def int_hexagon_M4_cmpyi_whc :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
-
-def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
-
-def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
-
-def int_hexagon_S2_tableidxb_goodsyntax :
-Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax">;
-
-def int_hexagon_S2_shuffoh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffoh">;
-
-def int_hexagon_F2_sfmax :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmax">;
-
-def int_hexagon_A2_vabswsat :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabswsat">;
-
-def int_hexagon_S2_asr_i_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r", [ImmArg<1>]>;
-
-def int_hexagon_S2_asr_i_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p", [ImmArg<1>]>;
-
-def int_hexagon_A4_combineri :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineri", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpy_nac_sat_hl_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
-
-def int_hexagon_M4_vpmpyh_acc :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
-
-def int_hexagon_M2_vcmpy_s0_sat_i :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
-
-def int_hexagon_A2_notp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_notp">;
-
-def int_hexagon_M2_mpy_hl_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
-
-def int_hexagon_M2_mpy_hl_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
-
-def int_hexagon_C4_or_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_and">;
-
-def int_hexagon_M2_vmac2s_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
-
-def int_hexagon_M2_vmac2s_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
-
-def int_hexagon_S2_brevp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_brevp">;
-
-def int_hexagon_M4_pmpyw_acc :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
-
-def int_hexagon_S2_cl1 :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl1">;
-
-def int_hexagon_C4_cmplte :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplte">;
-
-def int_hexagon_M2_mmpyul_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
-
-def int_hexagon_A2_vaddws :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddws">;
-
-def int_hexagon_A2_maxup :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxup">;
-
-def int_hexagon_A4_vcmphgti :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgti", [ImmArg<1>]>;
-
-def int_hexagon_S2_interleave :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_interleave">;
-
-def int_hexagon_M2_vrcmpyi_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
-
-def int_hexagon_A2_abssat :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abssat">;
-
-def int_hexagon_A2_vcmpwgtu :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
-
-def int_hexagon_C2_cmpgtu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtu">;
-
-def int_hexagon_C2_cmpgtp :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtp">;
-
-def int_hexagon_A4_cmphgtui :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtui", [ImmArg<1>]>;
-
-def int_hexagon_C2_cmpgti :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgti", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpyi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyi">;
-
-def int_hexagon_F2_conv_df2uw_chop :
-Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
-
-def int_hexagon_A4_cmpheq :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheq">;
-
-def int_hexagon_M2_mpy_lh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
-
-def int_hexagon_M2_mpy_lh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
-
-def int_hexagon_S2_lsr_i_r_xacc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc", [ImmArg<2>]>;
-
-def int_hexagon_S2_vrcnegh :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vrcnegh">;
-
-def int_hexagon_S2_extractup :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S2_extractup", [ImmArg<1>, ImmArg<2>]>;
-
-def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax", [ImmArg<1>]>;
-
-def int_hexagon_S4_ntstbit_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_r">;
-
-def int_hexagon_F2_conv_w2sf :
-Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_w2sf">;
-
-def int_hexagon_C2_not :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_not">;
-
-def int_hexagon_C2_tfrpr :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrpr">;
-
-def int_hexagon_M2_mpy_ll_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
-
-def int_hexagon_M2_mpy_ll_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
-
-def int_hexagon_A4_cmpbgt :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgt">;
-
-def int_hexagon_S2_asr_r_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
-
-def int_hexagon_A4_rcmpneqi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneqi", [ImmArg<1>]>;
-
-def int_hexagon_S2_asl_i_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_nac", [ImmArg<2>]>;
-
-def int_hexagon_M2_subacc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_subacc">;
-
-def int_hexagon_A2_orp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_orp">;
-
-def int_hexagon_M2_mpyu_up :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_up">;
-
-def int_hexagon_M2_mpy_acc_sat_lh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
-
-def int_hexagon_S2_asr_i_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vh", [ImmArg<1>]>;
-
-def int_hexagon_S2_asr_i_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vw", [ImmArg<1>]>;
-
-def int_hexagon_A4_cmpbgtu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtu">;
-
-def int_hexagon_A4_vcmpbeq_any :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
-
-def int_hexagon_A4_cmpbgti :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgti", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpyd_lh_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
-
-def int_hexagon_S2_asl_r_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
-
-def int_hexagon_S2_lsr_i_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_nac", [ImmArg<2>]>;
-
-def int_hexagon_A2_addsp :
-Hexagon_i64_i32i64_Intrinsic<"HEXAGON_A2_addsp">;
-
-def int_hexagon_S4_vxsubaddw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddw">;
-
-def int_hexagon_A4_vcmpheqi :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpheqi", [ImmArg<1>]>;
-
-def int_hexagon_S4_vxsubaddh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddh">;
-
-def int_hexagon_M4_pmpyw :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_pmpyw">;
-
-def int_hexagon_S2_vsathb :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathb">;
-
-def int_hexagon_S2_asr_r_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
-
-def int_hexagon_M2_mpyu_acc_lh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
-
-def int_hexagon_M2_mpyu_acc_lh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
-
-def int_hexagon_S2_lsl_r_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
-
-def int_hexagon_A2_pxorf :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_A2_pxorf">;
-
-def int_hexagon_C2_cmpgei :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgei", [ImmArg<1>]>;
-
-def int_hexagon_A2_vsubub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubub">;
-
-def int_hexagon_S2_asl_i_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_p", [ImmArg<1>]>;
-
-def int_hexagon_S2_asl_i_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r", [ImmArg<1>]>;
-
-def int_hexagon_A4_vrminuw :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuw">;
-
-def int_hexagon_F2_sffma :
-Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma">;
-
-def int_hexagon_A2_absp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_absp">;
-
-def int_hexagon_C2_all8 :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_all8">;
-
-def int_hexagon_A4_vrminuh :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuh">;
-
-def int_hexagon_F2_sffma_lib :
-Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma_lib">;
-
-def int_hexagon_M4_vrmpyoh_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
-
-def int_hexagon_M4_vrmpyoh_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
-
-def int_hexagon_C2_bitsset :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsset">;
-
-def int_hexagon_M2_mpysip :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysip", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpysin :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysin", [ImmArg<1>]>;
-
-def int_hexagon_A4_boundscheck :
-Hexagon_i32_i32i64_Intrinsic<"HEXAGON_A4_boundscheck">;
-
-def int_hexagon_M5_vrmpybuu :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybuu">;
-
-def int_hexagon_C4_fastcorner9 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9">;
-
-def int_hexagon_M2_vrcmpys_s1rp :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
-
-def int_hexagon_A2_neg :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_neg">;
-
-def int_hexagon_A2_subsat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subsat">;
-
-def int_hexagon_S2_asl_r_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r">;
-
-def int_hexagon_S2_asl_r_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_p">;
-
-def int_hexagon_A2_vnavgh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgh">;
-
-def int_hexagon_M2_mpy_nac_sat_hl_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
-
-def int_hexagon_F2_conv_ud2df :
-Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_ud2df">;
-
-def int_hexagon_A2_vnavgw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgw">;
-
-def int_hexagon_S2_asl_i_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_acc", [ImmArg<2>]>;
-
-def int_hexagon_S4_subi_lsr_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_lsr_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_S2_vzxthw :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxthw">;
-
-def int_hexagon_F2_sfadd :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfadd">;
-
-def int_hexagon_A2_sub :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_sub">;
-
-def int_hexagon_M2_vmac2su_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
-
-def int_hexagon_M2_vmac2su_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
-
-def int_hexagon_M2_dpmpyss_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
-
-def int_hexagon_S2_insert :
-Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_insert">;
-
-def int_hexagon_S2_packhl :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_S2_packhl">;
-
-def int_hexagon_A4_vcmpwgti :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgti", [ImmArg<1>]>;
-
-def int_hexagon_A2_vavguwr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguwr">;
-
-def int_hexagon_S2_asl_r_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
-
-def int_hexagon_A2_svsubhs :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubhs">;
-
-def int_hexagon_A2_addh_l16_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
-
-def int_hexagon_M4_and_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_and">;
-
-def int_hexagon_F2_conv_d2df :
-Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_d2df">;
-
-def int_hexagon_C2_cmpgtui :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtui", [ImmArg<1>]>;
-
-def int_hexagon_A2_vconj :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vconj">;
-
-def int_hexagon_S2_lsr_r_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
-
-def int_hexagon_S2_lsr_r_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
-
-def int_hexagon_A2_subh_l16_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
-
-def int_hexagon_S4_vxsubaddhr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
-
-def int_hexagon_S2_clbp :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_clbp">;
-
-def int_hexagon_S2_deinterleave :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_deinterleave">;
-
-def int_hexagon_C2_any8 :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_any8">;
-
-def int_hexagon_S2_togglebit_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_r">;
-
-def int_hexagon_S2_togglebit_i :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_i", [ImmArg<1>]>;
-
-def int_hexagon_F2_conv_uw2sf :
-Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
-
-def int_hexagon_S2_vsathb_nopack :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
-
-def int_hexagon_M2_cmacs_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s0">;
-
-def int_hexagon_M2_cmacs_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s1">;
-
-def int_hexagon_M2_mpy_sat_hh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
-
-def int_hexagon_M2_mpy_sat_hh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
-
-def int_hexagon_M2_mmacuhs_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
-
-def int_hexagon_M2_mmacuhs_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
-
-def int_hexagon_S2_clrbit_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_r">;
-
-def int_hexagon_C4_or_andn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_andn">;
-
-def int_hexagon_S2_asl_r_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
-
-def int_hexagon_S2_asl_i_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_acc", [ImmArg<2>]>;
-
-def int_hexagon_A4_vcmpwgtui :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgtui", [ImmArg<1>]>;
-
-def int_hexagon_M4_vrmpyoh_acc_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
-
-def int_hexagon_M4_vrmpyoh_acc_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
-
-def int_hexagon_A4_vrmaxh :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxh">;
-
-def int_hexagon_A2_vcmpbeq :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbeq">;
-
-def int_hexagon_A2_vcmphgt :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgt">;
-
-def int_hexagon_A2_vnavgwcr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwcr">;
-
-def int_hexagon_M2_vrcmacr_s0c :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
-
-def int_hexagon_A2_vavgwcr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwcr">;
-
-def int_hexagon_S2_asl_i_p_xacc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_xacc", [ImmArg<2>]>;
-
-def int_hexagon_A4_vrmaxw :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxw">;
-
-def int_hexagon_A2_vnavghr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghr">;
-
-def int_hexagon_M4_cmpyi_wh :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
-
-def int_hexagon_A2_tfrsi :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrsi", [ImmArg<0>]>;
-
-def int_hexagon_S2_asr_i_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_acc", [ImmArg<2>]>;
-
-def int_hexagon_A2_svnavgh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svnavgh">;
-
-def int_hexagon_S2_lsr_i_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r", [ImmArg<1>]>;
-
-def int_hexagon_M2_vmac2 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2">;
-
-def int_hexagon_A4_vcmphgtui :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgtui", [ImmArg<1>]>;
-
-def int_hexagon_A2_svavgh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavgh">;
-
-def int_hexagon_M4_vrmpyeh_acc_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
-
-def int_hexagon_M4_vrmpyeh_acc_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
-
-def int_hexagon_S2_lsr_i_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p", [ImmArg<1>]>;
-
-def int_hexagon_A2_combine_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hl">;
-
-def int_hexagon_M2_mpy_up :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up">;
-
-def int_hexagon_A2_combine_hh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hh">;
-
-def int_hexagon_A2_negsat :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_negsat">;
-
-def int_hexagon_M2_mpyd_hl_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
-
-def int_hexagon_M2_mpyd_hl_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
-
-def int_hexagon_A4_bitsplit :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitsplit">;
-
-def int_hexagon_A2_vabshsat :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabshsat">;
-
-def int_hexagon_M2_mpyui :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyui">;
-
-def int_hexagon_A2_addh_l16_sat_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
-
-def int_hexagon_S2_lsl_r_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
-
-def int_hexagon_M2_mmpyul_rs0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
-
-def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsr_r_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
-
-def int_hexagon_C2_cmplt :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmplt">;
-
-def int_hexagon_M2_cmacr_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacr_s0">;
-
-def int_hexagon_M4_or_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_and">;
-
-def int_hexagon_M4_mpyrr_addi :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addi", [ImmArg<0>]>;
-
-def int_hexagon_S4_or_andi :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andi", [ImmArg<2>]>;
-
-def int_hexagon_M2_mpy_sat_hl_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
-
-def int_hexagon_M2_mpy_sat_hl_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
-
-def int_hexagon_M4_mpyrr_addr :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
-
-def int_hexagon_M2_mmachs_rs0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
-
-def int_hexagon_M2_mmachs_rs1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
-
-def int_hexagon_M2_vrcmpyr_s0c :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
-
-def int_hexagon_M2_mpy_acc_sat_hl_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
-
-def int_hexagon_M2_mpyd_acc_ll_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
-
-def int_hexagon_F2_sffixupn :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupn">;
-
-def int_hexagon_M2_mpyd_acc_lh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
-
-def int_hexagon_M2_mpyd_acc_lh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
-
-def int_hexagon_M2_mpy_rnd_hh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
-
-def int_hexagon_M2_mpy_rnd_hh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
-
-def int_hexagon_A2_vadduhs :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vadduhs">;
-
-def int_hexagon_A2_vsubuhs :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubuhs">;
-
-def int_hexagon_A2_subh_h16_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
-
-def int_hexagon_A2_subh_h16_hh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
-
-def int_hexagon_A2_xorp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_xorp">;
-
-def int_hexagon_A4_tfrpcp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A4_tfrpcp">;
-
-def int_hexagon_A2_addh_h16_lh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
-
-def int_hexagon_A2_addh_h16_sat_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
-
-def int_hexagon_A2_addh_h16_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
-
-def int_hexagon_A2_addh_h16_sat_hh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
-
-def int_hexagon_A2_zxtb :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxtb">;
-
-def int_hexagon_A2_zxth :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxth">;
-
-def int_hexagon_A2_vnavgwr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwr">;
-
-def int_hexagon_M4_or_xor :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_xor">;
-
-def int_hexagon_M2_mpyud_acc_hh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
-
-def int_hexagon_M2_mpyud_acc_hh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
-
-def int_hexagon_M5_vmacbsu :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbsu">;
-
-def int_hexagon_M2_dpmpyuu_acc_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
-
-def int_hexagon_M2_mpy_rnd_hl_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
-
-def int_hexagon_M2_mpy_rnd_hl_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
-
-def int_hexagon_F2_sffms_lib :
-Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms_lib">;
-
-def int_hexagon_C4_cmpneqi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneqi", [ImmArg<1>]>;
-
-def int_hexagon_M4_and_xor :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_xor">;
-
-def int_hexagon_A2_sat :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_sat">;
-
-def int_hexagon_M2_mpyd_nac_lh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
-
-def int_hexagon_M2_mpyd_nac_lh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
-
-def int_hexagon_A2_addsat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addsat">;
-
-def int_hexagon_A2_svavghs :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavghs">;
-
-def int_hexagon_A2_vrsadub_acc :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
-
-def int_hexagon_C2_bitsclri :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclri", [ImmArg<1>]>;
-
-def int_hexagon_A2_subh_h16_sat_hh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
-
-def int_hexagon_A2_subh_h16_sat_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
-
-def int_hexagon_M2_mmaculs_rs0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
-
-def int_hexagon_M2_mmaculs_rs1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
-
-def int_hexagon_M2_vradduh :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vradduh">;
-
-def int_hexagon_A4_addp_c :
-Hexagon_i64i32_i64i64i32_Intrinsic<"HEXAGON_A4_addp_c">;
-
-def int_hexagon_C2_xor :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_xor">;
-
-def int_hexagon_S2_lsl_r_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
-
-def int_hexagon_M2_mmpyh_rs1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
-
-def int_hexagon_M2_mmpyh_rs0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
-
-def int_hexagon_F2_conv_df2ud_chop :
-Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
-
-def int_hexagon_C4_or_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_or">;
-
-def int_hexagon_S4_vxaddsubhr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
-
-def int_hexagon_S2_vsathub :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathub">;
-
-def int_hexagon_F2_conv_df2sf :
-Hexagon_float_double_Intrinsic<"HEXAGON_F2_conv_df2sf">;
-
-def int_hexagon_M2_hmmpyh_rs1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
-
-def int_hexagon_M2_hmmpyh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
-
-def int_hexagon_A2_vavgwr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwr">;
-
-def int_hexagon_S2_tableidxh_goodsyntax :
-Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax">;
-
-def int_hexagon_A2_sxth :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxth">;
-
-def int_hexagon_A2_sxtb :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxtb">;
-
-def int_hexagon_C4_or_orn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_orn">;
-
-def int_hexagon_M2_vrcmaci_s0c :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
-
-def int_hexagon_A2_sxtw :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_sxtw">;
-
-def int_hexagon_M2_vabsdiffh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffh">;
-
-def int_hexagon_M2_mpy_acc_lh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
-
-def int_hexagon_M2_mpy_acc_lh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
-
-def int_hexagon_M2_hmmpyl_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
-
-def int_hexagon_S2_cl1p :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl1p">;
-
-def int_hexagon_M2_vabsdiffw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffw">;
-
-def int_hexagon_A4_andnp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_andnp">;
-
-def int_hexagon_C2_vmux :
-Hexagon_i64_i32i64i64_Intrinsic<"HEXAGON_C2_vmux">;
-
-def int_hexagon_S2_parityp :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_S2_parityp">;
-
-def int_hexagon_S2_lsr_i_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_and", [ImmArg<2>]>;
-
-def int_hexagon_S2_asr_i_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_or", [ImmArg<2>]>;
-
-def int_hexagon_M2_mpyu_nac_ll_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
-
-def int_hexagon_M2_mpyu_nac_ll_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
-
-def int_hexagon_F2_sfcmpeq :
-Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpeq">;
-
-def int_hexagon_A2_vaddb_map :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddb_map">;
-
-def int_hexagon_S2_lsr_r_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
-
-def int_hexagon_A2_vcmpheq :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpheq">;
-
-def int_hexagon_S2_clbnorm :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clbnorm">;
-
-def int_hexagon_M2_cnacsc_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
-
-def int_hexagon_M2_cnacsc_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
-
-def int_hexagon_S4_subaddi :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subaddi", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpyud_nac_hl_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
-
-def int_hexagon_M2_mpyud_nac_hl_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
-
-def int_hexagon_S5_vasrhrnd_goodsyntax :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax", [ImmArg<1>]>;
-
-def int_hexagon_S2_tstbit_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_r">;
-
-def int_hexagon_S4_vrcrotate :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate", [ImmArg<2>]>;
-
-def int_hexagon_M2_mmachs_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s1">;
-
-def int_hexagon_M2_mmachs_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s0">;
-
-def int_hexagon_S2_tstbit_i :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_i", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpy_up_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
-
-def int_hexagon_S2_extractu_rp :
-Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S2_extractu_rp">;
-
-def int_hexagon_M2_mmpyuh_rs0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
-
-def int_hexagon_S2_lsr_i_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vw", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpy_rnd_ll_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
-
-def int_hexagon_M2_mpy_rnd_ll_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
-
-def int_hexagon_M4_or_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_or">;
-
-def int_hexagon_M2_mpyu_hh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
-
-def int_hexagon_M2_mpyu_hh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
-
-def int_hexagon_S2_asl_r_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
-
-def int_hexagon_M2_mpyu_nac_lh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
-
-def int_hexagon_M2_mpyu_nac_lh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
-
-def int_hexagon_M2_mpy_sat_ll_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
-
-def int_hexagon_M2_mpy_sat_ll_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
-
-def int_hexagon_F2_conv_w2df :
-Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_w2df">;
-
-def int_hexagon_A2_subh_l16_sat_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
-
-def int_hexagon_C2_cmpeqi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeqi", [ImmArg<1>]>;
-
-def int_hexagon_S2_asl_i_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_and", [ImmArg<2>]>;
-
-def int_hexagon_S2_vcnegh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcnegh">;
-
-def int_hexagon_A4_vcmpweqi :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpweqi", [ImmArg<1>]>;
-
-def int_hexagon_M2_vdmpyrs_s0 :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
-
-def int_hexagon_M2_vdmpyrs_s1 :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
-
-def int_hexagon_M4_xor_xacc :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_xor_xacc">;
-
-def int_hexagon_M2_vdmpys_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
-
-def int_hexagon_M2_vdmpys_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
-
-def int_hexagon_A2_vavgubr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgubr">;
-
-def int_hexagon_M2_mpyu_hl_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
-
-def int_hexagon_M2_mpyu_hl_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
-
-def int_hexagon_S2_asl_r_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
-
-def int_hexagon_S2_cl0p :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl0p">;
-
-def int_hexagon_S2_valignib :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignib", [ImmArg<2>]>;
-
-def int_hexagon_F2_sffixupd :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupd">;
-
-def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
-
-def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
-
-def int_hexagon_M2_cmacsc_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
-
-def int_hexagon_M2_cmacsc_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
-
-def int_hexagon_S2_ct1 :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct1">;
-
-def int_hexagon_S2_ct0 :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct0">;
-
-def int_hexagon_M2_dpmpyuu_nac_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
-
-def int_hexagon_M2_mmpyul_rs1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
-
-def int_hexagon_S4_ntstbit_i :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_i", [ImmArg<1>]> ;
-
-def int_hexagon_F2_sffixupr :
-Hexagon_float_float_Intrinsic<"HEXAGON_F2_sffixupr">;
-
-def int_hexagon_S2_asr_r_p_xor :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
-
-def int_hexagon_M2_mpyud_acc_hl_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
-
-def int_hexagon_M2_mpyud_acc_hl_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
-
-def int_hexagon_A2_vcmphgtu :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgtu">;
-
-def int_hexagon_C2_andn :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_andn">;
-
-def int_hexagon_M2_vmpy2s_s0pack :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
-
-def int_hexagon_S4_addaddi :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addaddi", [ImmArg<2>]>;
-
-def int_hexagon_M2_mpyd_acc_ll_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
-
-def int_hexagon_M2_mpy_acc_sat_hl_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
-
-def int_hexagon_A4_rcmpeqi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeqi", [ImmArg<1>]>;
-
-def int_hexagon_M4_xor_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_and">;
-
-def int_hexagon_S2_asl_i_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_and", [ImmArg<2>]>;
-
-def int_hexagon_M2_mmpyuh_rs1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
-
-def int_hexagon_S2_asr_r_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
-
-def int_hexagon_A4_round_ri :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri", [ImmArg<1>]>;
-
-def int_hexagon_A2_max :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_max">;
-
-def int_hexagon_A4_round_rr :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr">;
-
-def int_hexagon_A4_combineii :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineii", [ImmArg<0>, ImmArg<1>]>;
-
-def int_hexagon_A4_combineir :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineir", [ImmArg<0>]>;
-
-def int_hexagon_C4_and_orn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_orn">;
-
-def int_hexagon_M5_vmacbuu :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbuu">;
-
-def int_hexagon_A4_rcmpeq :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeq">;
-
-def int_hexagon_M4_cmpyr_whc :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
-
-def int_hexagon_S2_lsr_i_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_acc", [ImmArg<2>]>;
-
-def int_hexagon_S2_vzxtbh :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxtbh">;
-
-def int_hexagon_M2_mmacuhs_rs1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
-
-def int_hexagon_S2_asr_r_r_sat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
-
-def int_hexagon_A2_combinew :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combinew">;
-
-def int_hexagon_M2_mpy_acc_ll_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
-
-def int_hexagon_M2_mpy_acc_ll_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
-
-def int_hexagon_M2_cmpyi_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
-
-def int_hexagon_S2_asl_r_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
-
-def int_hexagon_S4_ori_asl_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_asl_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_C4_nbitsset :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsset">;
-
-def int_hexagon_M2_mpyu_acc_hh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
-
-def int_hexagon_M2_mpyu_acc_hh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
-
-def int_hexagon_M2_mpyu_ll_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
-
-def int_hexagon_M2_mpyu_ll_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
-
-def int_hexagon_A2_addh_l16_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
-
-def int_hexagon_S2_lsr_r_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
-
-def int_hexagon_A4_modwrapu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_modwrapu">;
-
-def int_hexagon_A4_rcmpneq :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneq">;
-
-def int_hexagon_M2_mpyd_acc_hh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
-
-def int_hexagon_M2_mpyd_acc_hh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
-
-def int_hexagon_F2_sfimm_p :
-Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_p", [ImmArg<0>]>;
-
-def int_hexagon_F2_sfimm_n :
-Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_n", [ImmArg<0>]>;
-
-def int_hexagon_M4_cmpyr_wh :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
-
-def int_hexagon_S2_lsl_r_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
-
-def int_hexagon_A2_vavgub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgub">;
-
-def int_hexagon_F2_conv_d2sf :
-Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_d2sf">;
-
-def int_hexagon_A2_vavguh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguh">;
-
-def int_hexagon_A4_cmpbeqi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeqi", [ImmArg<1>]>;
-
-def int_hexagon_F2_sfcmpuo :
-Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpuo">;
-
-def int_hexagon_A2_vavguw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguw">;
-
-def int_hexagon_S2_asr_i_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_nac", [ImmArg<2>]>;
-
-def int_hexagon_S2_vsatwh_nopack :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
-
-def int_hexagon_M2_mpyd_hh_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
-
-def int_hexagon_M2_mpyd_hh_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
-
-def int_hexagon_S2_lsl_r_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
-
-def int_hexagon_A2_minu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_minu">;
-
-def int_hexagon_M2_mpy_sat_lh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
-
-def int_hexagon_M4_or_andn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_andn">;
-
-def int_hexagon_A2_minp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minp">;
-
-def int_hexagon_S4_or_andix :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andix", [ImmArg<2>]>;
-
-def int_hexagon_M2_mpy_rnd_lh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
-
-def int_hexagon_M2_mpy_rnd_lh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
-
-def int_hexagon_M2_mmpyuh_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
-
-def int_hexagon_M2_mmpyuh_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
-
-def int_hexagon_M2_mpy_acc_sat_lh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
-
-def int_hexagon_F2_sfcmpge :
-Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpge">;
-
-def int_hexagon_F2_sfmin :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmin">;
-
-def int_hexagon_F2_sfcmpgt :
-Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpgt">;
-
-def int_hexagon_M4_vpmpyh :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_vpmpyh">;
-
-def int_hexagon_M2_mmacuhs_rs0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
-
-def int_hexagon_M2_mpyd_rnd_lh_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
-
-def int_hexagon_M2_mpyd_rnd_lh_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
-
-def int_hexagon_A2_roundsat :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_roundsat">;
-
-def int_hexagon_S2_ct1p :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct1p">;
-
-def int_hexagon_S4_extract_rp :
-Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S4_extract_rp">;
-
-def int_hexagon_S2_lsl_r_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
-
-def int_hexagon_C4_cmplteui :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteui", [ImmArg<1>]>;
-
-def int_hexagon_S4_addi_lsr_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_lsr_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_A4_tfrcpp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A4_tfrcpp">;
-
-def int_hexagon_S2_asr_i_svw_trun :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_i_svw_trun", [ImmArg<1>]>;
-
-def int_hexagon_A4_cmphgti :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgti", [ImmArg<1>]>;
-
-def int_hexagon_A4_vrminh :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminh">;
-
-def int_hexagon_A4_vrminw :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminw">;
-
-def int_hexagon_A4_cmphgtu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtu">;
-
-def int_hexagon_S2_insertp_rp :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_S2_insertp_rp">;
-
-def int_hexagon_A2_vnavghcr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghcr">;
-
-def int_hexagon_S4_subi_asl_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_asl_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_S2_lsl_r_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
-
-def int_hexagon_M2_mpy_hh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
-
-def int_hexagon_A2_vsubws :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubws">;
-
-def int_hexagon_A2_sath :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sath">;
-
-def int_hexagon_S2_asl_r_p_xor :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
-
-def int_hexagon_A2_satb :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satb">;
-
-def int_hexagon_C2_cmpltu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpltu">;
-
-def int_hexagon_S2_insertp :
-Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S2_insertp", [ImmArg<2>, ImmArg<3>]>;
-
-def int_hexagon_M2_mpyd_rnd_ll_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
-
-def int_hexagon_M2_mpyd_rnd_ll_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
-
-def int_hexagon_S2_lsr_i_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_nac", [ImmArg<2>]>;
-
-def int_hexagon_S2_extractup_rp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_extractup_rp">;
-
-def int_hexagon_S4_vxaddsubw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubw">;
-
-def int_hexagon_S4_vxaddsubh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubh">;
-
-def int_hexagon_A2_asrh :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_asrh">;
-
-def int_hexagon_S4_extractp_rp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_extractp_rp">;
-
-def int_hexagon_S2_lsr_r_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
-
-def int_hexagon_M2_mpyd_nac_ll_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
-
-def int_hexagon_M2_mpyd_nac_ll_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
-
-def int_hexagon_C2_or :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_or">;
-
-def int_hexagon_M2_mmpyul_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
-
-def int_hexagon_M2_vrcmacr_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
-
-def int_hexagon_A2_xor :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_xor">;
-
-def int_hexagon_A2_add :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_add">;
-
-def int_hexagon_A2_vsububs :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsububs">;
-
-def int_hexagon_M2_vmpy2s_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
-
-def int_hexagon_M2_vmpy2s_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
-
-def int_hexagon_A2_vraddub_acc :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vraddub_acc">;
-
-def int_hexagon_F2_sfinvsqrta :
-Hexagon_floati32_float_Intrinsic<"HEXAGON_F2_sfinvsqrta">;
-
-def int_hexagon_S2_ct0p :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct0p">;
-
-def int_hexagon_A2_svaddh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddh">;
-
-def int_hexagon_S2_vcrotate :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcrotate">;
-
-def int_hexagon_A2_aslh :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_aslh">;
-
-def int_hexagon_A2_subh_h16_lh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
-
-def int_hexagon_A2_subh_h16_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
-
-def int_hexagon_M2_hmmpyl_rs1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
-
-def int_hexagon_S2_asr_r_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_p">;
-
-def int_hexagon_S2_vsplatrh :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsplatrh">;
-
-def int_hexagon_S2_asr_r_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r">;
-
-def int_hexagon_A2_addh_h16_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
-
-def int_hexagon_S2_vsplatrb :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_vsplatrb">;
-
-def int_hexagon_A2_addh_h16_hh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
-
-def int_hexagon_M2_cmpyr_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
-
-def int_hexagon_M2_dpmpyss_rnd_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
-
-def int_hexagon_C2_muxri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxri", [ImmArg<1>]>;
-
-def int_hexagon_M2_vmac2es_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
-
-def int_hexagon_M2_vmac2es_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
-
-def int_hexagon_C2_pxfer_map :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_pxfer_map">;
-
-def int_hexagon_M2_mpyu_lh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
-
-def int_hexagon_M2_mpyu_lh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
-
-def int_hexagon_S2_asl_i_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_or", [ImmArg<2>]>;
-
-def int_hexagon_M2_mpyd_acc_hl_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
-
-def int_hexagon_M2_mpyd_acc_hl_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
-
-def int_hexagon_S2_asr_r_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
-
-def int_hexagon_A2_vaddw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddw">;
-
-def int_hexagon_S2_asr_i_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_and", [ImmArg<2>]>;
-
-def int_hexagon_A2_vaddh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddh">;
-
-def int_hexagon_M2_mpy_nac_sat_lh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
-
-def int_hexagon_M2_mpy_nac_sat_lh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
-
-def int_hexagon_C2_cmpeqp :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpeqp">;
-
-def int_hexagon_M4_mpyri_addi :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addi", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_A2_not :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_not">;
-
-def int_hexagon_S4_andi_lsr_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_lsr_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_M2_macsip :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsip", [ImmArg<2>]>;
-
-def int_hexagon_A2_tfrcrr :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrcrr">;
-
-def int_hexagon_M2_macsin :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsin", [ImmArg<2>]>;
-
-def int_hexagon_C2_orn :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_orn">;
-
-def int_hexagon_M4_and_andn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_andn">;
-
-def int_hexagon_F2_sfmpy :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmpy">;
-
-def int_hexagon_M2_mpyud_nac_hh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
-
-def int_hexagon_M2_mpyud_nac_hh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
-
-def int_hexagon_S2_lsr_r_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
-
-def int_hexagon_S2_asr_r_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vw">;
-
-def int_hexagon_M4_and_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_or">;
-
-def int_hexagon_S2_asr_r_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vh">;
-
-def int_hexagon_C2_mask :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_C2_mask">;
-
-def int_hexagon_M2_mpy_nac_hh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
-
-def int_hexagon_M2_mpy_nac_hh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
-
-def int_hexagon_M2_mpy_up_s1_sat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
-
-def int_hexagon_A4_vcmpbgt :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbgt">;
-
-def int_hexagon_M5_vrmacbsu :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbsu">;
-
-def int_hexagon_S2_tableidxw_goodsyntax :
-Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax">;
-
-def int_hexagon_A2_vrsadub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vrsadub">;
-
-def int_hexagon_A2_tfrrcr :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrrcr">;
-
-def int_hexagon_M2_vrcmpys_acc_s1 :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
-
-def int_hexagon_F2_dfcmpge :
-Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpge">;
-
-def int_hexagon_M2_accii :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_accii", [ImmArg<2>]>;
-
-def int_hexagon_A5_vaddhubs :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A5_vaddhubs">;
-
-def int_hexagon_A2_vmaxw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxw">;
-
-def int_hexagon_A2_vmaxb :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxb">;
-
-def int_hexagon_A2_vmaxh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxh">;
-
-def int_hexagon_S2_vsxthw :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxthw">;
-
-def int_hexagon_S4_andi_asl_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_asl_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_S2_asl_i_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_nac", [ImmArg<2>]>;
-
-def int_hexagon_S2_lsl_r_p_xor :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
-
-def int_hexagon_C2_cmpgt :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgt">;
-
-def int_hexagon_F2_conv_df2d_chop :
-Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
-
-def int_hexagon_M2_mpyu_nac_hl_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
-
-def int_hexagon_M2_mpyu_nac_hl_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
-
-def int_hexagon_F2_conv_sf2w :
-Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w">;
-
-def int_hexagon_S2_lsr_r_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
-
-def int_hexagon_F2_sfclass :
-Hexagon_i32_floati32_Intrinsic<"HEXAGON_F2_sfclass">;
-
-def int_hexagon_M2_mpyud_acc_lh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
-
-def int_hexagon_M4_xor_andn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_andn">;
-
-def int_hexagon_S2_addasl_rrri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_addasl_rrri", [ImmArg<2>]>;
-
-def int_hexagon_M5_vdmpybsu :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vdmpybsu">;
-
-def int_hexagon_M2_mpyu_nac_hh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
-
-def int_hexagon_M2_mpyu_nac_hh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
-
-def int_hexagon_A2_addi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addi", [ImmArg<1>]>;
-
-def int_hexagon_A2_addp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addp">;
-
-def int_hexagon_M2_vmpy2s_s1pack :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
-
-def int_hexagon_S4_clbpnorm :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S4_clbpnorm">;
-
-def int_hexagon_A4_round_rr_sat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr_sat">;
-
-def int_hexagon_M2_nacci :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_nacci">;
-
-def int_hexagon_S2_shuffeh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeh">;
-
-def int_hexagon_S2_lsr_i_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_and", [ImmArg<2>]>;
-
-def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
-
-def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
-
-def int_hexagon_F2_conv_sf2uw :
-Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
-
-def int_hexagon_A2_vsubh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubh">;
-
-def int_hexagon_F2_conv_sf2ud :
-Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
-
-def int_hexagon_A2_vsubw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubw">;
-
-def int_hexagon_A2_vcmpwgt :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgt">;
-
-def int_hexagon_M4_xor_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_or">;
-
-def int_hexagon_F2_conv_sf2uw_chop :
-Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
-
-def int_hexagon_S2_asl_r_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vw">;
-
-def int_hexagon_S2_vsatwuh_nopack :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
-
-def int_hexagon_S2_asl_r_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vh">;
-
-def int_hexagon_A2_svsubuhs :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubuhs">;
-
-def int_hexagon_M5_vmpybsu :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybsu">;
-
-def int_hexagon_A2_subh_l16_sat_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
-
-def int_hexagon_C4_and_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_and">;
-
-def int_hexagon_M2_mpyu_acc_hl_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
-
-def int_hexagon_M2_mpyu_acc_hl_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
-
-def int_hexagon_S2_lsr_r_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p">;
-
-def int_hexagon_S2_lsr_r_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r">;
-
-def int_hexagon_A4_subp_c :
-Hexagon_i64i32_i64i64i32_Intrinsic<"HEXAGON_A4_subp_c">;
-
-def int_hexagon_A2_vsubhs :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubhs">;
-
-def int_hexagon_C2_vitpack :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_vitpack">;
-
-def int_hexagon_A2_vavguhr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguhr">;
-
-def int_hexagon_S2_vsplicerb :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vsplicerb">;
-
-def int_hexagon_C4_nbitsclr :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclr">;
-
-def int_hexagon_A2_vcmpbgtu :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
-
-def int_hexagon_M2_cmpys_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s1">;
-
-def int_hexagon_M2_cmpys_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s0">;
-
-def int_hexagon_F2_dfcmpuo :
-Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpuo">;
-
-def int_hexagon_S2_shuffob :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffob">;
-
-def int_hexagon_C2_and :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_and">;
-
-def int_hexagon_S5_popcountp :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S5_popcountp">;
-
-def int_hexagon_S4_extractp :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_extractp", [ImmArg<1>, ImmArg<2>]>;
-
-def int_hexagon_S2_cl0 :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl0">;
-
-def int_hexagon_A4_vcmpbgti :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgti", [ImmArg<1>]>;
-
-def int_hexagon_M2_mmacls_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s1">;
-
-def int_hexagon_M2_mmacls_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s0">;
-
-def int_hexagon_C4_cmpneq :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneq">;
-
-def int_hexagon_M2_vmac2es :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es">;
-
-def int_hexagon_M2_vdmacs_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
-
-def int_hexagon_M2_vdmacs_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
-
-def int_hexagon_M2_mpyud_ll_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
-
-def int_hexagon_M2_mpyud_ll_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
-
-def int_hexagon_S2_clb :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clb">;
-
-def int_hexagon_M2_mpy_nac_ll_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
-
-def int_hexagon_M2_mpy_nac_ll_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
-
-def int_hexagon_M2_mpyd_nac_hl_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
-
-def int_hexagon_M2_mpyd_nac_hl_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
-
-def int_hexagon_M2_maci :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_maci">;
-
-def int_hexagon_A2_vmaxuh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuh">;
-
-def int_hexagon_A4_bitspliti :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitspliti", [ImmArg<1>]>;
-
-def int_hexagon_A2_vmaxub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxub">;
-
-def int_hexagon_M2_mpyud_hh_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
-
-def int_hexagon_M2_mpyud_hh_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
-
-def int_hexagon_M2_vrmac_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrmac_s0">;
-
-def int_hexagon_M2_mpy_sat_lh_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
-
-def int_hexagon_S2_asl_r_r_sat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
-
-def int_hexagon_F2_conv_sf2d :
-Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d">;
-
-def int_hexagon_S2_asr_r_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
-
-def int_hexagon_F2_dfimm_n :
-Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_n", [ImmArg<0>]>;
-
-def int_hexagon_A4_cmphgt :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgt">;
-
-def int_hexagon_F2_dfimm_p :
-Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_p", [ImmArg<0>]>;
-
-def int_hexagon_M2_mpyud_acc_lh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
-
-def int_hexagon_M2_vcmpy_s1_sat_r :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
-
-def int_hexagon_M4_mpyri_addr_u2 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr_u2", [ImmArg<1>]>;
-
-def int_hexagon_M2_vcmpy_s1_sat_i :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
-
-def int_hexagon_S2_lsl_r_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
-
-def int_hexagon_M5_vrmacbuu :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbuu">;
-
-def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax", [ImmArg<1>]>;
-
-def int_hexagon_S2_vspliceib :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vspliceib", [ImmArg<2>]>;
-
-def int_hexagon_M2_dpmpyss_acc_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
-
-def int_hexagon_M2_cnacs_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s1">;
-
-def int_hexagon_M2_cnacs_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s0">;
-
-def int_hexagon_A2_maxu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_maxu">;
-
-def int_hexagon_A2_maxp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxp">;
-
-def int_hexagon_A2_andir :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_andir", [ImmArg<1>]>;
-
-def int_hexagon_F2_sfrecipa :
-Hexagon_floati32_floatfloat_Intrinsic<"HEXAGON_F2_sfrecipa">;
-
-def int_hexagon_A2_combineii :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combineii", [ImmArg<0>, ImmArg<1>]>;
-
-def int_hexagon_A4_orn :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_orn">;
-
-def int_hexagon_A4_cmpbgtui :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtui", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsr_r_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
-
-def int_hexagon_A4_vcmpbeqi :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbeqi", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsl_r_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r">;
-
-def int_hexagon_S2_lsl_r_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p">;
-
-def int_hexagon_A2_or :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_or">;
-
-def int_hexagon_F2_dfcmpeq :
-Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpeq">;
-
-def int_hexagon_C2_cmpeq :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeq">;
-
-def int_hexagon_A2_tfrp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_tfrp">;
-
-def int_hexagon_C4_and_andn :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_andn">;
-
-def int_hexagon_S2_vsathub_nopack :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
-
-def int_hexagon_A2_satuh :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satuh">;
-
-def int_hexagon_A2_satub :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satub">;
-
-def int_hexagon_M2_vrcmpys_s1 :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
-
-def int_hexagon_S4_or_ori :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_ori", [ImmArg<2>]>;
-
-def int_hexagon_C4_fastcorner9_not :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
-
-def int_hexagon_A2_tfrih :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfrih", [ImmArg<1>]>;
-
-def int_hexagon_A2_tfril :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfril", [ImmArg<1>]>;
-
-def int_hexagon_M4_mpyri_addr :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr", [ImmArg<2>]>;
-
-def int_hexagon_S2_vtrunehb :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunehb">;
-
-def int_hexagon_A2_vabsw :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsw">;
-
-def int_hexagon_A2_vabsh :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsh">;
-
-def int_hexagon_F2_sfsub :
-Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfsub">;
-
-def int_hexagon_C2_muxii :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxii", [ImmArg<1>, ImmArg<2>]>;
-
-def int_hexagon_C2_muxir :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxir", [ImmArg<2>]>;
-
-def int_hexagon_A2_swiz :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_swiz">;
-
-def int_hexagon_S2_asr_i_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_and", [ImmArg<2>]>;
-
-def int_hexagon_M2_cmpyrsc_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
-
-def int_hexagon_M2_cmpyrsc_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
-
-def int_hexagon_A2_vraddub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vraddub">;
-
-def int_hexagon_A4_tlbmatch :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_tlbmatch">;
-
-def int_hexagon_F2_conv_df2w_chop :
-Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
-
-def int_hexagon_A2_and :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_and">;
-
-def int_hexagon_S2_lsr_r_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
-
-def int_hexagon_M2_mpy_nac_sat_ll_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
-
-def int_hexagon_M2_mpy_nac_sat_ll_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
-
-def int_hexagon_S4_extract :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_extract", [ImmArg<1>, ImmArg<2>]>;
-
-def int_hexagon_A2_vcmpweq :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpweq">;
-
-def int_hexagon_M2_acci :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_acci">;
-
-def int_hexagon_S2_lsr_i_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_acc", [ImmArg<2>]>;
-
-def int_hexagon_S2_lsr_i_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_or", [ImmArg<2>]>;
-
-def int_hexagon_F2_conv_ud2sf :
-Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
-
-def int_hexagon_A2_tfr :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfr">;
-
-def int_hexagon_S2_asr_i_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_or", [ImmArg<2>]>;
-
-def int_hexagon_A2_subri :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subri", [ImmArg<0>]>;
-
-def int_hexagon_A4_vrmaxuw :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuw">;
-
-def int_hexagon_M5_vmpybuu :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybuu">;
-
-def int_hexagon_A4_vrmaxuh :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuh">;
-
-def int_hexagon_S2_asl_i_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vw", [ImmArg<1>]>;
-
-def int_hexagon_A2_vavgw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgw">;
-
-def int_hexagon_S2_brev :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_brev">;
-
-def int_hexagon_A2_vavgh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgh">;
-
-def int_hexagon_S2_clrbit_i :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_i", [ImmArg<1>]>;
-
-def int_hexagon_S2_asl_i_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vh", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsr_i_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_or", [ImmArg<2>]>;
-
-def int_hexagon_S2_lsl_r_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
-
-def int_hexagon_M2_mmpyl_rs1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
-
-def int_hexagon_M2_mpyud_hl_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
-
-def int_hexagon_M2_mmpyl_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
-
-def int_hexagon_M2_mmpyl_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
-
-def int_hexagon_M2_naccii :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_naccii", [ImmArg<2>]>;
-
-def int_hexagon_S2_vrndpackwhs :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
-
-def int_hexagon_S2_vtrunewh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunewh">;
-
-def int_hexagon_M2_dpmpyss_nac_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
-
-def int_hexagon_M2_mpyd_ll_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
-
-def int_hexagon_M2_mpyd_ll_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
-
-def int_hexagon_M4_mac_up_s1_sat :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
-
-def int_hexagon_S4_vrcrotate_acc :
-Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate_acc", [ImmArg<3>]>;
-
-def int_hexagon_F2_conv_uw2df :
-Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_uw2df">;
-
-def int_hexagon_A2_vaddubs :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddubs">;
-
-def int_hexagon_S2_asr_r_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
-
-def int_hexagon_A2_orir :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_orir", [ImmArg<1>]>;
-
-def int_hexagon_A2_andp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_andp">;
-
-def int_hexagon_S2_lfsp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_lfsp">;
-
-def int_hexagon_A2_min :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_min">;
-
-def int_hexagon_M2_mpysmi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysmi", [ImmArg<1>]>;
-
-def int_hexagon_M2_vcmpy_s0_sat_r :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
-
-def int_hexagon_M2_mpyu_acc_ll_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
-
-def int_hexagon_M2_mpyu_acc_ll_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
-
-def int_hexagon_S2_asr_r_svw_trun :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
-
-def int_hexagon_M2_mmpyh_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
-
-def int_hexagon_M2_mmpyh_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
-
-def int_hexagon_F2_conv_sf2df :
-Hexagon_double_float_Intrinsic<"HEXAGON_F2_conv_sf2df">;
-
-def int_hexagon_S2_vtrunohb :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunohb">;
-
-def int_hexagon_F2_conv_sf2d_chop :
-Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
-
-def int_hexagon_M2_mpyd_lh_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
-
-def int_hexagon_F2_conv_df2w :
-Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w">;
-
-def int_hexagon_S5_asrhub_sat :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_sat", [ImmArg<1>]>;
-
-def int_hexagon_S2_asl_i_r_xacc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_xacc", [ImmArg<2>]>;
-
-def int_hexagon_F2_conv_df2d :
-Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d">;
-
-def int_hexagon_M2_mmaculs_s1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
-
-def int_hexagon_M2_mmaculs_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
-
-def int_hexagon_A2_svadduhs :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svadduhs">;
-
-def int_hexagon_F2_conv_sf2w_chop :
-Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
-
-def int_hexagon_S2_svsathub :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathub">;
-
-def int_hexagon_M2_mpyd_rnd_hl_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
-
-def int_hexagon_M2_mpyd_rnd_hl_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
-
-def int_hexagon_S2_setbit_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_r">;
-
-def int_hexagon_A2_vavghr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghr">;
-
-def int_hexagon_F2_sffma_sc :
-Hexagon_float_floatfloatfloati32_Intrinsic<"HEXAGON_F2_sffma_sc">;
-
-def int_hexagon_F2_dfclass :
-Hexagon_i32_doublei32_Intrinsic<"HEXAGON_F2_dfclass", [ImmArg<1>]>;
-
-def int_hexagon_F2_conv_df2ud :
-Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud">;
-
-def int_hexagon_F2_conv_df2uw :
-Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw">;
-
-def int_hexagon_M2_cmpyrs_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
-
-def int_hexagon_M2_cmpyrs_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
-
-def int_hexagon_C4_cmpltei :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpltei", [ImmArg<1>]>;
-
-def int_hexagon_C4_cmplteu :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteu">;
-
-def int_hexagon_A2_vsubb_map :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubb_map">;
-
-def int_hexagon_A2_subh_l16_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
-
-def int_hexagon_S2_asr_i_r_rnd :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd", [ImmArg<1>]>;
-
-def int_hexagon_M2_vrmpy_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
-
-def int_hexagon_M2_mpyd_rnd_hh_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
-
-def int_hexagon_M2_mpyd_rnd_hh_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
-
-def int_hexagon_A2_minup :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minup">;
-
-def int_hexagon_S2_valignrb :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignrb">;
-
-def int_hexagon_S2_asr_r_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
-
-def int_hexagon_M2_mmpyl_rs0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
-
-def int_hexagon_M2_vrcmaci_s0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
-
-def int_hexagon_A2_vaddub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddub">;
-
-def int_hexagon_A2_combine_lh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_lh">;
-
-def int_hexagon_M5_vdmacbsu :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vdmacbsu">;
-
-def int_hexagon_A2_combine_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_ll">;
-
-def int_hexagon_M2_mpyud_hl_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
-
-def int_hexagon_M2_vrcmpyi_s0c :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
-
-def int_hexagon_S2_asr_i_p_rnd :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd", [ImmArg<1>]>;
-
-def int_hexagon_A2_addpsat :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addpsat">;
-
-def int_hexagon_A2_svaddhs :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddhs">;
-
-def int_hexagon_S4_ori_lsr_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_lsr_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
-
-def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
-
-def int_hexagon_A2_vminw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminw">;
-
-def int_hexagon_A2_vminh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminh">;
-
-def int_hexagon_M2_vrcmpyr_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
-
-def int_hexagon_A2_vminb :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminb">;
-
-def int_hexagon_M2_vcmac_s0_sat_i :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
-
-def int_hexagon_M2_mpyud_lh_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
-
-def int_hexagon_M2_mpyud_lh_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
-
-def int_hexagon_S2_asl_r_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
-
-def int_hexagon_S4_lsli :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_lsli", [ImmArg<0>]>;
-
-def int_hexagon_S2_lsl_r_vw :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
-
-def int_hexagon_M2_mpy_hh_s1 :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
-
-def int_hexagon_M4_vrmpyeh_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
-
-def int_hexagon_M4_vrmpyeh_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
-
-def int_hexagon_M2_mpy_nac_lh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
-
-def int_hexagon_M2_mpy_nac_lh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
-
-def int_hexagon_M2_vraddh :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vraddh">;
-
-def int_hexagon_C2_tfrrp :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrrp">;
-
-def int_hexagon_M2_mpy_acc_sat_ll_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
-
-def int_hexagon_M2_mpy_acc_sat_ll_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
-
-def int_hexagon_S2_vtrunowh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunowh">;
-
-def int_hexagon_A2_abs :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abs">;
-
-def int_hexagon_A4_cmpbeq :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeq">;
-
-def int_hexagon_A2_negp :
-Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_negp">;
-
-def int_hexagon_S2_asl_i_r_sat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_sat", [ImmArg<1>]>;
-
-def int_hexagon_A2_addh_l16_sat_hl :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
-
-def int_hexagon_S2_vsatwuh :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwuh">;
-
-def int_hexagon_F2_dfcmpgt :
-Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpgt">;
-
-def int_hexagon_S2_svsathb :
-Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathb">;
-
-def int_hexagon_C2_cmpgtup :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtup">;
-
-def int_hexagon_A4_cround_ri :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_ri", [ImmArg<1>]>;
-
-def int_hexagon_S4_clbpaddi :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S4_clbpaddi", [ImmArg<1>]>;
-
-def int_hexagon_A4_cround_rr :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_rr">;
-
-def int_hexagon_C2_mux :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_mux">;
-
-def int_hexagon_M2_dpmpyuu_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
-
-def int_hexagon_S2_shuffeb :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeb">;
-
-def int_hexagon_A2_vminuw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuw">;
-
-def int_hexagon_A2_vaddhs :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddhs">;
-
-def int_hexagon_S2_insert_rp :
-Hexagon_i32_i32i32i64_Intrinsic<"HEXAGON_S2_insert_rp">;
-
-def int_hexagon_A2_vminuh :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuh">;
-
-def int_hexagon_A2_vminub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminub">;
-
-def int_hexagon_S2_extractu :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_extractu", [ImmArg<1>, ImmArg<2>]>;
-
-def int_hexagon_A2_svsubh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubh">;
-
-def int_hexagon_S4_clbaddi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_clbaddi", [ImmArg<1>]>;
-
-def int_hexagon_F2_sffms :
-Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms">;
-
-def int_hexagon_S2_vsxtbh :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxtbh">;
-
-def int_hexagon_M2_mpyud_nac_ll_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
-
-def int_hexagon_M2_mpyud_nac_ll_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
-
-def int_hexagon_A2_subp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_subp">;
-
-def int_hexagon_M2_vmpy2es_s1 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
-
-def int_hexagon_M2_vmpy2es_s0 :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
-
-def int_hexagon_S4_parity :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_parity">;
-
-def int_hexagon_M2_mpy_acc_hh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
-
-def int_hexagon_M2_mpy_acc_hh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
-
-def int_hexagon_S4_addi_asl_ri :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_asl_ri", [ImmArg<0>, ImmArg<2>]>;
-
-def int_hexagon_M2_mpyd_nac_hh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
-
-def int_hexagon_M2_mpyd_nac_hh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
-
-def int_hexagon_S2_asr_i_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_nac", [ImmArg<2>]>;
-
-def int_hexagon_A4_cmpheqi :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheqi", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsr_r_p_xor :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
-
-def int_hexagon_M2_mpy_acc_hl_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
-
-def int_hexagon_M2_mpy_acc_hl_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
-
-def int_hexagon_F2_conv_sf2ud_chop :
-Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
-
-def int_hexagon_C2_cmpgeui :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgeui", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpy_acc_sat_hh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
-
-def int_hexagon_M2_mpy_acc_sat_hh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
-
-def int_hexagon_S2_asl_r_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
-
-def int_hexagon_A2_addh_h16_sat_lh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
-
-def int_hexagon_A2_addh_h16_sat_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
-
-def int_hexagon_M4_nac_up_s1_sat :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
-
-def int_hexagon_M2_mpyud_nac_lh_s1 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
-
-def int_hexagon_M2_mpyud_nac_lh_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
-
-def int_hexagon_A4_round_ri_sat :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri_sat", [ImmArg<1>]>;
-
-def int_hexagon_M2_mpy_nac_hl_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
-
-def int_hexagon_M2_mpy_nac_hl_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
-
-def int_hexagon_A2_vavghcr :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghcr">;
-
-def int_hexagon_M2_mmacls_rs0 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
-
-def int_hexagon_M2_mmacls_rs1 :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
-
-def int_hexagon_M2_cmaci_s0 :
-Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmaci_s0">;
-
-def int_hexagon_S2_setbit_i :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_i", [ImmArg<1>]>;
-
-def int_hexagon_S2_asl_i_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_or", [ImmArg<2>]>;
-
-def int_hexagon_A4_andn :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_andn">;
-
-def int_hexagon_M5_vrmpybsu :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybsu">;
-
-def int_hexagon_S2_vrndpackwh :
-Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwh">;
-
-def int_hexagon_M2_vcmac_s0_sat_r :
-Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
-
-def int_hexagon_A2_vmaxuw :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuw">;
-
-def int_hexagon_C2_bitsclr :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclr">;
-
-def int_hexagon_M2_xor_xacc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_xor_xacc">;
-
-def int_hexagon_A4_vcmpbgtui :
-Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgtui", [ImmArg<1>]>;
-
-def int_hexagon_A4_ornp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_ornp">;
-
-def int_hexagon_A2_tfrpi :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_tfrpi", [ImmArg<0>]>;
-
-def int_hexagon_C4_and_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_or">;
-
-def int_hexagon_M2_mpy_nac_sat_hh_s1 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
-
-def int_hexagon_M2_mpy_nac_sat_hh_s0 :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
-
-def int_hexagon_A2_subh_h16_sat_ll :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
-
-def int_hexagon_A2_subh_h16_sat_lh :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
-
-def int_hexagon_M2_vmpy2su_s1 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
-
-def int_hexagon_M2_vmpy2su_s0 :
-Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
-
-def int_hexagon_S2_asr_i_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_acc", [ImmArg<2>]>;
-
-def int_hexagon_C4_nbitsclri :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclri", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsr_i_vh :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vh", [ImmArg<1>]>;
-
-def int_hexagon_S2_lsr_i_p_xacc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc", [ImmArg<2>]>;
-
-// V55 Scalar Instructions.
-
-def int_hexagon_A5_ACS :
-Hexagon_i64i32_i64i64i64_Intrinsic<"HEXAGON_A5_ACS">;
-
-// V60 Scalar Instructions.
-
-def int_hexagon_S6_rol_i_p_and :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_and", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_r_xacc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_xacc", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_r_and :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_and", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_r_acc :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_acc", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_p_xacc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_xacc", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_p :
-Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S6_rol_i_p", [ImmArg<1>]>;
-
-def int_hexagon_S6_rol_i_p_nac :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_nac", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_p_acc :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_acc", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_r_or :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_or", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_r :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S6_rol_i_r", [ImmArg<1>]>;
-
-def int_hexagon_S6_rol_i_r_nac :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_nac", [ImmArg<2>]>;
-
-def int_hexagon_S6_rol_i_p_or :
-Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_or", [ImmArg<2>]>;
-
-// V62 Scalar Instructions.
-
-def int_hexagon_S6_vtrunehb_ppp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;
-
-def int_hexagon_V6_ldntnt0 :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ldntnt0">;
-
-def int_hexagon_M6_vabsdiffub :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffub">;
-
-def int_hexagon_S6_vtrunohb_ppp :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;
-
-def int_hexagon_M6_vabsdiffb :
-Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffb">;
-
-def int_hexagon_A6_vminub_RdP :
-Hexagon_i64i32_i64i64_Intrinsic<"HEXAGON_A6_vminub_RdP">;
-
-def int_hexagon_S6_vsplatrbp :
-Hexagon_i64_i32_Intrinsic<"HEXAGON_S6_vsplatrbp">;
-
-// V65 Scalar Instructions.
-
-def int_hexagon_A6_vcmpbeq_notany :
-Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A6_vcmpbeq_notany">;
-
-// V66 Scalar Instructions.
-
-def int_hexagon_F2_dfsub :
-Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfsub">;
-
-def int_hexagon_F2_dfadd :
-Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfadd">;
-
-def int_hexagon_M2_mnaci :
-Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mnaci">;
-
-def int_hexagon_S2_mask :
-Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_mask", [ImmArg<0>, ImmArg<1>]>;
-
-// V60 HVX Instructions.
-
-def int_hexagon_V6_veqb_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_or">;
-
-def int_hexagon_V6_veqb_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_or_128B">;
-
-def int_hexagon_V6_vminub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminub">;
-
-def int_hexagon_V6_vminub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminub_128B">;
-
-def int_hexagon_V6_vaslw_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc">;
-
-def int_hexagon_V6_vaslw_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;
-
-def int_hexagon_V6_vmpyhvsrs :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;
-
-def int_hexagon_V6_vmpyhvsrs_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;
-
-def int_hexagon_V6_vsathub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsathub">;
-
-def int_hexagon_V6_vsathub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsathub_128B">;
-
-def int_hexagon_V6_vaddh_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_dv">;
-
-def int_hexagon_V6_vaddh_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
-
-def int_hexagon_V6_vrmpybusi :
-Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi", [ImmArg<2>]>;
-
-def int_hexagon_V6_vrmpybusi_128B :
-Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vshufoh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoh">;
-
-def int_hexagon_V6_vshufoh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoh_128B">;
-
-def int_hexagon_V6_vasrwv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrwv">;
-
-def int_hexagon_V6_vasrwv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrwv_128B">;
-
-def int_hexagon_V6_vdmpyhsuisat :
-Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;
-
-def int_hexagon_V6_vdmpyhsuisat_128B :
-Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;
-
-def int_hexagon_V6_vrsadubi_acc :
-Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc", [ImmArg<3>]>;
-
-def int_hexagon_V6_vrsadubi_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B", [ImmArg<3>]>;
-
-def int_hexagon_V6_vnavgw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgw">;
-
-def int_hexagon_V6_vnavgw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgw_128B">;
-
-def int_hexagon_V6_vnavgh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgh">;
-
-def int_hexagon_V6_vnavgh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgh_128B">;
-
-def int_hexagon_V6_vavgub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgub">;
-
-def int_hexagon_V6_vavgub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgub_128B">;
-
-def int_hexagon_V6_vsubb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubb">;
-
-def int_hexagon_V6_vsubb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_128B">;
-
-def int_hexagon_V6_vgtw_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_and">;
-
-def int_hexagon_V6_vgtw_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;
-
-def int_hexagon_V6_vavgubrnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgubrnd">;
-
-def int_hexagon_V6_vavgubrnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;
-
-def int_hexagon_V6_vrmpybusv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv">;
-
-def int_hexagon_V6_vrmpybusv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;
-
-def int_hexagon_V6_vsubbnq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbnq">;
-
-def int_hexagon_V6_vsubbnq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;
-
-def int_hexagon_V6_vroundhb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhb">;
-
-def int_hexagon_V6_vroundhb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhb_128B">;
-
-def int_hexagon_V6_vadduhsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;
-
-def int_hexagon_V6_vadduhsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;
-
-def int_hexagon_V6_vsububsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububsat">;
-
-def int_hexagon_V6_vsububsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_128B">;
-
-def int_hexagon_V6_vmpabus_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc">;
-
-def int_hexagon_V6_vmpabus_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;
-
-def int_hexagon_V6_vmux :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vmux">;
-
-def int_hexagon_V6_vmux_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vmux_128B">;
-
-def int_hexagon_V6_vmpyhus :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus">;
-
-def int_hexagon_V6_vmpyhus_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;
-
-def int_hexagon_V6_vpackeb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeb">;
-
-def int_hexagon_V6_vpackeb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeb_128B">;
-
-def int_hexagon_V6_vsubhnq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhnq">;
-
-def int_hexagon_V6_vsubhnq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;
-
-def int_hexagon_V6_vavghrnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavghrnd">;
-
-def int_hexagon_V6_vavghrnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;
-
-def int_hexagon_V6_vtran2x2_map :
-Hexagon_v16i32v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vtran2x2_map">;
-
-def int_hexagon_V6_vtran2x2_map_128B :
-Hexagon_v32i32v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtran2x2_map_128B">;
-
-def int_hexagon_V6_vdelta :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdelta">;
-
-def int_hexagon_V6_vdelta_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdelta_128B">;
-
-def int_hexagon_V6_vgtuh_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_and">;
-
-def int_hexagon_V6_vgtuh_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;
-
-def int_hexagon_V6_vtmpyhb :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb">;
-
-def int_hexagon_V6_vtmpyhb_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;
-
-def int_hexagon_V6_vpackob :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackob">;
-
-def int_hexagon_V6_vpackob_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackob_128B">;
-
-def int_hexagon_V6_vmaxh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxh">;
-
-def int_hexagon_V6_vmaxh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxh_128B">;
-
-def int_hexagon_V6_vtmpybus_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;
-
-def int_hexagon_V6_vtmpybus_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;
-
-def int_hexagon_V6_vsubuhsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhsat">;
-
-def int_hexagon_V6_vsubuhsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;
-
-def int_hexagon_V6_vasrw_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc">;
-
-def int_hexagon_V6_vasrw_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;
-
-def int_hexagon_V6_pred_or :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_or">;
-
-def int_hexagon_V6_pred_or_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_or_128B">;
-
-def int_hexagon_V6_vrmpyub_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;
-
-def int_hexagon_V6_vrmpyub_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;
-
-def int_hexagon_V6_lo :
-Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_lo">;
-
-def int_hexagon_V6_lo_128B :
-Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_lo_128B">;
-
-def int_hexagon_V6_vsubb_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_dv">;
-
-def int_hexagon_V6_vsubb_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
-
-def int_hexagon_V6_vsubhsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;
-
-def int_hexagon_V6_vsubhsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;
-
-def int_hexagon_V6_vmpyiwh :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh">;
-
-def int_hexagon_V6_vmpyiwh_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;
-
-def int_hexagon_V6_vmpyiwb :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb">;
-
-def int_hexagon_V6_vmpyiwb_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;
-
-def int_hexagon_V6_ldu0 :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ldu0">;
-
-def int_hexagon_V6_ldu0_128B :
-Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_ldu0_128B">;
-
-def int_hexagon_V6_vgtuh_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_xor">;
-
-def int_hexagon_V6_vgtuh_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;
-
-def int_hexagon_V6_vgth_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_or">;
-
-def int_hexagon_V6_vgth_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_or_128B">;
-
-def int_hexagon_V6_vavgh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgh">;
-
-def int_hexagon_V6_vavgh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgh_128B">;
-
-def int_hexagon_V6_vlalignb :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignb">;
-
-def int_hexagon_V6_vlalignb_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignb_128B">;
-
-def int_hexagon_V6_vsh :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsh">;
-
-def int_hexagon_V6_vsh_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsh_128B">;
-
-def int_hexagon_V6_pred_and_n :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_and_n">;
-
-def int_hexagon_V6_pred_and_n_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;
-
-def int_hexagon_V6_vsb :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsb">;
-
-def int_hexagon_V6_vsb_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsb_128B">;
-
-def int_hexagon_V6_vroundwuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwuh">;
-
-def int_hexagon_V6_vroundwuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;
-
-def int_hexagon_V6_vasrhv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrhv">;
-
-def int_hexagon_V6_vasrhv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrhv_128B">;
-
-def int_hexagon_V6_vshuffh :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffh">;
-
-def int_hexagon_V6_vshuffh_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffh_128B">;
-
-def int_hexagon_V6_vaddhsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;
-
-def int_hexagon_V6_vaddhsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;
-
-def int_hexagon_V6_vnavgub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgub">;
-
-def int_hexagon_V6_vnavgub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgub_128B">;
-
-def int_hexagon_V6_vrmpybv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv">;
-
-def int_hexagon_V6_vrmpybv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;
-
-def int_hexagon_V6_vnormamth :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamth">;
-
-def int_hexagon_V6_vnormamth_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamth_128B">;
-
-def int_hexagon_V6_vdmpyhb :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb">;
-
-def int_hexagon_V6_vdmpyhb_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;
-
-def int_hexagon_V6_vavguh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguh">;
-
-def int_hexagon_V6_vavguh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguh_128B">;
-
-def int_hexagon_V6_vlsrwv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrwv">;
-
-def int_hexagon_V6_vlsrwv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
-
-def int_hexagon_V6_vlsrhv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrhv">;
-
-def int_hexagon_V6_vlsrhv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;
-
-def int_hexagon_V6_vdmpyhisat :
-Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat">;
-
-def int_hexagon_V6_vdmpyhisat_128B :
-Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;
-
-def int_hexagon_V6_vdmpyhvsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;
-
-def int_hexagon_V6_vdmpyhvsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;
-
-def int_hexagon_V6_vaddw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddw">;
-
-def int_hexagon_V6_vaddw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_128B">;
-
-def int_hexagon_V6_vzh :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzh">;
-
-def int_hexagon_V6_vzh_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzh_128B">;
-
-def int_hexagon_V6_vaddh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddh">;
-
-def int_hexagon_V6_vaddh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_128B">;
-
-def int_hexagon_V6_vmaxub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxub">;
-
-def int_hexagon_V6_vmaxub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxub_128B">;
-
-def int_hexagon_V6_vmpyhv_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;
-
-def int_hexagon_V6_vmpyhv_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;
-
-def int_hexagon_V6_vadduhsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhsat">;
-
-def int_hexagon_V6_vadduhsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;
-
-def int_hexagon_V6_vshufoeh :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeh">;
-
-def int_hexagon_V6_vshufoeh_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;
-
-def int_hexagon_V6_vmpyuhv_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;
-
-def int_hexagon_V6_vmpyuhv_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
-
-def int_hexagon_V6_veqh :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh">;
-
-def int_hexagon_V6_veqh_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_128B">;
-
-def int_hexagon_V6_vmpabuuv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabuuv">;
-
-def int_hexagon_V6_vmpabuuv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;
-
-def int_hexagon_V6_vasrwhsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat">;
-
-def int_hexagon_V6_vasrwhsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;
-
-def int_hexagon_V6_vminuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminuh">;
-
-def int_hexagon_V6_vminuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminuh_128B">;
-
-def int_hexagon_V6_vror :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vror">;
-
-def int_hexagon_V6_vror_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vror_128B">;
-
-def int_hexagon_V6_vmpyowh_rnd_sacc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;
-
-def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;
-
-def int_hexagon_V6_vmaxuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxuh">;
-
-def int_hexagon_V6_vmaxuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;
-
-def int_hexagon_V6_vabsh_sat :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh_sat">;
-
-def int_hexagon_V6_vabsh_sat_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;
-
-def int_hexagon_V6_pred_or_n :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_or_n">;
-
-def int_hexagon_V6_pred_or_n_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;
-
-def int_hexagon_V6_vdealb :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealb">;
-
-def int_hexagon_V6_vdealb_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealb_128B">;
-
-def int_hexagon_V6_vmpybusv :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv">;
-
-def int_hexagon_V6_vmpybusv_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;
-
-def int_hexagon_V6_vzb :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzb">;
-
-def int_hexagon_V6_vzb_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzb_128B">;
-
-def int_hexagon_V6_vdmpybus_dv :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;
-
-def int_hexagon_V6_vdmpybus_dv_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;
-
-def int_hexagon_V6_vaddbq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbq">;
-
-def int_hexagon_V6_vaddbq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
-
-def int_hexagon_V6_vaddb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddb">;
-
-def int_hexagon_V6_vaddb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_128B">;
-
-def int_hexagon_V6_vaddwq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwq">;
-
-def int_hexagon_V6_vaddwq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwq_128B">;
-
-def int_hexagon_V6_vasrhubrndsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;
-
-def int_hexagon_V6_vasrhubrndsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;
-
-def int_hexagon_V6_vasrhubsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat">;
-
-def int_hexagon_V6_vasrhubsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;
-
-def int_hexagon_V6_vshufoeb :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeb">;
-
-def int_hexagon_V6_vshufoeb_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;
-
-def int_hexagon_V6_vpackhub_sat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhub_sat">;
-
-def int_hexagon_V6_vpackhub_sat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;
-
-def int_hexagon_V6_vmpyiwh_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;
-
-def int_hexagon_V6_vmpyiwh_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;
-
-def int_hexagon_V6_vtmpyb :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb">;
-
-def int_hexagon_V6_vtmpyb_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;
-
-def int_hexagon_V6_vmpabusv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabusv">;
-
-def int_hexagon_V6_vmpabusv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;
-
-def int_hexagon_V6_pred_and :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_and">;
-
-def int_hexagon_V6_pred_and_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_and_128B">;
-
-def int_hexagon_V6_vsubwnq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwnq">;
-
-def int_hexagon_V6_vsubwnq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;
-
-def int_hexagon_V6_vpackwuh_sat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;
-
-def int_hexagon_V6_vpackwuh_sat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;
-
-def int_hexagon_V6_vswap :
-Hexagon_v32i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vswap">;
-
-def int_hexagon_V6_vswap_128B :
-Hexagon_v64i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vswap_128B">;
-
-def int_hexagon_V6_vrmpyubv_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;
-
-def int_hexagon_V6_vrmpyubv_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;
-
-def int_hexagon_V6_vgtb_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_and">;
-
-def int_hexagon_V6_vgtb_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;
-
-def int_hexagon_V6_vaslw :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslw">;
-
-def int_hexagon_V6_vaslw_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_128B">;
-
-def int_hexagon_V6_vpackhb_sat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhb_sat">;
-
-def int_hexagon_V6_vpackhb_sat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;
-
-def int_hexagon_V6_vmpyih_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih_acc">;
-
-def int_hexagon_V6_vmpyih_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;
-
-def int_hexagon_V6_vshuffvdd :
-Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd">;
-
-def int_hexagon_V6_vshuffvdd_128B :
-Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;
-
-def int_hexagon_V6_vaddb_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_dv">;
-
-def int_hexagon_V6_vaddb_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
-
-def int_hexagon_V6_vunpackub :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackub">;
-
-def int_hexagon_V6_vunpackub_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackub_128B">;
-
-def int_hexagon_V6_vgtuw :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw">;
-
-def int_hexagon_V6_vgtuw_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_128B">;
-
-def int_hexagon_V6_vlutvwh :
-Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh">;
-
-def int_hexagon_V6_vlutvwh_128B :
-Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;
-
-def int_hexagon_V6_vgtub :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub">;
-
-def int_hexagon_V6_vgtub_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_128B">;
-
-def int_hexagon_V6_vmpyowh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh">;
-
-def int_hexagon_V6_vmpyowh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;
-
-def int_hexagon_V6_vmpyieoh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyieoh">;
-
-def int_hexagon_V6_vmpyieoh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;
-
-def int_hexagon_V6_extractw :
-Hexagon_i32_v16i32i32_Intrinsic<"HEXAGON_V6_extractw">;
-
-def int_hexagon_V6_extractw_128B :
-Hexagon_i32_v32i32i32_Intrinsic<"HEXAGON_V6_extractw_128B">;
-
-def int_hexagon_V6_vavgwrnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgwrnd">;
-
-def int_hexagon_V6_vavgwrnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;
-
-def int_hexagon_V6_vdmpyhsat_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;
-
-def int_hexagon_V6_vdmpyhsat_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;
-
-def int_hexagon_V6_vgtub_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_xor">;
-
-def int_hexagon_V6_vgtub_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;
-
-def int_hexagon_V6_vmpyub :
-Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub">;
-
-def int_hexagon_V6_vmpyub_128B :
-Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_128B">;
-
-def int_hexagon_V6_vmpyuh :
-Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh">;
-
-def int_hexagon_V6_vmpyuh_128B :
-Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;
-
-def int_hexagon_V6_vunpackob :
-Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackob">;
-
-def int_hexagon_V6_vunpackob_128B :
-Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackob_128B">;
-
-def int_hexagon_V6_vmpahb :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb">;
-
-def int_hexagon_V6_vmpahb_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_128B">;
-
-def int_hexagon_V6_veqw_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_or">;
-
-def int_hexagon_V6_veqw_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_or_128B">;
-
-def int_hexagon_V6_vandqrt :
-Hexagon_v16i32_v512i1i32_Intrinsic<"HEXAGON_V6_vandqrt">;
-
-def int_hexagon_V6_vandqrt_128B :
-Hexagon_v32i32_v1024i1i32_Intrinsic<"HEXAGON_V6_vandqrt_128B">;
-
-def int_hexagon_V6_vxor :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vxor">;
-
-def int_hexagon_V6_vxor_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vxor_128B">;
-
-def int_hexagon_V6_vasrwhrndsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;
-
-def int_hexagon_V6_vasrwhrndsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;
-
-def int_hexagon_V6_vmpyhsat_acc :
-Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;
-
-def int_hexagon_V6_vmpyhsat_acc_128B :
-Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;
-
-def int_hexagon_V6_vrmpybus_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;
-
-def int_hexagon_V6_vrmpybus_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;
-
-def int_hexagon_V6_vsubhw :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhw">;
-
-def int_hexagon_V6_vsubhw_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhw_128B">;
-
-def int_hexagon_V6_vdealb4w :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdealb4w">;
-
-def int_hexagon_V6_vdealb4w_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;
-
-def int_hexagon_V6_vmpyowh_sacc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;
-
-def int_hexagon_V6_vmpyowh_sacc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;
-
-def int_hexagon_V6_vmpybv :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv">;
-
-def int_hexagon_V6_vmpybv_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_128B">;
-
-def int_hexagon_V6_vabsdiffh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffh">;
-
-def int_hexagon_V6_vabsdiffh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;
-
-def int_hexagon_V6_vshuffob :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffob">;
-
-def int_hexagon_V6_vshuffob_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffob_128B">;
-
-def int_hexagon_V6_vmpyub_acc :
-Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc">;
-
-def int_hexagon_V6_vmpyub_acc_128B :
-Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;
-
-def int_hexagon_V6_vnormamtw :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamtw">;
-
-def int_hexagon_V6_vnormamtw_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;
-
-def int_hexagon_V6_vunpackuh :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackuh">;
-
-def int_hexagon_V6_vunpackuh_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;
-
-def int_hexagon_V6_vgtuh_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_or">;
-
-def int_hexagon_V6_vgtuh_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;
-
-def int_hexagon_V6_vmpyiewuh_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;
-
-def int_hexagon_V6_vmpyiewuh_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;
-
-def int_hexagon_V6_vunpackoh :
-Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackoh">;
-
-def int_hexagon_V6_vunpackoh_128B :
-Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;
-
-def int_hexagon_V6_vdmpyhsat :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat">;
-
-def int_hexagon_V6_vdmpyhsat_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;
-
-def int_hexagon_V6_vmpyubv :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv">;
-
-def int_hexagon_V6_vmpyubv_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;
-
-def int_hexagon_V6_vmpyhss :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhss">;
-
-def int_hexagon_V6_vmpyhss_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;
-
-def int_hexagon_V6_hi :
-Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_hi">;
-
-def int_hexagon_V6_hi_128B :
-Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_hi_128B">;
-
-def int_hexagon_V6_vasrwuhsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat">;
-
-def int_hexagon_V6_vasrwuhsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;
-
-def int_hexagon_V6_veqw :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw">;
-
-def int_hexagon_V6_veqw_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_128B">;
-
-def int_hexagon_V6_vdsaduh :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh">;
-
-def int_hexagon_V6_vdsaduh_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;
-
-def int_hexagon_V6_vsubw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubw">;
-
-def int_hexagon_V6_vsubw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_128B">;
-
-def int_hexagon_V6_vsubw_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_dv">;
-
-def int_hexagon_V6_vsubw_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
-
-def int_hexagon_V6_veqb_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_and">;
-
-def int_hexagon_V6_veqb_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_and_128B">;
-
-def int_hexagon_V6_vmpyih :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih">;
-
-def int_hexagon_V6_vmpyih_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_128B">;
-
-def int_hexagon_V6_vtmpyb_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;
-
-def int_hexagon_V6_vtmpyb_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;
-
-def int_hexagon_V6_vrmpybus :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus">;
-
-def int_hexagon_V6_vrmpybus_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;
-
-def int_hexagon_V6_vmpybus_acc :
-Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc">;
-
-def int_hexagon_V6_vmpybus_acc_128B :
-Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;
-
-def int_hexagon_V6_vgth_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_xor">;
-
-def int_hexagon_V6_vgth_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;
-
-def int_hexagon_V6_vsubhsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhsat">;
-
-def int_hexagon_V6_vsubhsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;
-
-def int_hexagon_V6_vrmpyubi_acc :
-Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc", [ImmArg<3>]>;
-
-def int_hexagon_V6_vrmpyubi_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B", [ImmArg<3>]>;
-
-def int_hexagon_V6_vabsw :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw">;
-
-def int_hexagon_V6_vabsw_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_128B">;
-
-def int_hexagon_V6_vaddwsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;
-
-def int_hexagon_V6_vaddwsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;
-
-def int_hexagon_V6_vlsrw :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrw">;
-
-def int_hexagon_V6_vlsrw_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrw_128B">;
-
-def int_hexagon_V6_vabsh :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh">;
-
-def int_hexagon_V6_vabsh_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_128B">;
-
-def int_hexagon_V6_vlsrh :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrh">;
-
-def int_hexagon_V6_vlsrh_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrh_128B">;
-
-def int_hexagon_V6_valignb :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignb">;
-
-def int_hexagon_V6_valignb_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignb_128B">;
-
-def int_hexagon_V6_vsubhq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhq">;
-
-def int_hexagon_V6_vsubhq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhq_128B">;
-
-def int_hexagon_V6_vpackoh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackoh">;
-
-def int_hexagon_V6_vpackoh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackoh_128B">;
-
-def int_hexagon_V6_vdmpybus_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;
-
-def int_hexagon_V6_vdmpybus_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;
-
-def int_hexagon_V6_vdmpyhvsat_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;
-
-def int_hexagon_V6_vdmpyhvsat_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;
-
-def int_hexagon_V6_vrmpybv_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;
-
-def int_hexagon_V6_vrmpybv_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;
-
-def int_hexagon_V6_vaddhsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhsat">;
-
-def int_hexagon_V6_vaddhsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;
-
-def int_hexagon_V6_vcombine :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcombine">;
-
-def int_hexagon_V6_vcombine_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcombine_128B">;
-
-def int_hexagon_V6_vandqrt_acc :
-Hexagon_v16i32_v16i32v512i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc">;
-
-def int_hexagon_V6_vandqrt_acc_128B :
-Hexagon_v32i32_v32i32v1024i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;
-
-def int_hexagon_V6_vaslhv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslhv">;
-
-def int_hexagon_V6_vaslhv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslhv_128B">;
-
-def int_hexagon_V6_vinsertwr :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vinsertwr">;
-
-def int_hexagon_V6_vinsertwr_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;
-
-def int_hexagon_V6_vsubh_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_dv">;
-
-def int_hexagon_V6_vsubh_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
-
-def int_hexagon_V6_vshuffb :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffb">;
-
-def int_hexagon_V6_vshuffb_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffb_128B">;
-
-def int_hexagon_V6_vand :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vand">;
-
-def int_hexagon_V6_vand_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vand_128B">;
-
-def int_hexagon_V6_vmpyhv :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv">;
-
-def int_hexagon_V6_vmpyhv_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;
-
-def int_hexagon_V6_vdmpyhsuisat_acc :
-Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;
-
-def int_hexagon_V6_vdmpyhsuisat_acc_128B :
-Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;
-
-def int_hexagon_V6_vsububsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_dv">;
-
-def int_hexagon_V6_vsububsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;
-
-def int_hexagon_V6_vgtb_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_xor">;
-
-def int_hexagon_V6_vgtb_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;
-
-def int_hexagon_V6_vdsaduh_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;
-
-def int_hexagon_V6_vdsaduh_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
-
-def int_hexagon_V6_vrmpyub :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub">;
-
-def int_hexagon_V6_vrmpyub_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;
-
-def int_hexagon_V6_vmpyuh_acc :
-Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;
-
-def int_hexagon_V6_vmpyuh_acc_128B :
-Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;
-
-def int_hexagon_V6_vcl0h :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0h">;
-
-def int_hexagon_V6_vcl0h_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0h_128B">;
-
-def int_hexagon_V6_vmpyhus_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;
-
-def int_hexagon_V6_vmpyhus_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;
-
-def int_hexagon_V6_vmpybv_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv_acc">;
-
-def int_hexagon_V6_vmpybv_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;
-
-def int_hexagon_V6_vrsadubi :
-Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi", [ImmArg<2>]>;
-
-def int_hexagon_V6_vrsadubi_128B :
-Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vdmpyhb_dv_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;
-
-def int_hexagon_V6_vdmpyhb_dv_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;
-
-def int_hexagon_V6_vshufeh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufeh">;
-
-def int_hexagon_V6_vshufeh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufeh_128B">;
-
-def int_hexagon_V6_vmpyewuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh">;
-
-def int_hexagon_V6_vmpyewuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;
-
-def int_hexagon_V6_vmpyhsrs :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs">;
-
-def int_hexagon_V6_vmpyhsrs_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;
-
-def int_hexagon_V6_vdmpybus_dv_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;
-
-def int_hexagon_V6_vdmpybus_dv_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;
-
-def int_hexagon_V6_vaddubh :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh">;
-
-def int_hexagon_V6_vaddubh_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_128B">;
-
-def int_hexagon_V6_vasrwh :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwh">;
-
-def int_hexagon_V6_vasrwh_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwh_128B">;
-
-def int_hexagon_V6_ld0 :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ld0">;
-
-def int_hexagon_V6_ld0_128B :
-Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_ld0_128B">;
-
-def int_hexagon_V6_vpopcounth :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vpopcounth">;
-
-def int_hexagon_V6_vpopcounth_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;
-
-def int_hexagon_V6_ldnt0 :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ldnt0">;
-
-def int_hexagon_V6_ldnt0_128B :
-Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_ldnt0_128B">;
-
-def int_hexagon_V6_vgth_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_and">;
-
-def int_hexagon_V6_vgth_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_and_128B">;
-
-def int_hexagon_V6_vaddubsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;
-
-def int_hexagon_V6_vaddubsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;
-
-def int_hexagon_V6_vpackeh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeh">;
-
-def int_hexagon_V6_vpackeh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeh_128B">;
-
-def int_hexagon_V6_vmpyh :
-Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh">;
-
-def int_hexagon_V6_vmpyh_128B :
-Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_128B">;
-
-def int_hexagon_V6_vminh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminh">;
-
-def int_hexagon_V6_vminh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminh_128B">;
-
-def int_hexagon_V6_pred_scalar2 :
-Hexagon_v512i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2">;
-
-def int_hexagon_V6_pred_scalar2_128B :
-Hexagon_v1024i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;
-
-def int_hexagon_V6_vdealh :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealh">;
-
-def int_hexagon_V6_vdealh_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealh_128B">;
-
-def int_hexagon_V6_vpackwh_sat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwh_sat">;
-
-def int_hexagon_V6_vpackwh_sat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;
-
-def int_hexagon_V6_vaslh :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslh">;
-
-def int_hexagon_V6_vaslh_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_128B">;
-
-def int_hexagon_V6_vgtuw_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_and">;
-
-def int_hexagon_V6_vgtuw_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;
-
-def int_hexagon_V6_vor :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vor">;
-
-def int_hexagon_V6_vor_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vor_128B">;
-
-def int_hexagon_V6_vlutvvb :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb">;
-
-def int_hexagon_V6_vlutvvb_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;
-
-def int_hexagon_V6_vmpyiowh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiowh">;
-
-def int_hexagon_V6_vmpyiowh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;
-
-def int_hexagon_V6_vlutvvb_oracc :
-Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;
-
-def int_hexagon_V6_vlutvvb_oracc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;
-
-def int_hexagon_V6_vandvrt :
-Hexagon_v512i1_v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt">;
-
-def int_hexagon_V6_vandvrt_128B :
-Hexagon_v1024i1_v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_128B">;
-
-def int_hexagon_V6_veqh_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_xor">;
-
-def int_hexagon_V6_veqh_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;
-
-def int_hexagon_V6_vadduhw :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw">;
-
-def int_hexagon_V6_vadduhw_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_128B">;
-
-def int_hexagon_V6_vcl0w :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0w">;
-
-def int_hexagon_V6_vcl0w_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0w_128B">;
-
-def int_hexagon_V6_vmpyihb :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb">;
-
-def int_hexagon_V6_vmpyihb_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;
-
-def int_hexagon_V6_vtmpybus :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus">;
-
-def int_hexagon_V6_vtmpybus_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;
-
-def int_hexagon_V6_vd0 :
-Hexagon_v16i32__Intrinsic<"HEXAGON_V6_vd0">;
-
-def int_hexagon_V6_vd0_128B :
-Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vd0_128B">;
-
-def int_hexagon_V6_veqh_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_or">;
-
-def int_hexagon_V6_veqh_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_or_128B">;
-
-def int_hexagon_V6_vgtw_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_or">;
-
-def int_hexagon_V6_vgtw_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;
-
-def int_hexagon_V6_vdmpybus :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus">;
-
-def int_hexagon_V6_vdmpybus_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;
-
-def int_hexagon_V6_vgtub_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_or">;
-
-def int_hexagon_V6_vgtub_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;
-
-def int_hexagon_V6_vmpybus :
-Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus">;
-
-def int_hexagon_V6_vmpybus_128B :
-Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_128B">;
-
-def int_hexagon_V6_vdmpyhb_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;
-
-def int_hexagon_V6_vdmpyhb_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;
-
-def int_hexagon_V6_vandvrt_acc :
-Hexagon_v512i1_v512i1v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc">;
-
-def int_hexagon_V6_vandvrt_acc_128B :
-Hexagon_v1024i1_v1024i1v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
-
-def int_hexagon_V6_vassign :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vassign">;
-
-def int_hexagon_V6_vassign_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassign_128B">;
-
-def int_hexagon_V6_vaddwnq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwnq">;
-
-def int_hexagon_V6_vaddwnq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;
-
-def int_hexagon_V6_vgtub_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_and">;
-
-def int_hexagon_V6_vgtub_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;
-
-def int_hexagon_V6_vdmpyhb_dv :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;
-
-def int_hexagon_V6_vdmpyhb_dv_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;
-
-def int_hexagon_V6_vunpackb :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackb">;
-
-def int_hexagon_V6_vunpackb_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackb_128B">;
-
-def int_hexagon_V6_vunpackh :
-Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackh">;
-
-def int_hexagon_V6_vunpackh_128B :
-Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackh_128B">;
-
-def int_hexagon_V6_vmpahb_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc">;
-
-def int_hexagon_V6_vmpahb_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;
-
-def int_hexagon_V6_vaddbnq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbnq">;
-
-def int_hexagon_V6_vaddbnq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;
-
-def int_hexagon_V6_vlalignbi :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignbi", [ImmArg<2>]>;
-
-def int_hexagon_V6_vlalignbi_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignbi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vsatwh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatwh">;
-
-def int_hexagon_V6_vsatwh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatwh_128B">;
-
-def int_hexagon_V6_vgtuh :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh">;
-
-def int_hexagon_V6_vgtuh_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_128B">;
-
-def int_hexagon_V6_vmpyihb_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;
-
-def int_hexagon_V6_vmpyihb_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;
-
-def int_hexagon_V6_vrmpybusv_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;
-
-def int_hexagon_V6_vrmpybusv_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;
-
-def int_hexagon_V6_vrdelta :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrdelta">;
-
-def int_hexagon_V6_vrdelta_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrdelta_128B">;
-
-def int_hexagon_V6_vroundwh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwh">;
-
-def int_hexagon_V6_vroundwh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwh_128B">;
-
-def int_hexagon_V6_vaddw_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_dv">;
-
-def int_hexagon_V6_vaddw_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
-
-def int_hexagon_V6_vmpyiwb_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;
-
-def int_hexagon_V6_vmpyiwb_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;
-
-def int_hexagon_V6_vsubbq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbq">;
-
-def int_hexagon_V6_vsubbq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbq_128B">;
-
-def int_hexagon_V6_veqh_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_and">;
-
-def int_hexagon_V6_veqh_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_and_128B">;
-
-def int_hexagon_V6_valignbi :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignbi", [ImmArg<2>]>;
-
-def int_hexagon_V6_valignbi_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignbi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vaddwsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwsat">;
-
-def int_hexagon_V6_vaddwsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;
-
-def int_hexagon_V6_veqw_and :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_and">;
-
-def int_hexagon_V6_veqw_and_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_and_128B">;
-
-def int_hexagon_V6_vabsdiffub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffub">;
-
-def int_hexagon_V6_vabsdiffub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;
-
-def int_hexagon_V6_vshuffeb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffeb">;
-
-def int_hexagon_V6_vshuffeb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;
-
-def int_hexagon_V6_vabsdiffuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffuh">;
-
-def int_hexagon_V6_vabsdiffuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;
-
-def int_hexagon_V6_veqw_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_xor">;
-
-def int_hexagon_V6_veqw_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;
-
-def int_hexagon_V6_vgth :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth">;
-
-def int_hexagon_V6_vgth_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_128B">;
-
-def int_hexagon_V6_vgtuw_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_xor">;
-
-def int_hexagon_V6_vgtuw_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;
-
-def int_hexagon_V6_vgtb :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb">;
-
-def int_hexagon_V6_vgtb_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_128B">;
-
-def int_hexagon_V6_vgtw :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw">;
-
-def int_hexagon_V6_vgtw_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_128B">;
-
-def int_hexagon_V6_vsubwq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwq">;
-
-def int_hexagon_V6_vsubwq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwq_128B">;
-
-def int_hexagon_V6_vnot :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnot">;
-
-def int_hexagon_V6_vnot_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnot_128B">;
-
-def int_hexagon_V6_vgtb_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_or">;
-
-def int_hexagon_V6_vgtb_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;
-
-def int_hexagon_V6_vgtuw_or :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_or">;
-
-def int_hexagon_V6_vgtuw_or_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;
-
-def int_hexagon_V6_vaddubsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubsat">;
-
-def int_hexagon_V6_vaddubsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;
-
-def int_hexagon_V6_vmaxw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxw">;
-
-def int_hexagon_V6_vmaxw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxw_128B">;
-
-def int_hexagon_V6_vaslwv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslwv">;
-
-def int_hexagon_V6_vaslwv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslwv_128B">;
-
-def int_hexagon_V6_vabsw_sat :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw_sat">;
-
-def int_hexagon_V6_vabsw_sat_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;
-
-def int_hexagon_V6_vsubwsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;
-
-def int_hexagon_V6_vsubwsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
-
-def int_hexagon_V6_vroundhub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhub">;
-
-def int_hexagon_V6_vroundhub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhub_128B">;
-
-def int_hexagon_V6_vdmpyhisat_acc :
-Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;
-
-def int_hexagon_V6_vdmpyhisat_acc_128B :
-Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;
-
-def int_hexagon_V6_vmpabus :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus">;
-
-def int_hexagon_V6_vmpabus_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_128B">;
-
-def int_hexagon_V6_vassignp :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassignp">;
-
-def int_hexagon_V6_vassignp_128B :
-Hexagon_v64i32_v64i32_Intrinsic<"HEXAGON_V6_vassignp_128B">;
-
-def int_hexagon_V6_veqb :
-Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb">;
-
-def int_hexagon_V6_veqb_128B :
-Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_128B">;
-
-def int_hexagon_V6_vsububh :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububh">;
-
-def int_hexagon_V6_vsububh_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububh_128B">;
-
-def int_hexagon_V6_lvsplatw :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw">;
-
-def int_hexagon_V6_lvsplatw_128B :
-Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
-
-def int_hexagon_V6_vaddhnq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhnq">;
-
-def int_hexagon_V6_vaddhnq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;
-
-def int_hexagon_V6_vdmpyhsusat :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;
-
-def int_hexagon_V6_vdmpyhsusat_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;
-
-def int_hexagon_V6_pred_not :
-Hexagon_v512i1_v512i1_Intrinsic<"HEXAGON_V6_pred_not">;
-
-def int_hexagon_V6_pred_not_128B :
-Hexagon_v1024i1_v1024i1_Intrinsic<"HEXAGON_V6_pred_not_128B">;
-
-def int_hexagon_V6_vlutvwh_oracc :
-Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;
-
-def int_hexagon_V6_vlutvwh_oracc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;
-
-def int_hexagon_V6_vmpyiewh_acc :
-Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;
-
-def int_hexagon_V6_vmpyiewh_acc_128B :
-Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;
-
-def int_hexagon_V6_vdealvdd :
-Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdealvdd">;
-
-def int_hexagon_V6_vdealvdd_128B :
-Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;
-
-def int_hexagon_V6_vavgw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgw">;
-
-def int_hexagon_V6_vavgw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgw_128B">;
-
-def int_hexagon_V6_vdmpyhsusat_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;
-
-def int_hexagon_V6_vdmpyhsusat_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;
-
-def int_hexagon_V6_vgtw_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_xor">;
-
-def int_hexagon_V6_vgtw_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;
-
-def int_hexagon_V6_vtmpyhb_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;
-
-def int_hexagon_V6_vtmpyhb_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;
-
-def int_hexagon_V6_vaddhw :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw">;
-
-def int_hexagon_V6_vaddhw_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_128B">;
-
-def int_hexagon_V6_vaddhq :
-Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhq">;
-
-def int_hexagon_V6_vaddhq_128B :
-Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhq_128B">;
-
-def int_hexagon_V6_vrmpyubv :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv">;
-
-def int_hexagon_V6_vrmpyubv_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;
-
-def int_hexagon_V6_vsubh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubh">;
-
-def int_hexagon_V6_vsubh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_128B">;
-
-def int_hexagon_V6_vrmpyubi :
-Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi", [ImmArg<2>]>;
-
-def int_hexagon_V6_vrmpyubi_128B :
-Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vminw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminw">;
-
-def int_hexagon_V6_vminw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminw_128B">;
-
-def int_hexagon_V6_vmpyubv_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;
-
-def int_hexagon_V6_vmpyubv_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;
-
-def int_hexagon_V6_pred_xor :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_xor">;
-
-def int_hexagon_V6_pred_xor_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
-
-def int_hexagon_V6_veqb_xor :
-Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_xor">;
-
-def int_hexagon_V6_veqb_xor_128B :
-Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;
-
-def int_hexagon_V6_vmpyiewuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh">;
-
-def int_hexagon_V6_vmpyiewuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;
-
-def int_hexagon_V6_vmpybusv_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;
-
-def int_hexagon_V6_vmpybusv_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;
-
-def int_hexagon_V6_vavguhrnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguhrnd">;
-
-def int_hexagon_V6_vavguhrnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;
-
-def int_hexagon_V6_vmpyowh_rnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;
-
-def int_hexagon_V6_vmpyowh_rnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;
-
-def int_hexagon_V6_vsubwsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwsat">;
-
-def int_hexagon_V6_vsubwsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;
-
-def int_hexagon_V6_vsubuhw :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhw">;
-
-def int_hexagon_V6_vsubuhw_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;
-
-def int_hexagon_V6_vrmpybusi_acc :
-Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc", [ImmArg<3>]>;
-
-def int_hexagon_V6_vrmpybusi_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B", [ImmArg<3>]>;
-
-def int_hexagon_V6_vasrw :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrw">;
-
-def int_hexagon_V6_vasrw_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_128B">;
-
-def int_hexagon_V6_vasrh :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrh">;
-
-def int_hexagon_V6_vasrh_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_128B">;
-
-def int_hexagon_V6_vmpyuhv :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv">;
-
-def int_hexagon_V6_vmpyuhv_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;
-
-def int_hexagon_V6_vasrhbrndsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;
-
-def int_hexagon_V6_vasrhbrndsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;
-
-def int_hexagon_V6_vsubuhsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;
-
-def int_hexagon_V6_vsubuhsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;
-
-def int_hexagon_V6_vabsdiffw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffw">;
-
-def int_hexagon_V6_vabsdiffw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;
-
-// V62 HVX Instructions.
-
-def int_hexagon_V6_vandnqrt_acc :
-Hexagon_v16i32_v16i32v512i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc">;
-
-def int_hexagon_V6_vandnqrt_acc_128B :
-Hexagon_v32i32_v32i32v1024i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">;
-
-def int_hexagon_V6_vaddclbh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbh">;
-
-def int_hexagon_V6_vaddclbh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbh_128B">;
-
-def int_hexagon_V6_vmpyowh_64_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">;
-
-def int_hexagon_V6_vmpyowh_64_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">;
-
-def int_hexagon_V6_vmpyewuh_64 :
-Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64">;
-
-def int_hexagon_V6_vmpyewuh_64_128B :
-Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">;
-
-def int_hexagon_V6_vsatuwuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatuwuh">;
-
-def int_hexagon_V6_vsatuwuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">;
-
-def int_hexagon_V6_shuffeqh :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_shuffeqh">;
-
-def int_hexagon_V6_shuffeqh_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_shuffeqh_128B">;
-
-def int_hexagon_V6_shuffeqw :
-Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_shuffeqw">;
-
-def int_hexagon_V6_shuffeqw_128B :
-Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_shuffeqw_128B">;
-
-def int_hexagon_V6_ldcnpnt0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnpnt0">;
-
-def int_hexagon_V6_ldcnpnt0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnpnt0_128B">;
-
-def int_hexagon_V6_vsubcarry :
-Hexagon_custom_v16i32v512i1_v16i32v16i32v512i1_Intrinsic;
-
-def int_hexagon_V6_vsubcarry_128B :
-Hexagon_custom_v32i32v1024i1_v32i32v32i32v1024i1_Intrinsic_128B;
-
-def int_hexagon_V6_vasrhbsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat">;
-
-def int_hexagon_V6_vasrhbsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">;
-
-def int_hexagon_V6_vminb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminb">;
-
-def int_hexagon_V6_vminb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminb_128B">;
-
-def int_hexagon_V6_vmpauhb_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc">;
-
-def int_hexagon_V6_vmpauhb_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">;
-
-def int_hexagon_V6_vaddhw_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw_acc">;
-
-def int_hexagon_V6_vaddhw_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">;
-
-def int_hexagon_V6_vlsrb :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrb">;
-
-def int_hexagon_V6_vlsrb_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrb_128B">;
-
-def int_hexagon_V6_vlutvwhi :
-Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi", [ImmArg<2>]>;
-
-def int_hexagon_V6_vlutvwhi_128B :
-Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vaddububb_sat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddububb_sat">;
-
-def int_hexagon_V6_vaddububb_sat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">;
-
-def int_hexagon_V6_vsubbsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv">;
-
-def int_hexagon_V6_vsubbsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">;
-
-def int_hexagon_V6_ldtp0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtp0">;
-
-def int_hexagon_V6_ldtp0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtp0_128B">;
-
-def int_hexagon_V6_vlutvvb_oracci :
-Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci", [ImmArg<3>]>;
-
-def int_hexagon_V6_vlutvvb_oracci_128B :
-Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B", [ImmArg<3>]>;
-
-def int_hexagon_V6_vsubuwsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">;
-
-def int_hexagon_V6_vsubuwsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">;
-
-def int_hexagon_V6_ldpnt0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldpnt0">;
-
-def int_hexagon_V6_ldpnt0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldpnt0_128B">;
-
-def int_hexagon_V6_vandvnqv :
-Hexagon_v16i32_v512i1v16i32_Intrinsic<"HEXAGON_V6_vandvnqv">;
-
-def int_hexagon_V6_vandvnqv_128B :
-Hexagon_v32i32_v1024i1v32i32_Intrinsic<"HEXAGON_V6_vandvnqv_128B">;
-
-def int_hexagon_V6_lvsplatb :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb">;
-
-def int_hexagon_V6_lvsplatb_128B :
-Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb_128B">;
-
-def int_hexagon_V6_lvsplath :
-Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplath">;
-
-def int_hexagon_V6_lvsplath_128B :
-Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplath_128B">;
-
-def int_hexagon_V6_ldtpnt0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtpnt0">;
-
-def int_hexagon_V6_ldtpnt0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtpnt0_128B">;
-
-def int_hexagon_V6_vlutvwh_nm :
-Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm">;
-
-def int_hexagon_V6_vlutvwh_nm_128B :
-Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">;
-
-def int_hexagon_V6_ldnpnt0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldnpnt0">;
-
-def int_hexagon_V6_ldnpnt0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldnpnt0_128B">;
-
-def int_hexagon_V6_vmpauhb :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb">;
-
-def int_hexagon_V6_vmpauhb_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_128B">;
-
-def int_hexagon_V6_ldtnp0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnp0">;
-
-def int_hexagon_V6_ldtnp0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnp0_128B">;
-
-def int_hexagon_V6_vrounduhub :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduhub">;
-
-def int_hexagon_V6_vrounduhub_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduhub_128B">;
-
-def int_hexagon_V6_vadduhw_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw_acc">;
-
-def int_hexagon_V6_vadduhw_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">;
-
-def int_hexagon_V6_ldcp0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcp0">;
-
-def int_hexagon_V6_ldcp0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcp0_128B">;
-
-def int_hexagon_V6_vadduwsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduwsat">;
-
-def int_hexagon_V6_vadduwsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_128B">;
-
-def int_hexagon_V6_ldtnpnt0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnpnt0">;
-
-def int_hexagon_V6_ldtnpnt0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnpnt0_128B">;
-
-def int_hexagon_V6_vaddbsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbsat">;
-
-def int_hexagon_V6_vaddbsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_128B">;
-
-def int_hexagon_V6_vandnqrt :
-Hexagon_v16i32_v512i1i32_Intrinsic<"HEXAGON_V6_vandnqrt">;
-
-def int_hexagon_V6_vandnqrt_128B :
-Hexagon_v32i32_v1024i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_128B">;
-
-def int_hexagon_V6_vmpyiwub_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">;
-
-def int_hexagon_V6_vmpyiwub_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">;
-
-def int_hexagon_V6_vmaxb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxb">;
-
-def int_hexagon_V6_vmaxb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxb_128B">;
-
-def int_hexagon_V6_vandvqv :
-Hexagon_v16i32_v512i1v16i32_Intrinsic<"HEXAGON_V6_vandvqv">;
-
-def int_hexagon_V6_vandvqv_128B :
-Hexagon_v32i32_v1024i1v32i32_Intrinsic<"HEXAGON_V6_vandvqv_128B">;
-
-def int_hexagon_V6_vaddcarry :
-Hexagon_custom_v16i32v512i1_v16i32v16i32v512i1_Intrinsic;
-
-def int_hexagon_V6_vaddcarry_128B :
-Hexagon_custom_v32i32v1024i1_v32i32v32i32v1024i1_Intrinsic_128B;
-
-def int_hexagon_V6_vasrwuhrndsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">;
-
-def int_hexagon_V6_vasrwuhrndsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">;
-
-def int_hexagon_V6_vlutvvbi :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi", [ImmArg<2>]>;
-
-def int_hexagon_V6_vlutvvbi_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi_128B", [ImmArg<2>]>;
-
-def int_hexagon_V6_vsubuwsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuwsat">;
-
-def int_hexagon_V6_vsubuwsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">;
-
-def int_hexagon_V6_vaddbsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv">;
-
-def int_hexagon_V6_vaddbsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">;
-
-def int_hexagon_V6_ldnp0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldnp0">;
-
-def int_hexagon_V6_ldnp0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldnp0_128B">;
-
-def int_hexagon_V6_vasruwuhrndsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">;
-
-def int_hexagon_V6_vasruwuhrndsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">;
-
-def int_hexagon_V6_vrounduwuh :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduwuh">;
-
-def int_hexagon_V6_vrounduwuh_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">;
-
-def int_hexagon_V6_vlutvvb_nm :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm">;
-
-def int_hexagon_V6_vlutvvb_nm_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">;
-
-def int_hexagon_V6_pred_scalar2v2 :
-Hexagon_v512i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2">;
-
-def int_hexagon_V6_pred_scalar2v2_128B :
-Hexagon_v1024i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">;
-
-def int_hexagon_V6_ldp0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldp0">;
-
-def int_hexagon_V6_ldp0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldp0_128B">;
-
-def int_hexagon_V6_vaddubh_acc :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh_acc">;
-
-def int_hexagon_V6_vaddubh_acc_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">;
-
-def int_hexagon_V6_vaddclbw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbw">;
-
-def int_hexagon_V6_vaddclbw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbw_128B">;
-
-def int_hexagon_V6_ldcpnt0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcpnt0">;
-
-def int_hexagon_V6_ldcpnt0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcpnt0_128B">;
-
-def int_hexagon_V6_vadduwsat_dv :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv">;
-
-def int_hexagon_V6_vadduwsat_dv_128B :
-Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;
-
-def int_hexagon_V6_vmpyiwub :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub">;
-
-def int_hexagon_V6_vmpyiwub_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">;
-
-def int_hexagon_V6_vsubububb_sat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubububb_sat">;
-
-def int_hexagon_V6_vsubububb_sat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">;
-
-def int_hexagon_V6_ldcnp0 :
-Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnp0">;
-
-def int_hexagon_V6_ldcnp0_128B :
-Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnp0_128B">;
-
-def int_hexagon_V6_vlutvwh_oracci :
-Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci", [ImmArg<3>]>;
-
-def int_hexagon_V6_vlutvwh_oracci_128B :
-Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B", [ImmArg<3>]>;
-
-def int_hexagon_V6_vsubbsat :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbsat">;
-
-def int_hexagon_V6_vsubbsat_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_128B">;
-
-// V65 HVX Instructions.
-
-def int_hexagon_V6_vasruhubrndsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat">;
-
-def int_hexagon_V6_vasruhubrndsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat_128B">;
-
def int_hexagon_V6_vrmpybub_rtt :
-Hexagon_v32i32_v16i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;
+Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;
def int_hexagon_V6_vrmpybub_rtt_128B :
-Hexagon_v64i32_v32i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;
-
-def int_hexagon_V6_vmpahhsat :
-Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat">;
-
-def int_hexagon_V6_vmpahhsat_128B :
-Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat_128B">;
-
-def int_hexagon_V6_vavguwrnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguwrnd">;
-
-def int_hexagon_V6_vavguwrnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguwrnd_128B">;
-
-def int_hexagon_V6_vnavgb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgb">;
-
-def int_hexagon_V6_vnavgb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgb_128B">;
-
-def int_hexagon_V6_vasrh_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc">;
-
-def int_hexagon_V6_vasrh_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc_128B">;
-
-def int_hexagon_V6_vmpauhuhsat :
-Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat">;
-
-def int_hexagon_V6_vmpauhuhsat_128B :
-Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat_128B">;
-
-def int_hexagon_V6_vmpyh_acc :
-Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc">;
-
-def int_hexagon_V6_vmpyh_acc_128B :
-Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc_128B">;
+Hexagon_v64i32_v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;
def int_hexagon_V6_vrmpybub_rtt_acc :
-Hexagon_v32i32_v32i32v16i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;
+Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;
def int_hexagon_V6_vrmpybub_rtt_acc_128B :
-Hexagon_v64i32_v64i32v32i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;
-
-def int_hexagon_V6_vavgb :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgb">;
-
-def int_hexagon_V6_vavgb_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgb_128B">;
-
-def int_hexagon_V6_vaslh_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc">;
-
-def int_hexagon_V6_vaslh_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc_128B">;
-
-def int_hexagon_V6_vavguw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguw">;
-
-def int_hexagon_V6_vavguw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguw_128B">;
-
-def int_hexagon_V6_vlut4 :
-Hexagon_v16i32_v16i32i64_Intrinsic<"HEXAGON_V6_vlut4">;
-
-def int_hexagon_V6_vlut4_128B :
-Hexagon_v32i32_v32i32i64_Intrinsic<"HEXAGON_V6_vlut4_128B">;
-
-def int_hexagon_V6_vmpyuhe_acc :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc">;
-
-def int_hexagon_V6_vmpyuhe_acc_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc_128B">;
+Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;
def int_hexagon_V6_vrmpyub_rtt :
-Hexagon_v32i32_v16i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;
+Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;
def int_hexagon_V6_vrmpyub_rtt_128B :
-Hexagon_v64i32_v32i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;
-
-def int_hexagon_V6_vmpsuhuhsat :
-Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat">;
-
-def int_hexagon_V6_vmpsuhuhsat_128B :
-Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat_128B">;
-
-def int_hexagon_V6_vasruhubsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat">;
-
-def int_hexagon_V6_vasruhubsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat_128B">;
-
-def int_hexagon_V6_vmpyuhe :
-Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe">;
-
-def int_hexagon_V6_vmpyuhe_128B :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_128B">;
+Hexagon_v64i32_v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;
def int_hexagon_V6_vrmpyub_rtt_acc :
-Hexagon_v32i32_v32i32v16i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;
+Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;
def int_hexagon_V6_vrmpyub_rtt_acc_128B :
-Hexagon_v64i32_v64i32v32i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
-
-def int_hexagon_V6_vasruwuhsat :
-Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat">;
-
-def int_hexagon_V6_vasruwuhsat_128B :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat_128B">;
-
-def int_hexagon_V6_vmpabuu_acc :
-Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc">;
-
-def int_hexagon_V6_vmpabuu_acc_128B :
-Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc_128B">;
-
-def int_hexagon_V6_vprefixqw :
-Hexagon_v16i32_v512i1_Intrinsic<"HEXAGON_V6_vprefixqw">;
-
-def int_hexagon_V6_vprefixqw_128B :
-Hexagon_v32i32_v1024i1_Intrinsic<"HEXAGON_V6_vprefixqw_128B">;
-
-def int_hexagon_V6_vprefixqh :
-Hexagon_v16i32_v512i1_Intrinsic<"HEXAGON_V6_vprefixqh">;
-
-def int_hexagon_V6_vprefixqh_128B :
-Hexagon_v32i32_v1024i1_Intrinsic<"HEXAGON_V6_vprefixqh_128B">;
-
-def int_hexagon_V6_vprefixqb :
-Hexagon_v16i32_v512i1_Intrinsic<"HEXAGON_V6_vprefixqb">;
+Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
-def int_hexagon_V6_vprefixqb_128B :
-Hexagon_v32i32_v1024i1_Intrinsic<"HEXAGON_V6_vprefixqb_128B">;
+// HVX vector predicate casts.
+// These intrinsics do not emit (nor do they correspond to) any instructions;
+// they are no-ops.
-def int_hexagon_V6_vabsb :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb">;
+def int_hexagon_V6_pred_typecast :
+Hexagon_NonGCC_Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_hexagon_V6_vabsb_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_128B">;
+def int_hexagon_V6_pred_typecast_128B :
+Hexagon_NonGCC_Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
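+
+// A minimal usage sketch (the predicate types below are illustrative; the
+// intrinsic is overloaded on both its result and operand vector types, so
+// the mangled name carries both):
+//
+//   declare <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v128i1(<128 x i1>)
+//
+//   define <64 x i1> @cast_pred(<128 x i1> %q) {
+//     ; emits no instruction; only the IR type of the predicate changes
+//     %p = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v128i1(<128 x i1> %q)
+//     ret <64 x i1> %p
+//   }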
-def int_hexagon_V6_vavgbrnd :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgbrnd">;
-
-def int_hexagon_V6_vavgbrnd_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgbrnd_128B">;
-
-def int_hexagon_V6_vdd0 :
-Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vdd0">;
-
-def int_hexagon_V6_vdd0_128B :
-Hexagon_v64i32__Intrinsic<"HEXAGON_V6_vdd0_128B">;
-
-def int_hexagon_V6_vmpabuu :
-Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu">;
-
-def int_hexagon_V6_vmpabuu_128B :
-Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_128B">;
-
-def int_hexagon_V6_vabsb_sat :
-Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb_sat">;
-
-def int_hexagon_V6_vabsb_sat_128B :
-Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_sat_128B">;
-
-// V66 HVX Instructions.
-
-def int_hexagon_V6_vaddcarrysat :
-Hexagon_v16i32_v16i32v16i32v512i1_Intrinsic<"HEXAGON_V6_vaddcarrysat">;
-
-def int_hexagon_V6_vaddcarrysat_128B :
-Hexagon_v32i32_v32i32v32i32v1024i1_Intrinsic<"HEXAGON_V6_vaddcarrysat_128B">;
-
-def int_hexagon_V6_vasr_into :
-Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vasr_into">;
-
-def int_hexagon_V6_vasr_into_128B :
-Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vasr_into_128B">;
+// Masked vector stores.
+//
-def int_hexagon_V6_vsatdw :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatdw">;
+class Hexagon_custom_vms_Intrinsic
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v64i1_ty,llvm_ptr_ty,llvm_v16i32_ty], [IntrWriteMem]>;
-def int_hexagon_V6_vsatdw_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatdw_128B">;
+class Hexagon_custom_vms_Intrinsic_128B
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v128i1_ty,llvm_ptr_ty,llvm_v32i32_ty], [IntrWriteMem]>;
-def int_hexagon_V6_vrotr :
-Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrotr">;
+def int_hexagon_V6_vmaskedstoreq : Hexagon_custom_vms_Intrinsic;
+def int_hexagon_V6_vmaskedstorenq : Hexagon_custom_vms_Intrinsic;
+def int_hexagon_V6_vmaskedstorentq : Hexagon_custom_vms_Intrinsic;
+def int_hexagon_V6_vmaskedstorentnq : Hexagon_custom_vms_Intrinsic;
-def int_hexagon_V6_vrotr_128B :
-Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrotr_128B">;
+def int_hexagon_V6_vmaskedstoreq_128B : Hexagon_custom_vms_Intrinsic_128B;
+def int_hexagon_V6_vmaskedstorenq_128B : Hexagon_custom_vms_Intrinsic_128B;
+def int_hexagon_V6_vmaskedstorentq_128B : Hexagon_custom_vms_Intrinsic_128B;
+def int_hexagon_V6_vmaskedstorentnq_128B : Hexagon_custom_vms_Intrinsic_128B;
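+
+// A minimal usage sketch of the 64-byte variant, assuming the default i8*
+// mangling of llvm_ptr_ty: lanes enabled in the predicate are stored,
+// disabled lanes leave memory untouched (hence IntrWriteMem above).
+//
+//   declare void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1>, i8*, <16 x i32>)
+//
+//   define void @store_if(<64 x i1> %q, i8* %p, <16 x i32> %v) {
+//     call void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1> %q, i8* %p, <16 x i32> %v)
+//     ret void
+//   }
+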
+include "llvm/IR/IntrinsicsHexagonDep.td"
diff --git a/llvm/include/llvm/IR/IntrinsicsHexagonDep.td b/llvm/include/llvm/IR/IntrinsicsHexagonDep.td
new file mode 100644
index 000000000000..198b6a7ab0d1
--- /dev/null
+++ b/llvm/include/llvm/IR/IntrinsicsHexagonDep.td
@@ -0,0 +1,6144 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Automatically generated file, do not edit!
+//===----------------------------------------------------------------------===//
+
+// tag : C2_cmpeq
+class Hexagon_i32_i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : C2_cmpeqp
+class Hexagon_i32_i64i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : C2_not
+class Hexagon_i32_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : C4_and_and
+class Hexagon_i32_i32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : C2_vmux
+class Hexagon_i64_i32i64i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : C2_mask
+class Hexagon_i64_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : A4_vcmpbeqi
+class Hexagon_i32_i64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : A4_boundscheck
+class Hexagon_i32_i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : M2_mpyd_acc_hh_s0
+class Hexagon_i64_i64i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : M2_mpyd_hh_s0
+class Hexagon_i64_i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : M2_vmpy2es_s0
+class Hexagon_i64_i64i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : M2_vmac2es_s0
+class Hexagon_i64_i64i64i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : M2_vrcmpys_s1
+class Hexagon_i64_i64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : M2_vrcmpys_acc_s1
+class Hexagon_i64_i64i64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : S4_vrcrotate_acc
+class Hexagon_i64_i64i64i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : A2_addsp
+class Hexagon_i64_i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : A2_vconj
+class Hexagon_i64_i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty],
+ intr_properties>;
+
+// tag : A2_sat
+class Hexagon_i32_i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty],
+ intr_properties>;
+
+// tag : F2_sfadd
+class Hexagon_float_floatfloat_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty,llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_sffma
+class Hexagon_float_floatfloatfloat_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_sffma_sc
+class Hexagon_float_floatfloatfloati32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : F2_sfcmpeq
+class Hexagon_i32_floatfloat_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty,llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_sfclass
+class Hexagon_i32_floati32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : F2_sfimm_p
+class Hexagon_float_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : F2_sffixupr
+class Hexagon_float_float_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_dfadd
+class Hexagon_double_doubledouble_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty,llvm_double_ty],
+ intr_properties>;
+
+// tag : F2_dfmpylh
+class Hexagon_double_doubledoubledouble_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty,llvm_double_ty,llvm_double_ty],
+ intr_properties>;
+
+// tag : F2_dfcmpeq
+class Hexagon_i32_doubledouble_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty,llvm_double_ty],
+ intr_properties>;
+
+// tag : F2_dfclass
+class Hexagon_i32_doublei32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : F2_dfimm_p
+class Hexagon_double_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : F2_conv_sf2df
+class Hexagon_double_float_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_conv_df2sf
+class Hexagon_float_double_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_double_ty],
+ intr_properties>;
+
+// tag : F2_conv_ud2sf
+class Hexagon_float_i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i64_ty],
+ intr_properties>;
+
+// tag : F2_conv_ud2df
+class Hexagon_double_i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i64_ty],
+ intr_properties>;
+
+// tag : F2_conv_sf2uw
+class Hexagon_i32_float_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_conv_sf2ud
+class Hexagon_i64_float_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_float_ty],
+ intr_properties>;
+
+// tag : F2_conv_df2uw
+class Hexagon_i32_double_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty],
+ intr_properties>;
+
+// tag : F2_conv_df2ud
+class Hexagon_i64_double_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_double_ty],
+ intr_properties>;
+
+// tag : S2_insert
+class Hexagon_i32_i32i32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : S2_insert_rp
+class Hexagon_i32_i32i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : Y2_dcfetch
+class Hexagon__ptr_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty],
+ intr_properties>;
+
+// tag : Y4_l2fetch
+class Hexagon__ptri32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : Y5_l2fetch
+class Hexagon__ptri64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32__Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32__Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [],
+ intr_properties>;
+
+// tag :
+class Hexagon_i32_v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_i64_v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_i64_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v4i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v4i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v4i32_v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v4i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v4i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v4i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v8i32v64i32v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v8i32_ty,llvm_v64i32_ty,llvm_v64i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v64i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v64i32v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32v4i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v4i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32v64i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v32i32_v32i32v32i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag :
+class Hexagon_v64i32_v64i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vS32b_qpred_ai
+class Hexagon_custom__v64i1ptrv16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v64i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vS32b_qpred_ai
+class Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v128i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_valignb
+class Hexagon_v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vror
+class Hexagon_v16i32_v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vunpackub
+class Hexagon_v32i32_v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vunpackob
+class Hexagon_v32i32_v32i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vpackeb
+class Hexagon_v16i32_v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vdmpyhvsat_acc
+class Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vdmpyhisat
+class Hexagon_v16i32_v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vdmpyhisat_acc
+class Hexagon_v16i32_v16i32v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vdmpyhisat_acc
+class Hexagon_v32i32_v32i32v64i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vrmpyubi
+class Hexagon_v32i32_v32i32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vrmpyubi
+class Hexagon_v64i32_v64i32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vrmpyubi_acc
+class Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vrmpyubi_acc
+class Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vasr_into
+class Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vaddcarrysat
+class Hexagon_custom_v16i32_v16i32v16i32v64i1_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v64i1_ty],
+ intr_properties>;
+
+// tag : V6_vaddcarrysat
+class Hexagon_custom_v32i32_v32i32v32i32v128i1_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v128i1_ty],
+ intr_properties>;
+
+// tag : V6_vaddcarry
+class Hexagon_custom_v16i32v64i1_v16i32v16i32v64i1_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty,llvm_v64i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v64i1_ty],
+ intr_properties>;
+
+// tag : V6_vaddcarry
+class Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty,llvm_v128i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v128i1_ty],
+ intr_properties>;
+
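+// Defs built from the two classes above (e.g. V6_vaddcarry, per the tag)
+// return both the sum vector and the carry-out predicate as a two-element
+// struct; a sketch of consuming such a multi-result intrinsic:
+//
+//   %res  = call { <16 x i32>, <64 x i1> }
+//       @llvm.hexagon.V6.vaddcarry(<16 x i32> %a, <16 x i32> %b, <64 x i1> %cin)
+//   %sum  = extractvalue { <16 x i32>, <64 x i1> } %res, 0
+//   %cout = extractvalue { <16 x i32>, <64 x i1> } %res, 1
+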
+// tag : V6_vaddubh
+class Hexagon_v32i32_v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vd0
+class Hexagon_v16i32__Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [],
+ intr_properties>;
+
+// tag : V6_vaddbq
+class Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vaddbq
+class Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vabsb
+class Hexagon_v16i32_v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vmpyub
+class Hexagon_v32i32_v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vmpyub
+class Hexagon_v64i32_v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vmpyub_acc
+class Hexagon_v32i32_v32i32v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vmpyub_acc
+class Hexagon_v64i32_v64i32v32i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandqrt
+class Hexagon_custom_v16i32_v64i1i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty], [llvm_v64i1_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandqrt
+class Hexagon_custom_v32i32_v128i1i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v128i1_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandqrt_acc
+class Hexagon_custom_v16i32_v16i32v64i1i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v64i1_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandqrt_acc
+class Hexagon_custom_v32i32_v32i32v128i1i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v128i1_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandvrt
+class Hexagon_custom_v64i1_v16i32i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandvrt
+class Hexagon_custom_v128i1_v32i32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandvrt_acc
+class Hexagon_custom_v64i1_v64i1v16i32i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandvrt_acc
+class Hexagon_custom_v128i1_v128i1v32i32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vandvqv
+class Hexagon_custom_v16i32_v64i1v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vandvqv
+class Hexagon_custom_v32i32_v128i1v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vgtw
+class Hexagon_custom_v64i1_v16i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vgtw
+class Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vgtw_and
+class Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vgtw_and
+class Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_pred_scalar2
+class Hexagon_custom_v64i1_i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_pred_scalar2
+class Hexagon_custom_v128i1_i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_shuffeqw
+class Hexagon_custom_v64i1_v64i1v64i1_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v64i1_ty],
+ intr_properties>;
+
+// tag : V6_shuffeqw
+class Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v128i1_ty],
+ intr_properties>;
+
+// tag : V6_pred_not
+class Hexagon_custom_v64i1_v64i1_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i1_ty], [llvm_v64i1_ty],
+ intr_properties>;
+
+// tag : V6_pred_not
+class Hexagon_custom_v128i1_v128i1_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v128i1_ty], [llvm_v128i1_ty],
+ intr_properties>;
+
+// tag : V6_vswap
+class Hexagon_custom_v32i32_v64i1v16i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vswap
+class Hexagon_custom_v64i32_v128i1v32i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v64i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vshuffvdd
+class Hexagon_v32i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_extractw
+class Hexagon_i32_v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_lvsplatw
+class Hexagon_v16i32_i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vlutvvb_oracc
+class Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vlutvwh_oracc
+class Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ intr_properties>;
+
+// tag : V6_vmpahhsat
+class Hexagon_v16i32_v16i32v16i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : V6_vlut4
+class Hexagon_v16i32_v16i32i64_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
+ intr_properties>;
+
+// tag : V6_hi
+class Hexagon_v16i32_v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermw
+class Hexagon__ptri32i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermw
+class Hexagon__ptri32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermhw
+class Hexagon__ptri32i32v64i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermwq
+class Hexagon_custom__ptrv64i1i32i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_ptr_ty,llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermwq
+class Hexagon_custom__ptrv128i1i32i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_ptr_ty,llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermhwq
+class Hexagon_custom__ptrv64i1i32i32v32i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_ptr_ty,llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vgathermhwq
+class Hexagon_custom__ptrv128i1i32i32v64i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_ptr_ty,llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermw
+class Hexagon__i32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermw
+class Hexagon__i32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermwq
+class Hexagon_custom__v64i1i32i32v16i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermwq
+class Hexagon_custom__v128i1i32i32v32i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermhw
+class Hexagon__i32i32v32i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermhw
+class Hexagon__i32i32v64i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermhwq
+class Hexagon_custom__v64i1i32i32v32i32v16i32_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vscattermhwq
+class Hexagon_custom__v128i1i32i32v64i32v32i32_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [], [llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
+// tag : V6_vprefixqb
+class Hexagon_custom_v16i32_v64i1_Intrinsic<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty], [llvm_v64i1_ty],
+ intr_properties>;
+
+// tag : V6_vprefixqb
+class Hexagon_custom_v32i32_v128i1_Intrinsic_128B<
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty], [llvm_v128i1_ty],
+ intr_properties>;
+
+// V5 Scalar Instructions.
+
+def int_hexagon_C2_cmpeq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeq">;
+
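+// For orientation: the def name maps directly to the IR intrinsic name
+// (underscores become dots), while the GCCIntSuffix string ties it to the
+// corresponding Clang builtin (__builtin_HEXAGON_C2_cmpeq). A sketch:
+//
+//   declare i32 @llvm.hexagon.C2.cmpeq(i32, i32)
+//
+//   %p = call i32 @llvm.hexagon.C2.cmpeq(i32 %a, i32 %b)
+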
+def int_hexagon_C2_cmpgt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgt">;
+
+def int_hexagon_C2_cmpgtu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtu">;
+
+def int_hexagon_C2_cmpeqp :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpeqp">;
+
+def int_hexagon_C2_cmpgtp :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtp">;
+
+def int_hexagon_C2_cmpgtup :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtup">;
+
+def int_hexagon_A4_rcmpeqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
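+// ImmArg<ArgIndex<1>> constrains operand 1 to be a compile-time constant,
+// so calls must pass a literal there (the value 7 below is arbitrary):
+//
+//   declare i32 @llvm.hexagon.A4.rcmpeqi(i32, i32)
+//
+//   %r = call i32 @llvm.hexagon.A4.rcmpeqi(i32 %a, i32 7)
+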
+def int_hexagon_A4_rcmpneqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_rcmpeq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeq">;
+
+def int_hexagon_A4_rcmpneq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneq">;
+
+def int_hexagon_C2_bitsset :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsset">;
+
+def int_hexagon_C2_bitsclr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclr">;
+
+def int_hexagon_C4_nbitsset :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsset">;
+
+def int_hexagon_C4_nbitsclr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclr">;
+
+def int_hexagon_C2_cmpeqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C2_cmpgti :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C2_cmpgtui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C2_cmpgei :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgei", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C2_cmpgeui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgeui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C2_cmplt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmplt">;
+
+def int_hexagon_C2_cmpltu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpltu">;
+
+def int_hexagon_C2_bitsclri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C4_nbitsclri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C4_cmpneqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C4_cmpltei :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpltei", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C4_cmplteui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C4_cmpneq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneq">;
+
+def int_hexagon_C4_cmplte :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplte">;
+
+def int_hexagon_C4_cmplteu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteu">;
+
+def int_hexagon_C2_and :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_and">;
+
+def int_hexagon_C2_or :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_or">;
+
+def int_hexagon_C2_xor :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_xor">;
+
+def int_hexagon_C2_andn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_andn">;
+
+def int_hexagon_C2_not :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_not">;
+
+def int_hexagon_C2_orn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_orn">;
+
+def int_hexagon_C4_and_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_and">;
+
+def int_hexagon_C4_and_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_or">;
+
+def int_hexagon_C4_or_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_and">;
+
+def int_hexagon_C4_or_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_or">;
+
+def int_hexagon_C4_and_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_andn">;
+
+def int_hexagon_C4_and_orn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_orn">;
+
+def int_hexagon_C4_or_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_andn">;
+
+def int_hexagon_C4_or_orn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_orn">;
+
+def int_hexagon_C2_pxfer_map :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_pxfer_map">;
+
+def int_hexagon_C2_any8 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_any8">;
+
+def int_hexagon_C2_all8 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_all8">;
+
+def int_hexagon_C2_vitpack :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_vitpack">;
+
+def int_hexagon_C2_mux :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_mux">;
+
+def int_hexagon_C2_muxii :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxii", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_C2_muxir :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxir", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_C2_muxri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_C2_vmux :
+Hexagon_i64_i32i64i64_Intrinsic<"HEXAGON_C2_vmux">;
+
+def int_hexagon_C2_mask :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_C2_mask">;
+
+def int_hexagon_A2_vcmpbeq :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbeq">;
+
+def int_hexagon_A4_vcmpbeqi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_vcmpbeq_any :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
+
+def int_hexagon_A2_vcmpbgtu :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
+
+def int_hexagon_A4_vcmpbgtui :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_vcmpbgt :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbgt">;
+
+def int_hexagon_A4_vcmpbgti :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cmpbeq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeq">;
+
+def int_hexagon_A4_cmpbeqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cmpbgtu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtu">;
+
+def int_hexagon_A4_cmpbgtui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cmpbgt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgt">;
+
+def int_hexagon_A4_cmpbgti :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_vcmpheq :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpheq">;
+
+def int_hexagon_A2_vcmphgt :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgt">;
+
+def int_hexagon_A2_vcmphgtu :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgtu">;
+
+def int_hexagon_A4_vcmpheqi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpheqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_vcmphgti :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_vcmphgtui :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cmpheq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheq">;
+
+def int_hexagon_A4_cmphgt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgt">;
+
+def int_hexagon_A4_cmphgtu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtu">;
+
+def int_hexagon_A4_cmpheqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cmphgti :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cmphgtui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_vcmpweq :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpweq">;
+
+def int_hexagon_A2_vcmpwgt :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgt">;
+
+def int_hexagon_A2_vcmpwgtu :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
+
+def int_hexagon_A4_vcmpweqi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpweqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_vcmpwgti :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_vcmpwgtui :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_boundscheck :
+Hexagon_i32_i32i64_Intrinsic<"HEXAGON_A4_boundscheck">;
+
+def int_hexagon_A4_tlbmatch :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_tlbmatch">;
+
+def int_hexagon_C2_tfrpr :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrpr">;
+
+def int_hexagon_C2_tfrrp :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrrp">;
+
+def int_hexagon_C4_fastcorner9 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9">;
+
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
+
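+// M2_mpy naming: hh/hl/lh/ll select which 16-bit half (high or low) of each
+// 32-bit source feeds the multiply; s0/s1 shift the product left by 0 or 1
+// bit; "acc" accumulates into the first operand (+=) while "nac" subtracts
+// (-=); "sat" saturates and "rnd" rounds the result.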
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
+
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
+
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
+
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
+
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
+
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
+
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
+
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
+
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
+
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
+
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
+
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
+
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
+
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
+
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
+
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
+
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
+
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
+
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
+
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
+
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
+
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
+
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
+
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
+
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
+
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
+
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
+
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
+
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
+
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
+
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
+
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
+
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
+
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
+
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
+
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
+
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
+
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
+
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
+
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
+
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
+
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
+
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
+
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
+
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
+
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
+
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
+
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
+
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
+
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
+
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
+
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
+
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
+
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
+
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
+
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
+
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
+
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
+
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
+
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
+
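+// The mpyd ("double") variants return the full product in a 64-bit register
+// pair instead of a 32-bit register.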
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
+
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
+
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
+
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
+
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
+
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
+
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
+
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
+
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
+
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
+
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
+
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
+
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
+
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
+
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
+
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
+
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
+
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
+
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
+
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
+
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
+
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
+
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
+
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
+
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
+
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
+
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
+
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
+
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
+
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
+
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
+
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
+
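+// mpyu: unsigned counterparts of the halfword multiplies above.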
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
+
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
+
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
+
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
+
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
+
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
+
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
+
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
+
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
+
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
+
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
+
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
+
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
+
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
+
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
+
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
+
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
+
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
+
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
+
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
+
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
+
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
+
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
+
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
+
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
+
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
+
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
+
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
+
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
+
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
+
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
+
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
+
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
+
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
+
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
+
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
+
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
+
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
+
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
+
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
+
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
+
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
+
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
+
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
+
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
+
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
+
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
+
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
+
+def int_hexagon_M2_mpysmi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysmi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_M2_macsip :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsip", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_M2_macsin :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsin", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
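+// dpmpyss/dpmpyuu: full 32x32 signed or unsigned multiplies with a 64-bit
+// product, plus accumulating (acc) and subtracting (nac) forms.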
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
+
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
+
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
+
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
+
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
+
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
+
+def int_hexagon_M2_mpy_up :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up">;
+
+def int_hexagon_M2_mpy_up_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
+
+def int_hexagon_M2_mpy_up_s1_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
+
+def int_hexagon_M2_mpyu_up :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_up">;
+
+def int_hexagon_M2_mpysu_up :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysu_up">;
+
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
+
+def int_hexagon_M4_mac_up_s1_sat :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
+
+def int_hexagon_M4_nac_up_s1_sat :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
+
+def int_hexagon_M2_mpyi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyi">;
+
+def int_hexagon_M2_mpyui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyui">;
+
+def int_hexagon_M2_maci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_maci">;
+
+def int_hexagon_M2_acci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_acci">;
+
+def int_hexagon_M2_accii :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_accii", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_M2_nacci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_nacci">;
+
+def int_hexagon_M2_naccii :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_naccii", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_M2_subacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_subacc">;
+
+def int_hexagon_M4_mpyrr_addr :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
+
+def int_hexagon_M4_mpyri_addr_u2 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr_u2", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_M4_mpyri_addr :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_M4_mpyri_addi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addi", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_M4_mpyrr_addi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addi", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
+
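+// vmpy2/vmac2: SIMD multiplies of the two halfword lanes of each 32-bit
+// source; "su" mixes signed and unsigned operands, and the "pack" forms
+// return the two products packed back into a single i32.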
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
+
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
+
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
+
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
+
+def int_hexagon_M2_vmpy2su_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
+
+def int_hexagon_M2_vmpy2su_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
+
+def int_hexagon_M2_vmac2su_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
+
+def int_hexagon_M2_vmac2su_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
+
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
+
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
+
+def int_hexagon_M2_vmac2 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2">;
+
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
+
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
+
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
+
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
+
+def int_hexagon_M2_vmac2es :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es">;
+
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrmac_s0">;
+
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
+
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
+
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
+
+def int_hexagon_M5_vrmpybuu :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybuu">;
+
+def int_hexagon_M5_vrmacbuu :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbuu">;
+
+def int_hexagon_M5_vrmpybsu :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybsu">;
+
+def int_hexagon_M5_vrmacbsu :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbsu">;
+
+def int_hexagon_M5_vmpybuu :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybuu">;
+
+def int_hexagon_M5_vmpybsu :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybsu">;
+
+def int_hexagon_M5_vmacbuu :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbuu">;
+
+def int_hexagon_M5_vmacbsu :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbsu">;
+
+def int_hexagon_M5_vdmpybsu :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vdmpybsu">;
+
+def int_hexagon_M5_vdmacbsu :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vdmacbsu">;
+
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
+
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
+
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
+
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
+
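+// Complex multiplies: cmpy treats each 32-bit operand as a packed complex
+// value (real and imaginary halfwords); a trailing "c" (cmpysc, cmacsc)
+// conjugates the second operand, and cmac/cnac accumulate or subtract the
+// complex product.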
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
+
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
+
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
+
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
+
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s0">;
+
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s1">;
+
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
+
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
+
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s0">;
+
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s1">;
+
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
+
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
+
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s0">;
+
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s1">;
+
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
+
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
+
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
+
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
+
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
+
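+// mmpy/mmac: vector multiplies of the word lanes of one 64-bit pair by
+// halfwords of the other; "l"/"h" select the low or high halfwords, "u"
+// marks unsigned forms, and the rs0/rs1 variants round the result.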
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s0">;
+
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s1">;
+
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s0">;
+
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s1">;
+
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
+
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
+
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
+
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
+
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
+
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
+
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
+
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
+
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
+
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
+
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
+
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
+
+def int_hexagon_M4_vrmpyeh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
+
+def int_hexagon_M4_vrmpyeh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
+
+def int_hexagon_M4_vrmpyeh_acc_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
+
+def int_hexagon_M4_vrmpyeh_acc_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
+
+def int_hexagon_M4_vrmpyoh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
+
+def int_hexagon_M4_vrmpyoh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
+
+def int_hexagon_M4_vrmpyoh_acc_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
+
+def int_hexagon_M4_vrmpyoh_acc_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
+
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
+
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
+
+def int_hexagon_M2_hmmpyl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
+
+def int_hexagon_M2_hmmpyh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
+
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
+
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
+
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
+
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
+
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
+
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
+
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
+
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
+
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
+
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
+
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
+
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
+
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
+
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
+
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
+
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
+
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
+
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
+
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
+
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
+
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmaci_s0">;
+
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacr_s0">;
+
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
+
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
+
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
+
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
+
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
+
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
+
+def int_hexagon_M4_cmpyi_wh :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
+
+def int_hexagon_M4_cmpyr_wh :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
+
+def int_hexagon_M4_cmpyi_whc :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
+
+def int_hexagon_M4_cmpyr_whc :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
+
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
+
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
+
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
+
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
+
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
+
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
+
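+// vcrotate/vrcrotate rotate packed complex values by a multiple of 90
+// degrees chosen by the control operand; vcnegh/vrcnegh conditionally
+// negate halfword lanes (see the Hexagon PRM for the exact encodings).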
+def int_hexagon_S2_vcrotate :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcrotate">;
+
+def int_hexagon_S4_vrcrotate_acc :
+Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_S4_vrcrotate :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_vcnegh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcnegh">;
+
+def int_hexagon_S2_vrcnegh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vrcnegh">;
+
+def int_hexagon_M4_pmpyw :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_pmpyw">;
+
+def int_hexagon_M4_vpmpyh :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_vpmpyh">;
+
+def int_hexagon_M4_pmpyw_acc :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
+
+def int_hexagon_M4_vpmpyh_acc :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
+
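+// A2 ALU arithmetic. The addh/subh families add or subtract selected 16-bit
+// halves of the sources (the trailing ll/lh/hl/hh picks which), with "sat"
+// variants saturating; the "p" suffix in this section marks 64-bit
+// register-pair forms.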
+def int_hexagon_A2_add :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_add">;
+
+def int_hexagon_A2_sub :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_sub">;
+
+def int_hexagon_A2_addsat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addsat">;
+
+def int_hexagon_A2_subsat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subsat">;
+
+def int_hexagon_A2_addi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
+
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
+
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
+
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
+
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
+
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
+
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
+
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
+
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
+
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
+
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
+
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
+
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
+
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
+
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
+
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
+
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
+
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
+
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
+
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
+
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
+
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
+
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
+
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
+
+def int_hexagon_A2_aslh :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_aslh">;
+
+def int_hexagon_A2_asrh :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_asrh">;
+
+def int_hexagon_A2_addp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addp">;
+
+def int_hexagon_A2_addpsat :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addpsat">;
+
+def int_hexagon_A2_addsp :
+Hexagon_i64_i32i64_Intrinsic<"HEXAGON_A2_addsp">;
+
+def int_hexagon_A2_subp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_subp">;
+
+def int_hexagon_A2_neg :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_neg">;
+
+def int_hexagon_A2_negsat :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_negsat">;
+
+def int_hexagon_A2_abs :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abs">;
+
+def int_hexagon_A2_abssat :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abssat">;
+
+def int_hexagon_A2_vconj :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vconj">;
+
+def int_hexagon_A2_negp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_negp">;
+
+def int_hexagon_A2_absp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_absp">;
+
+def int_hexagon_A2_max :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_max">;
+
+def int_hexagon_A2_maxu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_maxu">;
+
+def int_hexagon_A2_min :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_min">;
+
+def int_hexagon_A2_minu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_minu">;
+
+def int_hexagon_A2_maxp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxp">;
+
+def int_hexagon_A2_maxup :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxup">;
+
+def int_hexagon_A2_minp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minp">;
+
+def int_hexagon_A2_minup :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minup">;
+
+def int_hexagon_A2_tfr :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfr">;
+
+def int_hexagon_A2_tfrsi :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrsi", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_A2_tfrp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_tfrp">;
+
+def int_hexagon_A2_tfrpi :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_tfrpi", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_A2_zxtb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxtb">;
+
+def int_hexagon_A2_sxtb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxtb">;
+
+def int_hexagon_A2_zxth :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxth">;
+
+def int_hexagon_A2_sxth :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxth">;
+
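+// combinew packs two 32-bit sources into a register pair; combine_hh/_hl/
+// _lh/_ll pack two selected halfwords into one register; tfril/tfrih load
+// an immediate into the low or high halfword, leaving the other intact.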
+def int_hexagon_A2_combinew :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combinew">;
+
+def int_hexagon_A4_combineri :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_combineir :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineir", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_A2_combineii :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combineii", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_combine_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hh">;
+
+def int_hexagon_A2_combine_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hl">;
+
+def int_hexagon_A2_combine_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_lh">;
+
+def int_hexagon_A2_combine_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_ll">;
+
+def int_hexagon_A2_tfril :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfril", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_tfrih :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfrih", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_and :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_and">;
+
+def int_hexagon_A2_or :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_or">;
+
+def int_hexagon_A2_xor :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_xor">;
+
+def int_hexagon_A2_not :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_not">;
+
+def int_hexagon_M2_xor_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_xor_xacc">;
+
+def int_hexagon_M4_xor_xacc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_xor_xacc">;
+
+def int_hexagon_A4_andn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_andn">;
+
+def int_hexagon_A4_orn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_orn">;
+
+def int_hexagon_A4_andnp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_andnp">;
+
+def int_hexagon_A4_ornp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_ornp">;
+
+def int_hexagon_S4_addaddi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addaddi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_subaddi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subaddi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_M4_and_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_and">;
+
+def int_hexagon_M4_and_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_andn">;
+
+def int_hexagon_M4_and_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_or">;
+
+def int_hexagon_M4_and_xor :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_xor">;
+
+def int_hexagon_M4_or_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_and">;
+
+def int_hexagon_M4_or_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_andn">;
+
+def int_hexagon_M4_or_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_or">;
+
+def int_hexagon_M4_or_xor :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_xor">;
+
+def int_hexagon_S4_or_andix :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andix", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_or_andi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_or_ori :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_ori", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_M4_xor_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_and">;
+
+def int_hexagon_M4_xor_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_or">;
+
+def int_hexagon_M4_xor_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_andn">;
+
+def int_hexagon_A2_subri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subri", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_A2_andir :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_andir", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_orir :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_orir", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A2_andp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_andp">;
+
+def int_hexagon_A2_orp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_orp">;
+
+def int_hexagon_A2_xorp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_xorp">;
+
+def int_hexagon_A2_notp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_notp">;
+
+def int_hexagon_A2_sxtw :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_sxtw">;
+
+def int_hexagon_A2_sat :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_sat">;
+
+def int_hexagon_A2_roundsat :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_roundsat">;
+
+def int_hexagon_A2_sath :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sath">;
+
+def int_hexagon_A2_satuh :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satuh">;
+
+def int_hexagon_A2_satub :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satub">;
+
+def int_hexagon_A2_satb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satb">;
+
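+// SIMD add/sub over 64-bit vectors: the b/h/w infix gives the lane width
+// (byte, halfword, word), "u" marks unsigned lanes, and a trailing "s"
+// saturates.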
+def int_hexagon_A2_vaddub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddub">;
+
+def int_hexagon_A2_vaddb_map :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddb_map">;
+
+def int_hexagon_A2_vaddubs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddubs">;
+
+def int_hexagon_A2_vaddh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddh">;
+
+def int_hexagon_A2_vaddhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddhs">;
+
+def int_hexagon_A2_vadduhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vadduhs">;
+
+def int_hexagon_A5_vaddhubs :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A5_vaddhubs">;
+
+def int_hexagon_A2_vaddw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddw">;
+
+def int_hexagon_A2_vaddws :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddws">;
+
+def int_hexagon_S4_vxaddsubw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubw">;
+
+def int_hexagon_S4_vxsubaddw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddw">;
+
+def int_hexagon_S4_vxaddsubh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubh">;
+
+def int_hexagon_S4_vxsubaddh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddh">;
+
+def int_hexagon_S4_vxaddsubhr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
+
+def int_hexagon_S4_vxsubaddhr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
+
+def int_hexagon_A2_svavgh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavgh">;
+
+def int_hexagon_A2_svavghs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavghs">;
+
+def int_hexagon_A2_svnavgh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svnavgh">;
+
+def int_hexagon_A2_svaddh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddh">;
+
+def int_hexagon_A2_svaddhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddhs">;
+
+def int_hexagon_A2_svadduhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svadduhs">;
+
+def int_hexagon_A2_svsubh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubh">;
+
+def int_hexagon_A2_svsubhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubhs">;
+
+def int_hexagon_A2_svsubuhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubuhs">;
+
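+// vraddub reduce-adds unsigned byte lanes and vrsadub reduce-adds absolute
+// differences; the _acc forms accumulate into the destination pair.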
+def int_hexagon_A2_vraddub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vraddub">;
+
+def int_hexagon_A2_vraddub_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vraddub_acc">;
+
+def int_hexagon_M2_vraddh :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vraddh">;
+
+def int_hexagon_M2_vradduh :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vradduh">;
+
+def int_hexagon_A2_vsubub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubub">;
+
+def int_hexagon_A2_vsubb_map :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubb_map">;
+
+def int_hexagon_A2_vsububs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsububs">;
+
+def int_hexagon_A2_vsubh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubh">;
+
+def int_hexagon_A2_vsubhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubhs">;
+
+def int_hexagon_A2_vsubuhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubuhs">;
+
+def int_hexagon_A2_vsubw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubw">;
+
+def int_hexagon_A2_vsubws :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubws">;
+
+def int_hexagon_A2_vabsh :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsh">;
+
+def int_hexagon_A2_vabshsat :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabshsat">;
+
+def int_hexagon_A2_vabsw :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsw">;
+
+def int_hexagon_A2_vabswsat :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabswsat">;
+
+def int_hexagon_M2_vabsdiffw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffw">;
+
+def int_hexagon_M2_vabsdiffh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffh">;
+
+def int_hexagon_A2_vrsadub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vrsadub">;
+
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
+
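+// Vector averages: vavg averages corresponding lanes and vnavg halves their
+// difference; a trailing "r" rounds and "cr" uses convergent (round-to-even)
+// rounding.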
+def int_hexagon_A2_vavgub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgub">;
+
+def int_hexagon_A2_vavguh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguh">;
+
+def int_hexagon_A2_vavgh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgh">;
+
+def int_hexagon_A2_vnavgh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgh">;
+
+def int_hexagon_A2_vavgw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgw">;
+
+def int_hexagon_A2_vnavgw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgw">;
+
+def int_hexagon_A2_vavgwr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwr">;
+
+def int_hexagon_A2_vnavgwr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwr">;
+
+def int_hexagon_A2_vavgwcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwcr">;
+
+def int_hexagon_A2_vnavgwcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwcr">;
+
+def int_hexagon_A2_vavghcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghcr">;
+
+def int_hexagon_A2_vnavghcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghcr">;
+
+def int_hexagon_A2_vavguw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguw">;
+
+def int_hexagon_A2_vavguwr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguwr">;
+
+def int_hexagon_A2_vavgubr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgubr">;
+
+def int_hexagon_A2_vavguhr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguhr">;
+
+def int_hexagon_A2_vavghr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghr">;
+
+def int_hexagon_A2_vnavghr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghr">;
+
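+// round/cround: arithmetic versus convergent rounding at a bit position
+// given in a register (_rr) or as an immediate (_ri), optionally saturating.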
+def int_hexagon_A4_round_ri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_round_rr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr">;
+
+def int_hexagon_A4_round_ri_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri_sat", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_round_rr_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr_sat">;
+
+def int_hexagon_A4_cround_ri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_ri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_cround_rr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_rr">;
+
+def int_hexagon_A4_vrminh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminh">;
+
+def int_hexagon_A4_vrmaxh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxh">;
+
+def int_hexagon_A4_vrminuh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuh">;
+
+def int_hexagon_A4_vrmaxuh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuh">;
+
+def int_hexagon_A4_vrminw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminw">;
+
+def int_hexagon_A4_vrmaxw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxw">;
+
+def int_hexagon_A4_vrminuw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuw">;
+
+def int_hexagon_A4_vrmaxuw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuw">;
+
+def int_hexagon_A2_vminb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminb">;
+
+def int_hexagon_A2_vmaxb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxb">;
+
+def int_hexagon_A2_vminub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminub">;
+
+def int_hexagon_A2_vmaxub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxub">;
+
+def int_hexagon_A2_vminh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminh">;
+
+def int_hexagon_A2_vmaxh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxh">;
+
+def int_hexagon_A2_vminuh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuh">;
+
+def int_hexagon_A2_vmaxuh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuh">;
+
+def int_hexagon_A2_vminw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminw">;
+
+def int_hexagon_A2_vmaxw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxw">;
+
+def int_hexagon_A2_vminuw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuw">;
+
+def int_hexagon_A2_vmaxuw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuw">;
+
+def int_hexagon_A4_modwrapu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_modwrapu">;
+
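+// Single-precision (sf) floating-point ops. These carry Throws, reflecting
+// that the underlying instructions can raise IEEE floating-point exceptions;
+// the _lib fused multiply-add forms are variants intended for math-library
+// sequences, and the sffixup* ops prepare operands for the software divide
+// sequence.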
+def int_hexagon_F2_sfadd :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfadd", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfsub :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfsub", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfmpy :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmpy", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffma :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffma_sc :
+Hexagon_float_floatfloatfloati32_Intrinsic<"HEXAGON_F2_sffma_sc", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffms :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffma_lib :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma_lib", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffms_lib :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms_lib", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfcmpeq :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpeq", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfcmpgt :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpgt", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfcmpge :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpge", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfcmpuo :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpuo", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfmax :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmax", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfmin :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmin", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sfclass :
+Hexagon_i32_floati32_Intrinsic<"HEXAGON_F2_sfclass", [IntrNoMem, Throws, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_F2_sfimm_p :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_p", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_F2_sfimm_n :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_n", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_F2_sffixupn :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupn", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffixupd :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupd", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_sffixupr :
+Hexagon_float_float_Intrinsic<"HEXAGON_F2_sffixupr", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfcmpeq :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpeq", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfcmpgt :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpgt", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfcmpge :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpge", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfcmpuo :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpuo", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfclass :
+Hexagon_i32_doublei32_Intrinsic<"HEXAGON_F2_dfclass", [IntrNoMem, Throws, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_F2_dfimm_p :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_p", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_F2_dfimm_n :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_n", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;
+
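+// FP conversions: sf/df are single/double precision; w/uw signed/unsigned
+// 32-bit and d/ud signed/unsigned 64-bit integers; the _chop variants
+// truncate toward zero instead of using the current rounding mode.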
+def int_hexagon_F2_conv_sf2df :
+Hexagon_double_float_Intrinsic<"HEXAGON_F2_conv_sf2df">;
+
+def int_hexagon_F2_conv_df2sf :
+Hexagon_float_double_Intrinsic<"HEXAGON_F2_conv_df2sf">;
+
+def int_hexagon_F2_conv_uw2sf :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
+
+def int_hexagon_F2_conv_uw2df :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_uw2df">;
+
+def int_hexagon_F2_conv_w2sf :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_w2sf">;
+
+def int_hexagon_F2_conv_w2df :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_w2df">;
+
+def int_hexagon_F2_conv_ud2sf :
+Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
+
+def int_hexagon_F2_conv_ud2df :
+Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_ud2df">;
+
+def int_hexagon_F2_conv_d2sf :
+Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_d2sf">;
+
+def int_hexagon_F2_conv_d2df :
+Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_d2df">;
+
+def int_hexagon_F2_conv_sf2uw :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
+
+def int_hexagon_F2_conv_sf2w :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w">;
+
+def int_hexagon_F2_conv_sf2ud :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
+
+def int_hexagon_F2_conv_sf2d :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d">;
+
+def int_hexagon_F2_conv_df2uw :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw">;
+
+def int_hexagon_F2_conv_df2w :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w">;
+
+def int_hexagon_F2_conv_df2ud :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud">;
+
+def int_hexagon_F2_conv_df2d :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d">;
+
+def int_hexagon_F2_conv_sf2uw_chop :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
+
+def int_hexagon_F2_conv_sf2w_chop :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
+
+def int_hexagon_F2_conv_sf2ud_chop :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
+
+def int_hexagon_F2_conv_sf2d_chop :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
+
+def int_hexagon_F2_conv_df2uw_chop :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
+
+def int_hexagon_F2_conv_df2w_chop :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
+
+def int_hexagon_F2_conv_df2ud_chop :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
+
+def int_hexagon_F2_conv_df2d_chop :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
+
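+// Shifts: asr/asl are arithmetic, lsr/lsl logical. S2_<op>_<amt>_<width>
+// takes the shift amount from a register (r) or an immediate (i) and
+// operates on a 32-bit register (r) or a 64-bit pair (p); the acc/nac/and/
+// or/xor suffixes fold the shifted value into the destination with +=, -=,
+// &=, |=, ^=.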
+def int_hexagon_S2_asr_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r">;
+
+def int_hexagon_S2_asl_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r">;
+
+def int_hexagon_S2_lsr_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r">;
+
+def int_hexagon_S2_lsl_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r">;
+
+def int_hexagon_S2_asr_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_p">;
+
+def int_hexagon_S2_asl_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_p">;
+
+def int_hexagon_S2_lsr_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p">;
+
+def int_hexagon_S2_lsl_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p">;
+
+def int_hexagon_S2_asr_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
+
+def int_hexagon_S2_asl_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
+
+def int_hexagon_S2_lsr_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
+
+def int_hexagon_S2_lsl_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
+
+def int_hexagon_S2_asr_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
+
+def int_hexagon_S2_asl_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
+
+def int_hexagon_S2_lsr_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
+
+def int_hexagon_S2_lsl_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
+
+def int_hexagon_S2_asr_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
+
+def int_hexagon_S2_asl_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
+
+def int_hexagon_S2_lsr_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
+
+def int_hexagon_S2_lsl_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
+
+def int_hexagon_S2_asr_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
+
+def int_hexagon_S2_asl_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
+
+def int_hexagon_S2_lsr_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
+
+def int_hexagon_S2_lsl_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
+
+def int_hexagon_S2_asr_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
+
+def int_hexagon_S2_asl_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
+
+def int_hexagon_S2_lsr_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
+
+def int_hexagon_S2_lsl_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
+
+def int_hexagon_S2_asr_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
+
+def int_hexagon_S2_asl_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
+
+def int_hexagon_S2_lsr_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
+
+def int_hexagon_S2_lsl_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
+
+def int_hexagon_S2_asr_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
+
+def int_hexagon_S2_asl_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
+
+def int_hexagon_S2_lsr_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
+
+def int_hexagon_S2_lsl_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
+
+def int_hexagon_S2_asr_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
+
+def int_hexagon_S2_asl_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
+
+def int_hexagon_S2_lsr_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
+
+def int_hexagon_S2_lsl_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
+
+def int_hexagon_S2_asr_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
+
+def int_hexagon_S2_asl_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
+
+def int_hexagon_S2_lsr_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
+
+def int_hexagon_S2_lsl_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
+
+def int_hexagon_S2_asr_r_r_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
+
+def int_hexagon_S2_asl_r_r_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
+
+def int_hexagon_S2_asr_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_lsr_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asl_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_lsr_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asl_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_r_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_r_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_p_xacc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_p_xacc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asr_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_lsr_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_asl_i_r_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_sat", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_r_rnd :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_p_rnd :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S4_lsli :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_lsli", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
+
+def int_hexagon_S2_addasl_rrri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_addasl_rrri", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_andi_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_ori_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_addi_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_subi_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_andi_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_ori_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_addi_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S4_subi_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_valignib :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignib", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_valignrb :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignrb">;
+
+def int_hexagon_S2_vspliceib :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vspliceib", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_vsplicerb :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vsplicerb">;
+
+def int_hexagon_S2_vsplatrh :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsplatrh">;
+
+def int_hexagon_S2_vsplatrb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_vsplatrb">;
+
+def int_hexagon_S2_insert :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_insert", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_S2_tableidxb_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_S2_tableidxh_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_S2_tableidxw_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_S2_tableidxd_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_A4_bitspliti :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitspliti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A4_bitsplit :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitsplit">;
+
+def int_hexagon_S4_extract :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_extract", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_extractu :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_extractu", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_insertp :
+Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S2_insertp", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_S4_extractp :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_extractp", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_extractup :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S2_extractup", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S2_insert_rp :
+Hexagon_i32_i32i32i64_Intrinsic<"HEXAGON_S2_insert_rp">;
+
+def int_hexagon_S4_extract_rp :
+Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S4_extract_rp">;
+
+def int_hexagon_S2_extractu_rp :
+Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S2_extractu_rp">;
+
+def int_hexagon_S2_insertp_rp :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_S2_insertp_rp">;
+
+def int_hexagon_S4_extractp_rp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_extractp_rp">;
+
+def int_hexagon_S2_extractup_rp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_extractup_rp">;
+
+def int_hexagon_S2_tstbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S4_ntstbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_setbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_togglebit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_clrbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_tstbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_r">;
+
+def int_hexagon_S4_ntstbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_r">;
+
+def int_hexagon_S2_setbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_r">;
+
+def int_hexagon_S2_togglebit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_r">;
+
+def int_hexagon_S2_clrbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_r">;
+
+def int_hexagon_S2_asr_i_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vh", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_lsr_i_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vh", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asl_i_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vh", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vh">;
+
+def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S5_asrhub_sat :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_sat", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S5_vasrhrnd_goodsyntax :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asl_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vh">;
+
+def int_hexagon_S2_lsr_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
+
+def int_hexagon_S2_lsl_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
+
+def int_hexagon_S2_asr_i_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vw", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_i_svw_trun :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_i_svw_trun", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_r_svw_trun :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
+
+def int_hexagon_S2_lsr_i_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vw", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asl_i_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vw", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_asr_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vw">;
+
+def int_hexagon_S2_asl_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vw">;
+
+def int_hexagon_S2_lsr_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
+
+def int_hexagon_S2_lsl_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
+
+def int_hexagon_S2_vrndpackwh :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwh">;
+
+def int_hexagon_S2_vrndpackwhs :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
+
+def int_hexagon_S2_vsxtbh :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxtbh">;
+
+def int_hexagon_S2_vzxtbh :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxtbh">;
+
+def int_hexagon_S2_vsathub :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathub">;
+
+def int_hexagon_S2_svsathub :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathub">;
+
+def int_hexagon_S2_svsathb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathb">;
+
+def int_hexagon_S2_vsathb :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathb">;
+
+def int_hexagon_S2_vtrunohb :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunohb">;
+
+def int_hexagon_S2_vtrunewh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunewh">;
+
+def int_hexagon_S2_vtrunowh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunowh">;
+
+def int_hexagon_S2_vtrunehb :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunehb">;
+
+def int_hexagon_S2_vsxthw :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxthw">;
+
+def int_hexagon_S2_vzxthw :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxthw">;
+
+def int_hexagon_S2_vsatwh :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwh">;
+
+def int_hexagon_S2_vsatwuh :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwuh">;
+
+def int_hexagon_S2_packhl :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_S2_packhl">;
+
+def int_hexagon_A2_swiz :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_swiz">;
+
+def int_hexagon_S2_vsathub_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
+
+def int_hexagon_S2_vsathb_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
+
+def int_hexagon_S2_vsatwh_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
+
+def int_hexagon_S2_vsatwuh_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
+
+def int_hexagon_S2_shuffob :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffob">;
+
+def int_hexagon_S2_shuffeb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeb">;
+
+def int_hexagon_S2_shuffoh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffoh">;
+
+def int_hexagon_S2_shuffeh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeh">;
+
+def int_hexagon_S5_popcountp :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S5_popcountp">;
+
+def int_hexagon_S4_parity :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_parity">;
+
+def int_hexagon_S2_parityp :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_S2_parityp">;
+
+def int_hexagon_S2_lfsp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_lfsp">;
+
+def int_hexagon_S2_clbnorm :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clbnorm">;
+
+def int_hexagon_S4_clbaddi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_clbaddi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S4_clbpnorm :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S4_clbpnorm">;
+
+def int_hexagon_S4_clbpaddi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S4_clbpaddi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S2_clb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clb">;
+
+def int_hexagon_S2_cl0 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl0">;
+
+def int_hexagon_S2_cl1 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl1">;
+
+def int_hexagon_S2_clbp :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_clbp">;
+
+def int_hexagon_S2_cl0p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl0p">;
+
+def int_hexagon_S2_cl1p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl1p">;
+
+def int_hexagon_S2_brev :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_brev">;
+
+def int_hexagon_S2_brevp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_brevp">;
+
+def int_hexagon_S2_ct0 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct0">;
+
+def int_hexagon_S2_ct1 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct1">;
+
+def int_hexagon_S2_ct0p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct0p">;
+
+def int_hexagon_S2_ct1p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct1p">;
+
+def int_hexagon_S2_interleave :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_interleave">;
+
+def int_hexagon_S2_deinterleave :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_deinterleave">;
+
+def int_hexagon_Y2_dcfetch :
+Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dcfetch", []>;
+
+def int_hexagon_Y2_dczeroa :
+Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dczeroa", []>;
+
+def int_hexagon_Y2_dccleana :
+Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dccleana", []>;
+
+def int_hexagon_Y2_dccleaninva :
+Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dccleaninva", []>;
+
+def int_hexagon_Y2_dcinva :
+Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dcinva", []>;
+
+def int_hexagon_Y4_l2fetch :
+Hexagon__ptri32_Intrinsic<"HEXAGON_Y4_l2fetch", []>;
+
+def int_hexagon_Y5_l2fetch :
+Hexagon__ptri64_Intrinsic<"HEXAGON_Y5_l2fetch", []>;
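+
+// The Y2/Y4/Y5 cache-maintenance and prefetch operations pass an explicitly
+// empty property list, overriding what is elsewhere the [IntrNoMem] default:
+// they exist solely for their memory side effects. Y4_l2fetch takes a 32-bit
+// prefetch descriptor, Y5_l2fetch a 64-bit one.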
+
+// V60 Scalar Instructions.
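+// (The S6_rol_* definitions below are the rotate-left forms; their _acc/_nac/
+// _and/_or/_xacc suffixes follow the same combine scheme as the shifts above.)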
+
+def int_hexagon_S6_rol_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S6_rol_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S6_rol_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S6_rol_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_S6_rol_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_r_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_p_xacc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_S6_rol_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+// V62 Scalar Instructions.
+
+def int_hexagon_M6_vabsdiffb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffb">;
+
+def int_hexagon_M6_vabsdiffub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffub">;
+
+def int_hexagon_S6_vsplatrbp :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S6_vsplatrbp">;
+
+def int_hexagon_S6_vtrunehb_ppp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;
+
+def int_hexagon_S6_vtrunohb_ppp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;
+
+// V65 Scalar Instructions.
+
+def int_hexagon_A6_vcmpbeq_notany :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A6_vcmpbeq_notany">;
+
+// V66 Scalar Instructions.
+
+def int_hexagon_M2_mnaci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mnaci">;
+
+def int_hexagon_F2_dfadd :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfadd", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfsub :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfsub", [IntrNoMem, Throws]>;
+
+def int_hexagon_S2_mask :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_mask", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
+
+// V67 Scalar Instructions.
+
+def int_hexagon_M7_dcmpyrw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyrw">;
+
+def int_hexagon_M7_dcmpyrw_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyrw_acc">;
+
+def int_hexagon_M7_dcmpyrwc :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyrwc">;
+
+def int_hexagon_M7_dcmpyrwc_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyrwc_acc">;
+
+def int_hexagon_M7_dcmpyiw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyiw">;
+
+def int_hexagon_M7_dcmpyiw_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyiw_acc">;
+
+def int_hexagon_M7_dcmpyiwc :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyiwc">;
+
+def int_hexagon_M7_dcmpyiwc_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyiwc_acc">;
+
+def int_hexagon_M7_vdmpy :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_vdmpy">;
+
+def int_hexagon_M7_vdmpy_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_vdmpy_acc">;
+
+def int_hexagon_M7_wcmpyrw :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrw">;
+
+def int_hexagon_M7_wcmpyrwc :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrwc">;
+
+def int_hexagon_M7_wcmpyiw :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiw">;
+
+def int_hexagon_M7_wcmpyiwc :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiwc">;
+
+def int_hexagon_M7_wcmpyrw_rnd :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrw_rnd">;
+
+def int_hexagon_M7_wcmpyrwc_rnd :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrwc_rnd">;
+
+def int_hexagon_M7_wcmpyiw_rnd :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiw_rnd">;
+
+def int_hexagon_M7_wcmpyiwc_rnd :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiwc_rnd">;
+
+def int_hexagon_A7_croundd_ri :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_A7_croundd_ri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A7_croundd_rr :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_A7_croundd_rr">;
+
+def int_hexagon_A7_clip :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A7_clip", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_A7_vclip :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_A7_vclip", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+def int_hexagon_F2_dfmax :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmax", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfmin :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmin", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfmpyfix :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmpyfix", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfmpyll :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmpyll", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfmpylh :
+Hexagon_double_doubledoubledouble_Intrinsic<"HEXAGON_F2_dfmpylh", [IntrNoMem, Throws]>;
+
+def int_hexagon_F2_dfmpyhh :
+Hexagon_double_doubledoubledouble_Intrinsic<"HEXAGON_F2_dfmpyhh", [IntrNoMem, Throws]>;
+
+// V60 HVX Instructions.
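+// HVX conventions from here on: v16i32 models one 64-byte HVX vector and
+// v32i32 a vector pair (the _dv forms); in the _128B variants, generated for
+// 128-byte vector mode, v32i32 is a single vector and v64i32 a pair, while
+// v64i1/v128i1 model the Q predicate registers. For example,
+// int_hexagon_V6_vaddw declares
+//   <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>).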
+
+def int_hexagon_V6_vS32b_qpred_ai :
+Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_qpred_ai_128B :
+Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_nqpred_ai :
+Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_nqpred_ai_128B :
+Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_nt_qpred_ai :
+Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
+Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_nt_nqpred_ai :
+Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
+Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
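+
+// The eight definitions above are the predicated (qpred) and negated-predicate
+// (nqpred) vector stores, including non-temporal (nt) flavors: roughly, only
+// the byte lanes selected by the predicate are written to the addressed
+// vector, hence [IntrWriteMem] in place of IntrNoMem.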
+
+def int_hexagon_V6_valignb :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignb">;
+
+def int_hexagon_V6_valignb_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignb_128B">;
+
+def int_hexagon_V6_vlalignb :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignb">;
+
+def int_hexagon_V6_vlalignb_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignb_128B">;
+
+def int_hexagon_V6_valignbi :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignbi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_valignbi_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignbi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vlalignbi :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignbi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vlalignbi_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignbi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vror :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vror">;
+
+def int_hexagon_V6_vror_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vror_128B">;
+
+def int_hexagon_V6_vunpackub :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackub">;
+
+def int_hexagon_V6_vunpackub_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackub_128B">;
+
+def int_hexagon_V6_vunpackb :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackb">;
+
+def int_hexagon_V6_vunpackb_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackb_128B">;
+
+def int_hexagon_V6_vunpackuh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackuh">;
+
+def int_hexagon_V6_vunpackuh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;
+
+def int_hexagon_V6_vunpackh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackh">;
+
+def int_hexagon_V6_vunpackh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackh_128B">;
+
+def int_hexagon_V6_vunpackob :
+Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackob">;
+
+def int_hexagon_V6_vunpackob_128B :
+Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackob_128B">;
+
+def int_hexagon_V6_vunpackoh :
+Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackoh">;
+
+def int_hexagon_V6_vunpackoh_128B :
+Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;
+
+def int_hexagon_V6_vpackeb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeb">;
+
+def int_hexagon_V6_vpackeb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeb_128B">;
+
+def int_hexagon_V6_vpackeh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeh">;
+
+def int_hexagon_V6_vpackeh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeh_128B">;
+
+def int_hexagon_V6_vpackob :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackob">;
+
+def int_hexagon_V6_vpackob_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackob_128B">;
+
+def int_hexagon_V6_vpackoh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackoh">;
+
+def int_hexagon_V6_vpackoh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackoh_128B">;
+
+def int_hexagon_V6_vpackhub_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhub_sat">;
+
+def int_hexagon_V6_vpackhub_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;
+
+def int_hexagon_V6_vpackhb_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhb_sat">;
+
+def int_hexagon_V6_vpackhb_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;
+
+def int_hexagon_V6_vpackwuh_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;
+
+def int_hexagon_V6_vpackwuh_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;
+
+def int_hexagon_V6_vpackwh_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwh_sat">;
+
+def int_hexagon_V6_vpackwh_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;
+
+def int_hexagon_V6_vzb :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzb">;
+
+def int_hexagon_V6_vzb_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzb_128B">;
+
+def int_hexagon_V6_vsb :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsb">;
+
+def int_hexagon_V6_vsb_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsb_128B">;
+
+def int_hexagon_V6_vzh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzh">;
+
+def int_hexagon_V6_vzh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzh_128B">;
+
+def int_hexagon_V6_vsh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsh">;
+
+def int_hexagon_V6_vsh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsh_128B">;
+
+def int_hexagon_V6_vdmpybus :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus">;
+
+def int_hexagon_V6_vdmpybus_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;
+
+def int_hexagon_V6_vdmpybus_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;
+
+def int_hexagon_V6_vdmpybus_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;
+
+def int_hexagon_V6_vdmpybus_dv :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;
+
+def int_hexagon_V6_vdmpybus_dv_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;
+
+def int_hexagon_V6_vdmpybus_dv_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;
+
+def int_hexagon_V6_vdmpybus_dv_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;
+
+def int_hexagon_V6_vdmpyhb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb">;
+
+def int_hexagon_V6_vdmpyhb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;
+
+def int_hexagon_V6_vdmpyhb_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;
+
+def int_hexagon_V6_vdmpyhb_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;
+
+def int_hexagon_V6_vdmpyhb_dv :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;
+
+def int_hexagon_V6_vdmpyhb_dv_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;
+
+def int_hexagon_V6_vdmpyhb_dv_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;
+
+def int_hexagon_V6_vdmpyhb_dv_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;
+
+def int_hexagon_V6_vdmpyhvsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;
+
+def int_hexagon_V6_vdmpyhvsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;
+
+def int_hexagon_V6_vdmpyhvsat_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;
+
+def int_hexagon_V6_vdmpyhvsat_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;
+
+def int_hexagon_V6_vdmpyhsat :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat">;
+
+def int_hexagon_V6_vdmpyhsat_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;
+
+def int_hexagon_V6_vdmpyhsat_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;
+
+def int_hexagon_V6_vdmpyhsat_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;
+
+def int_hexagon_V6_vdmpyhisat :
+Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat">;
+
+def int_hexagon_V6_vdmpyhisat_128B :
+Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;
+
+def int_hexagon_V6_vdmpyhisat_acc :
+Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;
+
+def int_hexagon_V6_vdmpyhisat_acc_128B :
+Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;
+
+def int_hexagon_V6_vdmpyhsusat :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;
+
+def int_hexagon_V6_vdmpyhsusat_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;
+
+def int_hexagon_V6_vdmpyhsusat_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;
+
+def int_hexagon_V6_vdmpyhsusat_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;
+
+def int_hexagon_V6_vdmpyhsuisat :
+Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;
+
+def int_hexagon_V6_vdmpyhsuisat_128B :
+Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;
+
+def int_hexagon_V6_vdmpyhsuisat_acc :
+Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;
+
+def int_hexagon_V6_vdmpyhsuisat_acc_128B :
+Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;
+
+def int_hexagon_V6_vtmpyb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb">;
+
+def int_hexagon_V6_vtmpyb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;
+
+def int_hexagon_V6_vtmpyb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;
+
+def int_hexagon_V6_vtmpyb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;
+
+def int_hexagon_V6_vtmpybus :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus">;
+
+def int_hexagon_V6_vtmpybus_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;
+
+def int_hexagon_V6_vtmpybus_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;
+
+def int_hexagon_V6_vtmpybus_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;
+
+def int_hexagon_V6_vtmpyhb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb">;
+
+def int_hexagon_V6_vtmpyhb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;
+
+def int_hexagon_V6_vtmpyhb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;
+
+def int_hexagon_V6_vtmpyhb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;
+
+def int_hexagon_V6_vrmpyub :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub">;
+
+def int_hexagon_V6_vrmpyub_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;
+
+def int_hexagon_V6_vrmpyub_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;
+
+def int_hexagon_V6_vrmpyub_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;
+
+def int_hexagon_V6_vrmpyubv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv">;
+
+def int_hexagon_V6_vrmpyubv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;
+
+def int_hexagon_V6_vrmpyubv_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;
+
+def int_hexagon_V6_vrmpyubv_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;
+
+def int_hexagon_V6_vrmpybv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv">;
+
+def int_hexagon_V6_vrmpybv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;
+
+def int_hexagon_V6_vrmpybv_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;
+
+def int_hexagon_V6_vrmpybv_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;
+
+def int_hexagon_V6_vrmpyubi :
+Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vrmpyubi_128B :
+Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vrmpyubi_acc :
+Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vrmpyubi_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vrmpybus :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus">;
+
+def int_hexagon_V6_vrmpybus_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;
+
+def int_hexagon_V6_vrmpybus_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;
+
+def int_hexagon_V6_vrmpybus_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;
+
+def int_hexagon_V6_vrmpybusi :
+Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vrmpybusi_128B :
+Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vrmpybusi_acc :
+Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vrmpybusi_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vrmpybusv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv">;
+
+def int_hexagon_V6_vrmpybusv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;
+
+def int_hexagon_V6_vrmpybusv_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;
+
+def int_hexagon_V6_vrmpybusv_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;
+
+def int_hexagon_V6_vdsaduh :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh">;
+
+def int_hexagon_V6_vdsaduh_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;
+
+def int_hexagon_V6_vdsaduh_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;
+
+def int_hexagon_V6_vdsaduh_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
+
+def int_hexagon_V6_vrsadubi :
+Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vrsadubi_128B :
+Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vrsadubi_acc :
+Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vrsadubi_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vasrw :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrw">;
+
+def int_hexagon_V6_vasrw_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_128B">;
+
+def int_hexagon_V6_vaslw :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslw">;
+
+def int_hexagon_V6_vaslw_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_128B">;
+
+def int_hexagon_V6_vlsrw :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrw">;
+
+def int_hexagon_V6_vlsrw_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrw_128B">;
+
+def int_hexagon_V6_vasrwv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrwv">;
+
+def int_hexagon_V6_vasrwv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrwv_128B">;
+
+def int_hexagon_V6_vaslwv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslwv">;
+
+def int_hexagon_V6_vaslwv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslwv_128B">;
+
+def int_hexagon_V6_vlsrwv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrwv">;
+
+def int_hexagon_V6_vlsrwv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
+
+def int_hexagon_V6_vasrh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrh">;
+
+def int_hexagon_V6_vasrh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_128B">;
+
+def int_hexagon_V6_vaslh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslh">;
+
+def int_hexagon_V6_vaslh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_128B">;
+
+def int_hexagon_V6_vlsrh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrh">;
+
+def int_hexagon_V6_vlsrh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrh_128B">;
+
+def int_hexagon_V6_vasrhv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrhv">;
+
+def int_hexagon_V6_vasrhv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrhv_128B">;
+
+def int_hexagon_V6_vaslhv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslhv">;
+
+def int_hexagon_V6_vaslhv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslhv_128B">;
+
+def int_hexagon_V6_vlsrhv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrhv">;
+
+def int_hexagon_V6_vlsrhv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;
+
+def int_hexagon_V6_vasrwh :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwh">;
+
+def int_hexagon_V6_vasrwh_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwh_128B">;
+
+def int_hexagon_V6_vasrwhsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat">;
+
+def int_hexagon_V6_vasrwhsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;
+
+def int_hexagon_V6_vasrwhrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;
+
+def int_hexagon_V6_vasrwhrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;
+
+def int_hexagon_V6_vasrwuhsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat">;
+
+def int_hexagon_V6_vasrwuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;
+
+def int_hexagon_V6_vroundwh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwh">;
+
+def int_hexagon_V6_vroundwh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwh_128B">;
+
+def int_hexagon_V6_vroundwuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwuh">;
+
+def int_hexagon_V6_vroundwuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;
+
+def int_hexagon_V6_vasrhubsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat">;
+
+def int_hexagon_V6_vasrhubsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;
+
+def int_hexagon_V6_vasrhubrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;
+
+def int_hexagon_V6_vasrhubrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;
+
+def int_hexagon_V6_vasrhbrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;
+
+def int_hexagon_V6_vasrhbrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;
+
+def int_hexagon_V6_vroundhb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhb">;
+
+def int_hexagon_V6_vroundhb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhb_128B">;
+
+def int_hexagon_V6_vroundhub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhub">;
+
+def int_hexagon_V6_vroundhub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhub_128B">;
+
+def int_hexagon_V6_vaslw_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc">;
+
+def int_hexagon_V6_vaslw_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;
+
+def int_hexagon_V6_vasrw_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc">;
+
+def int_hexagon_V6_vasrw_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;
+
+def int_hexagon_V6_vaddb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddb">;
+
+def int_hexagon_V6_vaddb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_128B">;
+
+def int_hexagon_V6_vsubb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubb">;
+
+def int_hexagon_V6_vsubb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_128B">;
+
+def int_hexagon_V6_vaddb_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_dv">;
+
+def int_hexagon_V6_vaddb_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
+
+def int_hexagon_V6_vsubb_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_dv">;
+
+def int_hexagon_V6_vsubb_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
+
+def int_hexagon_V6_vaddh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddh">;
+
+def int_hexagon_V6_vaddh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_128B">;
+
+def int_hexagon_V6_vsubh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubh">;
+
+def int_hexagon_V6_vsubh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_128B">;
+
+def int_hexagon_V6_vaddh_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_dv">;
+
+def int_hexagon_V6_vaddh_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
+
+def int_hexagon_V6_vsubh_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_dv">;
+
+def int_hexagon_V6_vsubh_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
+
+def int_hexagon_V6_vaddw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddw">;
+
+def int_hexagon_V6_vaddw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_128B">;
+
+def int_hexagon_V6_vsubw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubw">;
+
+def int_hexagon_V6_vsubw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_128B">;
+
+def int_hexagon_V6_vaddw_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_dv">;
+
+def int_hexagon_V6_vaddw_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
+
+def int_hexagon_V6_vsubw_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_dv">;
+
+def int_hexagon_V6_vsubw_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
+
+def int_hexagon_V6_vaddubsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubsat">;
+
+def int_hexagon_V6_vaddubsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;
+
+def int_hexagon_V6_vaddubsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;
+
+def int_hexagon_V6_vaddubsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;
+
+def int_hexagon_V6_vsububsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububsat">;
+
+def int_hexagon_V6_vsububsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_128B">;
+
+def int_hexagon_V6_vsububsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_dv">;
+
+def int_hexagon_V6_vsububsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;
+
+def int_hexagon_V6_vadduhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhsat">;
+
+def int_hexagon_V6_vadduhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;
+
+def int_hexagon_V6_vadduhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;
+
+def int_hexagon_V6_vadduhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;
+
+def int_hexagon_V6_vsubuhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhsat">;
+
+def int_hexagon_V6_vsubuhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;
+
+def int_hexagon_V6_vsubuhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;
+
+def int_hexagon_V6_vsubuhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;
+
+def int_hexagon_V6_vaddhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhsat">;
+
+def int_hexagon_V6_vaddhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;
+
+def int_hexagon_V6_vaddhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;
+
+def int_hexagon_V6_vaddhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;
+
+def int_hexagon_V6_vsubhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhsat">;
+
+def int_hexagon_V6_vsubhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;
+
+def int_hexagon_V6_vsubhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;
+
+def int_hexagon_V6_vsubhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;
+
+def int_hexagon_V6_vaddwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwsat">;
+
+def int_hexagon_V6_vaddwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;
+
+def int_hexagon_V6_vaddwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;
+
+def int_hexagon_V6_vaddwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;
+
+def int_hexagon_V6_vsubwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwsat">;
+
+def int_hexagon_V6_vsubwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;
+
+def int_hexagon_V6_vsubwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;
+
+def int_hexagon_V6_vsubwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
+
+def int_hexagon_V6_vavgub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgub">;
+
+def int_hexagon_V6_vavgub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgub_128B">;
+
+def int_hexagon_V6_vavgubrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgubrnd">;
+
+def int_hexagon_V6_vavgubrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;
+
+def int_hexagon_V6_vavguh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguh">;
+
+def int_hexagon_V6_vavguh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguh_128B">;
+
+def int_hexagon_V6_vavguhrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguhrnd">;
+
+def int_hexagon_V6_vavguhrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;
+
+def int_hexagon_V6_vavgh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgh">;
+
+def int_hexagon_V6_vavgh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgh_128B">;
+
+def int_hexagon_V6_vavghrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavghrnd">;
+
+def int_hexagon_V6_vavghrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;
+
+def int_hexagon_V6_vnavgh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgh">;
+
+def int_hexagon_V6_vnavgh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgh_128B">;
+
+def int_hexagon_V6_vavgw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgw">;
+
+def int_hexagon_V6_vavgw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgw_128B">;
+
+def int_hexagon_V6_vavgwrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgwrnd">;
+
+def int_hexagon_V6_vavgwrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;
+
+def int_hexagon_V6_vnavgw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgw">;
+
+def int_hexagon_V6_vnavgw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgw_128B">;
+
+def int_hexagon_V6_vabsdiffub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffub">;
+
+def int_hexagon_V6_vabsdiffub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;
+
+def int_hexagon_V6_vabsdiffuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffuh">;
+
+def int_hexagon_V6_vabsdiffuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;
+
+def int_hexagon_V6_vabsdiffh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffh">;
+
+def int_hexagon_V6_vabsdiffh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;
+
+def int_hexagon_V6_vabsdiffw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffw">;
+
+def int_hexagon_V6_vabsdiffw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;
+
+def int_hexagon_V6_vnavgub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgub">;
+
+def int_hexagon_V6_vnavgub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgub_128B">;
+
+def int_hexagon_V6_vaddubh :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh">;
+
+def int_hexagon_V6_vaddubh_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_128B">;
+
+def int_hexagon_V6_vsububh :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububh">;
+
+def int_hexagon_V6_vsububh_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububh_128B">;
+
+def int_hexagon_V6_vaddhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw">;
+
+def int_hexagon_V6_vaddhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_128B">;
+
+def int_hexagon_V6_vsubhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhw">;
+
+def int_hexagon_V6_vsubhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhw_128B">;
+
+def int_hexagon_V6_vadduhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw">;
+
+def int_hexagon_V6_vadduhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_128B">;
+
+def int_hexagon_V6_vsubuhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhw">;
+
+def int_hexagon_V6_vsubuhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;
+
+def int_hexagon_V6_vd0 :
+Hexagon_v16i32__Intrinsic<"HEXAGON_V6_vd0">;
+
+def int_hexagon_V6_vd0_128B :
+Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vd0_128B">;
+
+def int_hexagon_V6_vaddbq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vaddbq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vsubbq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vsubbq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vaddbnq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vaddbnq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vsubbnq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vsubbnq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vaddhq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vaddhq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vsubhq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vsubhq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vaddhnq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vaddhnq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vsubhnq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vsubhnq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vaddwq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vaddwq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vsubwq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vsubwq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vaddwnq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vaddwnq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vsubwnq :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vsubwnq_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
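+
+// The q/nq pairs above are the predicated adds and subtracts: roughly,
+// "if (Q) Vx.b += Vu.b" for vaddbq, with the nq variants acting on clear
+// predicate bits instead; the predicate vector is the extra leading operand.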
+
+def int_hexagon_V6_vabsh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh">;
+
+def int_hexagon_V6_vabsh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_128B">;
+
+def int_hexagon_V6_vabsh_sat :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh_sat">;
+
+def int_hexagon_V6_vabsh_sat_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;
+
+def int_hexagon_V6_vabsw :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw">;
+
+def int_hexagon_V6_vabsw_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_128B">;
+
+def int_hexagon_V6_vabsw_sat :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw_sat">;
+
+def int_hexagon_V6_vabsw_sat_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;
+
+def int_hexagon_V6_vmpybv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv">;
+
+def int_hexagon_V6_vmpybv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_128B">;
+
+def int_hexagon_V6_vmpybv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv_acc">;
+
+def int_hexagon_V6_vmpybv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;
+
+def int_hexagon_V6_vmpyubv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv">;
+
+def int_hexagon_V6_vmpyubv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;
+
+def int_hexagon_V6_vmpyubv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;
+
+def int_hexagon_V6_vmpyubv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;
+
+def int_hexagon_V6_vmpybusv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv">;
+
+def int_hexagon_V6_vmpybusv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;
+
+def int_hexagon_V6_vmpybusv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;
+
+def int_hexagon_V6_vmpybusv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;
+
+def int_hexagon_V6_vmpabusv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabusv">;
+
+def int_hexagon_V6_vmpabusv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;
+
+def int_hexagon_V6_vmpabuuv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabuuv">;
+
+def int_hexagon_V6_vmpabuuv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;
+
+def int_hexagon_V6_vmpyhv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv">;
+
+def int_hexagon_V6_vmpyhv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;
+
+def int_hexagon_V6_vmpyhv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;
+
+def int_hexagon_V6_vmpyhv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;
+
+def int_hexagon_V6_vmpyuhv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv">;
+
+def int_hexagon_V6_vmpyuhv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;
+
+def int_hexagon_V6_vmpyuhv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;
+
+def int_hexagon_V6_vmpyuhv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
+
+def int_hexagon_V6_vmpyhvsrs :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;
+
+def int_hexagon_V6_vmpyhvsrs_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;
+
+def int_hexagon_V6_vmpyhus :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus">;
+
+def int_hexagon_V6_vmpyhus_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;
+
+def int_hexagon_V6_vmpyhus_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;
+
+def int_hexagon_V6_vmpyhus_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;
+
+def int_hexagon_V6_vmpyih :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih">;
+
+def int_hexagon_V6_vmpyih_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_128B">;
+
+def int_hexagon_V6_vmpyih_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih_acc">;
+
+def int_hexagon_V6_vmpyih_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;
+
+def int_hexagon_V6_vmpyewuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh">;
+
+def int_hexagon_V6_vmpyewuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;
+
+def int_hexagon_V6_vmpyowh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh">;
+
+def int_hexagon_V6_vmpyowh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;
+
+def int_hexagon_V6_vmpyowh_rnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;
+
+def int_hexagon_V6_vmpyowh_rnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;
+
+def int_hexagon_V6_vmpyowh_sacc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;
+
+def int_hexagon_V6_vmpyowh_sacc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;
+
+def int_hexagon_V6_vmpyowh_rnd_sacc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;
+
+def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;
+
+def int_hexagon_V6_vmpyieoh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyieoh">;
+
+def int_hexagon_V6_vmpyieoh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;
+
+def int_hexagon_V6_vmpyiewuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh">;
+
+def int_hexagon_V6_vmpyiewuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;
+
+def int_hexagon_V6_vmpyiowh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiowh">;
+
+def int_hexagon_V6_vmpyiowh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;
+
+def int_hexagon_V6_vmpyiewh_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;
+
+def int_hexagon_V6_vmpyiewh_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;
+
+def int_hexagon_V6_vmpyiewuh_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;
+
+def int_hexagon_V6_vmpyiewuh_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;
+
+def int_hexagon_V6_vmpyub :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub">;
+
+def int_hexagon_V6_vmpyub_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_128B">;
+
+def int_hexagon_V6_vmpyub_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc">;
+
+def int_hexagon_V6_vmpyub_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;
+
+def int_hexagon_V6_vmpybus :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus">;
+
+def int_hexagon_V6_vmpybus_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_128B">;
+
+def int_hexagon_V6_vmpybus_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc">;
+
+def int_hexagon_V6_vmpybus_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;
+
+def int_hexagon_V6_vmpabus :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus">;
+
+def int_hexagon_V6_vmpabus_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_128B">;
+
+def int_hexagon_V6_vmpabus_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc">;
+
+def int_hexagon_V6_vmpabus_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;
+
+def int_hexagon_V6_vmpahb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb">;
+
+def int_hexagon_V6_vmpahb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_128B">;
+
+def int_hexagon_V6_vmpahb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc">;
+
+def int_hexagon_V6_vmpahb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;
+
+def int_hexagon_V6_vmpyh :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh">;
+
+def int_hexagon_V6_vmpyh_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_128B">;
+
+def int_hexagon_V6_vmpyhsat_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;
+
+def int_hexagon_V6_vmpyhsat_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;
+
+def int_hexagon_V6_vmpyhss :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhss">;
+
+def int_hexagon_V6_vmpyhss_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;
+
+def int_hexagon_V6_vmpyhsrs :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs">;
+
+def int_hexagon_V6_vmpyhsrs_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;
+
+def int_hexagon_V6_vmpyuh :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh">;
+
+def int_hexagon_V6_vmpyuh_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;
+
+def int_hexagon_V6_vmpyuh_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;
+
+def int_hexagon_V6_vmpyuh_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;
+
+def int_hexagon_V6_vmpyihb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb">;
+
+def int_hexagon_V6_vmpyihb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;
+
+def int_hexagon_V6_vmpyihb_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;
+
+def int_hexagon_V6_vmpyihb_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;
+
+def int_hexagon_V6_vmpyiwb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb">;
+
+def int_hexagon_V6_vmpyiwb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;
+
+def int_hexagon_V6_vmpyiwb_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;
+
+def int_hexagon_V6_vmpyiwb_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;
+
+def int_hexagon_V6_vmpyiwh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh">;
+
+def int_hexagon_V6_vmpyiwh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;
+
+def int_hexagon_V6_vmpyiwh_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;
+
+def int_hexagon_V6_vmpyiwh_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;
+
+def int_hexagon_V6_vand :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vand">;
+
+def int_hexagon_V6_vand_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vand_128B">;
+
+def int_hexagon_V6_vor :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vor">;
+
+def int_hexagon_V6_vor_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vor_128B">;
+
+def int_hexagon_V6_vxor :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vxor">;
+
+def int_hexagon_V6_vxor_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vxor_128B">;
+
+def int_hexagon_V6_vnot :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnot">;
+
+def int_hexagon_V6_vnot_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnot_128B">;
+
+def int_hexagon_V6_vandqrt :
+Hexagon_custom_v16i32_v64i1i32_Intrinsic;
+
+def int_hexagon_V6_vandqrt_128B :
+Hexagon_custom_v32i32_v128i1i32_Intrinsic_128B;
+
+def int_hexagon_V6_vandqrt_acc :
+Hexagon_custom_v16i32_v16i32v64i1i32_Intrinsic;
+
+def int_hexagon_V6_vandqrt_acc_128B :
+Hexagon_custom_v32i32_v32i32v128i1i32_Intrinsic_128B;
+
+def int_hexagon_V6_vandvrt :
+Hexagon_custom_v64i1_v16i32i32_Intrinsic;
+
+def int_hexagon_V6_vandvrt_128B :
+Hexagon_custom_v128i1_v32i32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vandvrt_acc :
+Hexagon_custom_v64i1_v64i1v16i32i32_Intrinsic;
+
+def int_hexagon_V6_vandvrt_acc_128B :
+Hexagon_custom_v128i1_v128i1v32i32i32_Intrinsic_128B;
+
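+// vandqrt and vandvrt move values between the two register files: vandqrt
+// builds a plain HVX vector from a predicate and a scalar mask, and vandvrt
+// derives a predicate from a vector and a scalar mask, hence the mixed
+// v64i1/vector signatures (the _acc forms fold into an existing result).
+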
+def int_hexagon_V6_vgtw :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtw_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtw_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtw_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtw_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtw_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtw_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtw_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqw :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqw_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqw_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqw_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqw_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqw_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqw_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqw_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgth :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgth_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgth_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgth_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgth_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgth_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgth_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgth_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqh :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqh_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqh_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqh_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqh_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqh_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqh_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqh_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtb :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtb_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtb_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtb_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtb_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtb_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtb_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtb_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqb :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqb_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqb_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqb_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqb_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqb_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_veqb_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_veqb_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuw :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuw_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuw_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuw_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuw_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuw_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuw_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuw_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuh :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuh_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuh_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuh_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuh_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuh_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtuh_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtuh_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtub :
+Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtub_128B :
+Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtub_and :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtub_and_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtub_or :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtub_or_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vgtub_xor :
+Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vgtub_xor_128B :
+Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+
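+// The compare intrinsics above yield predicate results directly; their _and,
+// _or, and _xor variants take an extra leading predicate operand and combine
+// the comparison into it, matching the accumulating Qx &= / |= / ^= forms of
+// the underlying vcmp instructions.
+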
+def int_hexagon_V6_pred_or :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_pred_or_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_pred_and :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_pred_and_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_pred_not :
+Hexagon_custom_v64i1_v64i1_Intrinsic;
+
+def int_hexagon_V6_pred_not_128B :
+Hexagon_custom_v128i1_v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_pred_xor :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_pred_xor_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_pred_and_n :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_pred_and_n_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_pred_or_n :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_pred_or_n_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_pred_scalar2 :
+Hexagon_custom_v64i1_i32_Intrinsic;
+
+def int_hexagon_V6_pred_scalar2_128B :
+Hexagon_custom_v128i1_i32_Intrinsic_128B;
+
+def int_hexagon_V6_vmux :
+Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vmux_128B :
+Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vswap :
+Hexagon_custom_v32i32_v64i1v16i32v16i32_Intrinsic;
+
+def int_hexagon_V6_vswap_128B :
+Hexagon_custom_v64i32_v128i1v32i32v32i32_Intrinsic_128B;
+
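+// vmux selects lanes from its two vector operands under a predicate, while
+// vswap exchanges lanes and returns both results, which is why its return
+// type is a double-width v32i32 pair.
+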
+def int_hexagon_V6_vmaxub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxub">;
+
+def int_hexagon_V6_vmaxub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxub_128B">;
+
+def int_hexagon_V6_vminub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminub">;
+
+def int_hexagon_V6_vminub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminub_128B">;
+
+def int_hexagon_V6_vmaxuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxuh">;
+
+def int_hexagon_V6_vmaxuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;
+
+def int_hexagon_V6_vminuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminuh">;
+
+def int_hexagon_V6_vminuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminuh_128B">;
+
+def int_hexagon_V6_vmaxh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxh">;
+
+def int_hexagon_V6_vmaxh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxh_128B">;
+
+def int_hexagon_V6_vminh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminh">;
+
+def int_hexagon_V6_vminh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminh_128B">;
+
+def int_hexagon_V6_vmaxw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxw">;
+
+def int_hexagon_V6_vmaxw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxw_128B">;
+
+def int_hexagon_V6_vminw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminw">;
+
+def int_hexagon_V6_vminw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminw_128B">;
+
+def int_hexagon_V6_vsathub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsathub">;
+
+def int_hexagon_V6_vsathub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsathub_128B">;
+
+def int_hexagon_V6_vsatwh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatwh">;
+
+def int_hexagon_V6_vsatwh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatwh_128B">;
+
+def int_hexagon_V6_vshuffeb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffeb">;
+
+def int_hexagon_V6_vshuffeb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;
+
+def int_hexagon_V6_vshuffob :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffob">;
+
+def int_hexagon_V6_vshuffob_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffob_128B">;
+
+def int_hexagon_V6_vshufeh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufeh">;
+
+def int_hexagon_V6_vshufeh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufeh_128B">;
+
+def int_hexagon_V6_vshufoh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoh">;
+
+def int_hexagon_V6_vshufoh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoh_128B">;
+
+def int_hexagon_V6_vshuffvdd :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd">;
+
+def int_hexagon_V6_vshuffvdd_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;
+
+def int_hexagon_V6_vdealvdd :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdealvdd">;
+
+def int_hexagon_V6_vdealvdd_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;
+
+def int_hexagon_V6_vshufoeh :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeh">;
+
+def int_hexagon_V6_vshufoeh_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;
+
+def int_hexagon_V6_vshufoeb :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeb">;
+
+def int_hexagon_V6_vshufoeb_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;
+
+def int_hexagon_V6_vdealh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealh">;
+
+def int_hexagon_V6_vdealh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealh_128B">;
+
+def int_hexagon_V6_vdealb :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealb">;
+
+def int_hexagon_V6_vdealb_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealb_128B">;
+
+def int_hexagon_V6_vdealb4w :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdealb4w">;
+
+def int_hexagon_V6_vdealb4w_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;
+
+def int_hexagon_V6_vshuffh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffh">;
+
+def int_hexagon_V6_vshuffh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffh_128B">;
+
+def int_hexagon_V6_vshuffb :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffb">;
+
+def int_hexagon_V6_vshuffb_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffb_128B">;
+
+def int_hexagon_V6_extractw :
+Hexagon_i32_v16i32i32_Intrinsic<"HEXAGON_V6_extractw">;
+
+def int_hexagon_V6_extractw_128B :
+Hexagon_i32_v32i32i32_Intrinsic<"HEXAGON_V6_extractw_128B">;
+
+def int_hexagon_V6_vinsertwr :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vinsertwr">;
+
+def int_hexagon_V6_vinsertwr_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;
+
+def int_hexagon_V6_lvsplatw :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw">;
+
+def int_hexagon_V6_lvsplatw_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
+
+def int_hexagon_V6_vassignp :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassignp">;
+
+def int_hexagon_V6_vassignp_128B :
+Hexagon_v64i32_v64i32_Intrinsic<"HEXAGON_V6_vassignp_128B">;
+
+def int_hexagon_V6_vassign :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vassign">;
+
+def int_hexagon_V6_vassign_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassign_128B">;
+
+def int_hexagon_V6_vcombine :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcombine">;
+
+def int_hexagon_V6_vcombine_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcombine_128B">;
+
+def int_hexagon_V6_vdelta :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdelta">;
+
+def int_hexagon_V6_vdelta_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdelta_128B">;
+
+def int_hexagon_V6_vrdelta :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrdelta">;
+
+def int_hexagon_V6_vrdelta_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrdelta_128B">;
+
+def int_hexagon_V6_vcl0w :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0w">;
+
+def int_hexagon_V6_vcl0w_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0w_128B">;
+
+def int_hexagon_V6_vcl0h :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0h">;
+
+def int_hexagon_V6_vcl0h_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0h_128B">;
+
+def int_hexagon_V6_vnormamtw :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamtw">;
+
+def int_hexagon_V6_vnormamtw_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;
+
+def int_hexagon_V6_vnormamth :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamth">;
+
+def int_hexagon_V6_vnormamth_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamth_128B">;
+
+def int_hexagon_V6_vpopcounth :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vpopcounth">;
+
+def int_hexagon_V6_vpopcounth_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;
+
+def int_hexagon_V6_vlutvvb :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb">;
+
+def int_hexagon_V6_vlutvvb_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;
+
+def int_hexagon_V6_vlutvvb_oracc :
+Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;
+
+def int_hexagon_V6_vlutvvb_oracc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;
+
+def int_hexagon_V6_vlutvwh :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh">;
+
+def int_hexagon_V6_vlutvwh_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;
+
+def int_hexagon_V6_vlutvwh_oracc :
+Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;
+
+def int_hexagon_V6_vlutvwh_oracc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;
+
+def int_hexagon_V6_hi :
+Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_hi">;
+
+def int_hexagon_V6_hi_128B :
+Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_hi_128B">;
+
+def int_hexagon_V6_lo :
+Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_lo">;
+
+def int_hexagon_V6_lo_128B :
+Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_lo_128B">;
+
+// V62 HVX Instructions.
+
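+// As a reference for reading these defs: int_hexagon_V6_vlsrb below becomes
+// the intrinsic llvm.hexagon.V6.vlsrb, declared roughly as
+//   declare <16 x i32> @llvm.hexagon.V6.vlsrb(<16 x i32>, i32)
+// (an illustrative sketch; the exact attributes come from the shared class
+// definitions).
+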
+def int_hexagon_V6_vlsrb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrb">;
+
+def int_hexagon_V6_vlsrb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrb_128B">;
+
+def int_hexagon_V6_vasrwuhrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">;
+
+def int_hexagon_V6_vasrwuhrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">;
+
+def int_hexagon_V6_vasruwuhrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">;
+
+def int_hexagon_V6_vasruwuhrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">;
+
+def int_hexagon_V6_vasrhbsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat">;
+
+def int_hexagon_V6_vasrhbsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">;
+
+def int_hexagon_V6_vrounduwuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduwuh">;
+
+def int_hexagon_V6_vrounduwuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">;
+
+def int_hexagon_V6_vrounduhub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduhub">;
+
+def int_hexagon_V6_vrounduhub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduhub_128B">;
+
+def int_hexagon_V6_vadduwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduwsat">;
+
+def int_hexagon_V6_vadduwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_128B">;
+
+def int_hexagon_V6_vadduwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv">;
+
+def int_hexagon_V6_vadduwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;
+
+def int_hexagon_V6_vsubuwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuwsat">;
+
+def int_hexagon_V6_vsubuwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">;
+
+def int_hexagon_V6_vsubuwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">;
+
+def int_hexagon_V6_vsubuwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">;
+
+def int_hexagon_V6_vaddbsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbsat">;
+
+def int_hexagon_V6_vaddbsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_128B">;
+
+def int_hexagon_V6_vaddbsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv">;
+
+def int_hexagon_V6_vaddbsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">;
+
+def int_hexagon_V6_vsubbsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbsat">;
+
+def int_hexagon_V6_vsubbsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_128B">;
+
+def int_hexagon_V6_vsubbsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv">;
+
+def int_hexagon_V6_vsubbsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">;
+
+def int_hexagon_V6_vaddcarry :
+Hexagon_custom_v16i32v64i1_v16i32v16i32v64i1_Intrinsic;
+
+def int_hexagon_V6_vaddcarry_128B :
+Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_vsubcarry :
+Hexagon_custom_v16i32v64i1_v16i32v16i32v64i1_Intrinsic;
+
+def int_hexagon_V6_vsubcarry_128B :
+Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B;
+
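+// vaddcarry and vsubcarry are two-result intrinsics: the doubled return
+// segment in the class name (v16i32v64i1) encodes a {vector, predicate}
+// pair, i.e. the sum or difference plus a carry-out Q register.
+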
+def int_hexagon_V6_vaddububb_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddububb_sat">;
+
+def int_hexagon_V6_vaddububb_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">;
+
+def int_hexagon_V6_vsubububb_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubububb_sat">;
+
+def int_hexagon_V6_vsubububb_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">;
+
+def int_hexagon_V6_vaddhw_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw_acc">;
+
+def int_hexagon_V6_vaddhw_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">;
+
+def int_hexagon_V6_vadduhw_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw_acc">;
+
+def int_hexagon_V6_vadduhw_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">;
+
+def int_hexagon_V6_vaddubh_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh_acc">;
+
+def int_hexagon_V6_vaddubh_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">;
+
+def int_hexagon_V6_vmpyewuh_64 :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64">;
+
+def int_hexagon_V6_vmpyewuh_64_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">;
+
+def int_hexagon_V6_vmpyowh_64_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">;
+
+def int_hexagon_V6_vmpyowh_64_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">;
+
+def int_hexagon_V6_vmpauhb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb">;
+
+def int_hexagon_V6_vmpauhb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_128B">;
+
+def int_hexagon_V6_vmpauhb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc">;
+
+def int_hexagon_V6_vmpauhb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">;
+
+def int_hexagon_V6_vmpyiwub :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub">;
+
+def int_hexagon_V6_vmpyiwub_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">;
+
+def int_hexagon_V6_vmpyiwub_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">;
+
+def int_hexagon_V6_vmpyiwub_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">;
+
+def int_hexagon_V6_vandnqrt :
+Hexagon_custom_v16i32_v64i1i32_Intrinsic;
+
+def int_hexagon_V6_vandnqrt_128B :
+Hexagon_custom_v32i32_v128i1i32_Intrinsic_128B;
+
+def int_hexagon_V6_vandnqrt_acc :
+Hexagon_custom_v16i32_v16i32v64i1i32_Intrinsic;
+
+def int_hexagon_V6_vandnqrt_acc_128B :
+Hexagon_custom_v32i32_v32i32v128i1i32_Intrinsic_128B;
+
+def int_hexagon_V6_vandvqv :
+Hexagon_custom_v16i32_v64i1v16i32_Intrinsic;
+
+def int_hexagon_V6_vandvqv_128B :
+Hexagon_custom_v32i32_v128i1v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_vandvnqv :
+Hexagon_custom_v16i32_v64i1v16i32_Intrinsic;
+
+def int_hexagon_V6_vandvnqv_128B :
+Hexagon_custom_v32i32_v128i1v32i32_Intrinsic_128B;
+
+def int_hexagon_V6_pred_scalar2v2 :
+Hexagon_custom_v64i1_i32_Intrinsic;
+
+def int_hexagon_V6_pred_scalar2v2_128B :
+Hexagon_custom_v128i1_i32_Intrinsic_128B;
+
+def int_hexagon_V6_shuffeqw :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_shuffeqw_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_shuffeqh :
+Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+
+def int_hexagon_V6_shuffeqh_128B :
+Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_vmaxb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxb">;
+
+def int_hexagon_V6_vmaxb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxb_128B">;
+
+def int_hexagon_V6_vminb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminb">;
+
+def int_hexagon_V6_vminb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminb_128B">;
+
+def int_hexagon_V6_vsatuwuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatuwuh">;
+
+def int_hexagon_V6_vsatuwuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">;
+
+def int_hexagon_V6_lvsplath :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplath">;
+
+def int_hexagon_V6_lvsplath_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplath_128B">;
+
+def int_hexagon_V6_lvsplatb :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb">;
+
+def int_hexagon_V6_lvsplatb_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb_128B">;
+
+def int_hexagon_V6_vaddclbw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbw">;
+
+def int_hexagon_V6_vaddclbw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbw_128B">;
+
+def int_hexagon_V6_vaddclbh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbh">;
+
+def int_hexagon_V6_vaddclbh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbh_128B">;
+
+def int_hexagon_V6_vlutvvbi :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vlutvvbi_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vlutvvb_oracci :
+Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vlutvvb_oracci_128B :
+Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vlutvwhi :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vlutvwhi_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+def int_hexagon_V6_vlutvwh_oracci :
+Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+def int_hexagon_V6_vlutvwh_oracci_128B :
+Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
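+// ImmArg<ArgIndex<N>> requires operand N (zero-based) to be a compile-time
+// constant; for the vlutvvbi/vlutvwhi families above it pins the LUT select
+// operand, which the instruction encodes as an immediate field. ArgIndex<N>
+// is the current spelling of what older .td files wrote as a bare ImmArg<N>.
+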
+def int_hexagon_V6_vlutvvb_nm :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm">;
+
+def int_hexagon_V6_vlutvvb_nm_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">;
+
+def int_hexagon_V6_vlutvwh_nm :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm">;
+
+def int_hexagon_V6_vlutvwh_nm_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">;
+
+// V65 HVX Instructions.
+
+def int_hexagon_V6_vasruwuhsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat">;
+
+def int_hexagon_V6_vasruwuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat_128B">;
+
+def int_hexagon_V6_vasruhubsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat">;
+
+def int_hexagon_V6_vasruhubsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat_128B">;
+
+def int_hexagon_V6_vasruhubrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat">;
+
+def int_hexagon_V6_vasruhubrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat_128B">;
+
+def int_hexagon_V6_vaslh_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc">;
+
+def int_hexagon_V6_vaslh_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc_128B">;
+
+def int_hexagon_V6_vasrh_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc">;
+
+def int_hexagon_V6_vasrh_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc_128B">;
+
+def int_hexagon_V6_vavguw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguw">;
+
+def int_hexagon_V6_vavguw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguw_128B">;
+
+def int_hexagon_V6_vavguwrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguwrnd">;
+
+def int_hexagon_V6_vavguwrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguwrnd_128B">;
+
+def int_hexagon_V6_vavgb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgb">;
+
+def int_hexagon_V6_vavgb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgb_128B">;
+
+def int_hexagon_V6_vavgbrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgbrnd">;
+
+def int_hexagon_V6_vavgbrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgbrnd_128B">;
+
+def int_hexagon_V6_vnavgb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgb">;
+
+def int_hexagon_V6_vnavgb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgb_128B">;
+
+def int_hexagon_V6_vdd0 :
+Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vdd0">;
+
+def int_hexagon_V6_vdd0_128B :
+Hexagon_v64i32__Intrinsic<"HEXAGON_V6_vdd0_128B">;
+
+def int_hexagon_V6_vabsb :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb">;
+
+def int_hexagon_V6_vabsb_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_128B">;
+
+def int_hexagon_V6_vabsb_sat :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb_sat">;
+
+def int_hexagon_V6_vabsb_sat_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_sat_128B">;
+
+def int_hexagon_V6_vmpabuu :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu">;
+
+def int_hexagon_V6_vmpabuu_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_128B">;
+
+def int_hexagon_V6_vmpabuu_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc">;
+
+def int_hexagon_V6_vmpabuu_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc_128B">;
+
+def int_hexagon_V6_vmpyh_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc">;
+
+def int_hexagon_V6_vmpyh_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc_128B">;
+
+def int_hexagon_V6_vmpahhsat :
+Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat">;
+
+def int_hexagon_V6_vmpahhsat_128B :
+Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat_128B">;
+
+def int_hexagon_V6_vmpauhuhsat :
+Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat">;
+
+def int_hexagon_V6_vmpauhuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat_128B">;
+
+def int_hexagon_V6_vmpsuhuhsat :
+Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat">;
+
+def int_hexagon_V6_vmpsuhuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat_128B">;
+
+def int_hexagon_V6_vlut4 :
+Hexagon_v16i32_v16i32i64_Intrinsic<"HEXAGON_V6_vlut4">;
+
+def int_hexagon_V6_vlut4_128B :
+Hexagon_v32i32_v32i32i64_Intrinsic<"HEXAGON_V6_vlut4_128B">;
+
+def int_hexagon_V6_vmpyuhe :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe">;
+
+def int_hexagon_V6_vmpyuhe_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_128B">;
+
+def int_hexagon_V6_vmpyuhe_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc">;
+
+def int_hexagon_V6_vmpyuhe_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc_128B">;
+
+def int_hexagon_V6_vgathermw :
+Hexagon__ptri32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermw", [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermw_128B :
+Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermw_128B", [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermh :
+Hexagon__ptri32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermh", [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermh_128B :
+Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermh_128B", [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermhw :
+Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhw", [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermhw_128B :
+Hexagon__ptri32i32v64i32_Intrinsic<"HEXAGON_V6_vgathermhw_128B", [IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermwq :
+Hexagon_custom__ptrv64i1i32i32v16i32_Intrinsic<[IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermwq_128B :
+Hexagon_custom__ptrv128i1i32i32v32i32_Intrinsic_128B<[IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermhq :
+Hexagon_custom__ptrv64i1i32i32v16i32_Intrinsic<[IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermhq_128B :
+Hexagon_custom__ptrv128i1i32i32v32i32_Intrinsic_128B<[IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermhwq :
+Hexagon_custom__ptrv64i1i32i32v32i32_Intrinsic<[IntrArgMemOnly]>;
+
+def int_hexagon_V6_vgathermhwq_128B :
+Hexagon_custom__ptrv128i1i32i32v64i32_Intrinsic_128B<[IntrArgMemOnly]>;
+
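+// The gather intrinsics return no value and instead deposit the gathered
+// elements through their leading pointer operand; IntrArgMemOnly records
+// that they access memory only through their arguments.
+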
+def int_hexagon_V6_vscattermw :
+Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermw", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermw_128B :
+Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermw_128B", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermh :
+Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermh", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermh_128B :
+Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermh_128B", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermw_add :
+Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermw_add", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermw_add_128B :
+Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermw_add_128B", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermh_add :
+Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermh_add", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermh_add_128B :
+Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermh_add_128B", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermwq :
+Hexagon_custom__v64i1i32i32v16i32v16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermwq_128B :
+Hexagon_custom__v128i1i32i32v32i32v32i32_Intrinsic_128B<[IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhq :
+Hexagon_custom__v64i1i32i32v16i32v16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhq_128B :
+Hexagon_custom__v128i1i32i32v32i32v32i32_Intrinsic_128B<[IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhw :
+Hexagon__i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhw", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhw_128B :
+Hexagon__i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhw_128B", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhwq :
+Hexagon_custom__v64i1i32i32v32i32v16i32_Intrinsic<[IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhwq_128B :
+Hexagon_custom__v128i1i32i32v64i32v32i32_Intrinsic_128B<[IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhw_add :
+Hexagon__i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhw_add", [IntrWriteMem]>;
+
+def int_hexagon_V6_vscattermhw_add_128B :
+Hexagon__i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B", [IntrWriteMem]>;
+
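+// The scatter intrinsics take base/offset scalars plus source vectors and
+// carry IntrWriteMem, marking a memory effect that is a store; the _add
+// forms perform an accumulating scatter, and the *q forms take a leading
+// predicate that masks which elements are written.
+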
+def int_hexagon_V6_vprefixqb :
+Hexagon_custom_v16i32_v64i1_Intrinsic;
+
+def int_hexagon_V6_vprefixqb_128B :
+Hexagon_custom_v32i32_v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_vprefixqh :
+Hexagon_custom_v16i32_v64i1_Intrinsic;
+
+def int_hexagon_V6_vprefixqh_128B :
+Hexagon_custom_v32i32_v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_vprefixqw :
+Hexagon_custom_v16i32_v64i1_Intrinsic;
+
+def int_hexagon_V6_vprefixqw_128B :
+Hexagon_custom_v32i32_v128i1_Intrinsic_128B;
+
+// V66 HVX Instructions.
+
+def int_hexagon_V6_vrotr :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrotr">;
+
+def int_hexagon_V6_vrotr_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrotr_128B">;
+
+def int_hexagon_V6_vasr_into :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vasr_into">;
+
+def int_hexagon_V6_vasr_into_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vasr_into_128B">;
+
+def int_hexagon_V6_vaddcarrysat :
+Hexagon_custom_v16i32_v16i32v16i32v64i1_Intrinsic;
+
+def int_hexagon_V6_vaddcarrysat_128B :
+Hexagon_custom_v32i32_v32i32v32i32v128i1_Intrinsic_128B;
+
+def int_hexagon_V6_vsatdw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatdw">;
+
+def int_hexagon_V6_vsatdw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatdw_128B">;
+
diff --git a/llvm/include/llvm/IR/IntrinsicsMips.td b/llvm/include/llvm/IR/IntrinsicsMips.td
index bfcdd80a52d5..271142ca7788 100644
--- a/llvm/include/llvm/IR/IntrinsicsMips.td
+++ b/llvm/include/llvm/IR/IntrinsicsMips.td
@@ -234,9 +234,9 @@ def int_mips_extpdp: GCCBuiltin<"__builtin_mips_extpdp">,
// Misc
def int_mips_wrdsp: GCCBuiltin<"__builtin_mips_wrdsp">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<1>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
def int_mips_rddsp: GCCBuiltin<"__builtin_mips_rddsp">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem, ImmArg<0>]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem, ImmArg<ArgIndex<0>>]>;
def int_mips_insv: GCCBuiltin<"__builtin_mips_insv">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
@@ -302,10 +302,10 @@ def int_mips_adduh_r_qb: GCCBuiltin<"__builtin_mips_adduh_r_qb">,
def int_mips_append: GCCBuiltin<"__builtin_mips_append">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_balign: GCCBuiltin<"__builtin_mips_balign">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_cmpgdu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgdu_eq_qb">,
Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
@@ -355,14 +355,14 @@ def int_mips_precr_qb_ph: GCCBuiltin<"__builtin_mips_precr_qb_ph">,
Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
def int_mips_precr_sra_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_ph_w">,
Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_precr_sra_r_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_r_ph_w">,
Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_prepend: GCCBuiltin<"__builtin_mips_prepend">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_shra_qb: GCCBuiltin<"__builtin_mips_shra_qb">,
Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -463,22 +463,22 @@ def int_mips_addv_d : GCCBuiltin<"__builtin_msa_addv_d">,
def int_mips_addvi_b : GCCBuiltin<"__builtin_msa_addvi_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
- [Commutative, IntrNoMem, ImmArg<1>]>;
+ [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_addvi_h : GCCBuiltin<"__builtin_msa_addvi_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
- [Commutative, IntrNoMem, ImmArg<1>]>;
+ [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_addvi_w : GCCBuiltin<"__builtin_msa_addvi_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
- [Commutative, IntrNoMem, ImmArg<1>]>;
+ [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_addvi_d : GCCBuiltin<"__builtin_msa_addvi_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
- [Commutative, IntrNoMem, ImmArg<1>]>;
+ [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_and_v : GCCBuiltin<"__builtin_msa_and_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_andi_b : GCCBuiltin<"__builtin_msa_andi_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_asub_s_b : GCCBuiltin<"__builtin_msa_asub_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -560,13 +560,13 @@ def int_mips_bclr_d : GCCBuiltin<"__builtin_msa_bclr_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_bclri_b : GCCBuiltin<"__builtin_msa_bclri_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bclri_h : GCCBuiltin<"__builtin_msa_bclri_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bclri_w : GCCBuiltin<"__builtin_msa_bclri_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bclri_d : GCCBuiltin<"__builtin_msa_bclri_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_binsl_b : GCCBuiltin<"__builtin_msa_binsl_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
@@ -583,16 +583,16 @@ def int_mips_binsl_d : GCCBuiltin<"__builtin_msa_binsl_d">,
def int_mips_binsli_b : GCCBuiltin<"__builtin_msa_binsli_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsli_h : GCCBuiltin<"__builtin_msa_binsli_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsli_w : GCCBuiltin<"__builtin_msa_binsli_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsli_d : GCCBuiltin<"__builtin_msa_binsli_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsr_b : GCCBuiltin<"__builtin_msa_binsr_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
@@ -609,16 +609,16 @@ def int_mips_binsr_d : GCCBuiltin<"__builtin_msa_binsr_d">,
def int_mips_binsri_b : GCCBuiltin<"__builtin_msa_binsri_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsri_h : GCCBuiltin<"__builtin_msa_binsri_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsri_w : GCCBuiltin<"__builtin_msa_binsri_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsri_d : GCCBuiltin<"__builtin_msa_binsri_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_bmnz_v : GCCBuiltin<"__builtin_msa_bmnz_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
@@ -626,7 +626,7 @@ def int_mips_bmnz_v : GCCBuiltin<"__builtin_msa_bmnz_v">,
def int_mips_bmnzi_b : GCCBuiltin<"__builtin_msa_bmnzi_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_bmz_v : GCCBuiltin<"__builtin_msa_bmz_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
@@ -634,7 +634,7 @@ def int_mips_bmz_v : GCCBuiltin<"__builtin_msa_bmz_v">,
def int_mips_bmzi_b : GCCBuiltin<"__builtin_msa_bmzi_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_bneg_b : GCCBuiltin<"__builtin_msa_bneg_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -646,13 +646,13 @@ def int_mips_bneg_d : GCCBuiltin<"__builtin_msa_bneg_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_bnegi_b : GCCBuiltin<"__builtin_msa_bnegi_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnegi_h : GCCBuiltin<"__builtin_msa_bnegi_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnegi_w : GCCBuiltin<"__builtin_msa_bnegi_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnegi_d : GCCBuiltin<"__builtin_msa_bnegi_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnz_b : GCCBuiltin<"__builtin_msa_bnz_b">,
Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
@@ -672,7 +672,7 @@ def int_mips_bsel_v : GCCBuiltin<"__builtin_msa_bsel_v">,
def int_mips_bseli_b : GCCBuiltin<"__builtin_msa_bseli_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_bset_b : GCCBuiltin<"__builtin_msa_bset_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -684,13 +684,13 @@ def int_mips_bset_d : GCCBuiltin<"__builtin_msa_bset_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_bseti_b : GCCBuiltin<"__builtin_msa_bseti_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bseti_h : GCCBuiltin<"__builtin_msa_bseti_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bseti_w : GCCBuiltin<"__builtin_msa_bseti_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bseti_d : GCCBuiltin<"__builtin_msa_bseti_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bz_b : GCCBuiltin<"__builtin_msa_bz_b">,
Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
@@ -714,16 +714,16 @@ def int_mips_ceq_d : GCCBuiltin<"__builtin_msa_ceq_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_ceqi_b : GCCBuiltin<"__builtin_msa_ceqi_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ceqi_h : GCCBuiltin<"__builtin_msa_ceqi_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ceqi_w : GCCBuiltin<"__builtin_msa_ceqi_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ceqi_d : GCCBuiltin<"__builtin_msa_ceqi_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_cfcmsa : GCCBuiltin<"__builtin_msa_cfcmsa">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<0>]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_mips_cle_s_b : GCCBuiltin<"__builtin_msa_cle_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -744,22 +744,22 @@ def int_mips_cle_u_d : GCCBuiltin<"__builtin_msa_cle_u_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_clei_s_b : GCCBuiltin<"__builtin_msa_clei_s_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_s_h : GCCBuiltin<"__builtin_msa_clei_s_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_s_w : GCCBuiltin<"__builtin_msa_clei_s_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_s_d : GCCBuiltin<"__builtin_msa_clei_s_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_b : GCCBuiltin<"__builtin_msa_clei_u_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_h : GCCBuiltin<"__builtin_msa_clei_u_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_w : GCCBuiltin<"__builtin_msa_clei_u_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_d : GCCBuiltin<"__builtin_msa_clei_u_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clt_s_b : GCCBuiltin<"__builtin_msa_clt_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -780,22 +780,22 @@ def int_mips_clt_u_d : GCCBuiltin<"__builtin_msa_clt_u_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_clti_s_b : GCCBuiltin<"__builtin_msa_clti_s_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_s_h : GCCBuiltin<"__builtin_msa_clti_s_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_s_w : GCCBuiltin<"__builtin_msa_clti_s_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_s_d : GCCBuiltin<"__builtin_msa_clti_s_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_b : GCCBuiltin<"__builtin_msa_clti_u_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_h : GCCBuiltin<"__builtin_msa_clti_u_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_w : GCCBuiltin<"__builtin_msa_clti_u_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_d : GCCBuiltin<"__builtin_msa_clti_u_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_copy_s_b : GCCBuiltin<"__builtin_msa_copy_s_b">,
Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -816,7 +816,7 @@ def int_mips_copy_u_d : GCCBuiltin<"__builtin_msa_copy_u_d">,
Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_ctcmsa : GCCBuiltin<"__builtin_msa_ctcmsa">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<0>]>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_mips_div_s_b : GCCBuiltin<"__builtin_msa_div_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1244,19 +1244,19 @@ def int_mips_insert_d : GCCBuiltin<"__builtin_msa_insert_d">,
def int_mips_insve_b : GCCBuiltin<"__builtin_msa_insve_b">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_insve_h : GCCBuiltin<"__builtin_msa_insve_h">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_insve_w : GCCBuiltin<"__builtin_msa_insve_w">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_insve_d : GCCBuiltin<"__builtin_msa_insve_d">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ld_b : GCCBuiltin<"__builtin_msa_ld_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
@@ -1271,14 +1271,21 @@ def int_mips_ld_d : GCCBuiltin<"__builtin_msa_ld_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly]>;
+def int_mips_ldr_d : GCCBuiltin<"__builtin_msa_ldr_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+def int_mips_ldr_w : GCCBuiltin<"__builtin_msa_ldr_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
def int_mips_ldi_b : GCCBuiltin<"__builtin_msa_ldi_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_mips_ldi_h : GCCBuiltin<"__builtin_msa_ldi_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_mips_ldi_w : GCCBuiltin<"__builtin_msa_ldi_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_mips_ldi_d : GCCBuiltin<"__builtin_msa_ldi_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
// This instruction is part of the MSA spec but it does not share the
// __builtin_msa prefix because it operates on the GPR registers.
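
The new int_mips_ldr_d/int_mips_ldr_w defs above follow the same shape as the
existing ld_* loads: a vector result, a pointer plus an i32 offset, and the pair
IntrReadMem with IntrArgMemOnly, telling the optimizer the call only reads, and
only through its pointer argument. A sketch of that load shape with a
hypothetical name:

def int_example_load_v4i32
    : Intrinsic<[llvm_v4i32_ty],            // loaded vector
                [llvm_ptr_ty, llvm_i32_ty], // base pointer and offset
                // Reads memory, but only via the pointer operand, so alias
                // analysis can treat the call much like a plain load.
                [IntrReadMem, IntrArgMemOnly]>;
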
@@ -1341,22 +1348,22 @@ def int_mips_max_u_d : GCCBuiltin<"__builtin_msa_max_u_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_maxi_s_b : GCCBuiltin<"__builtin_msa_maxi_s_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_s_h : GCCBuiltin<"__builtin_msa_maxi_s_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_s_w : GCCBuiltin<"__builtin_msa_maxi_s_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_s_d : GCCBuiltin<"__builtin_msa_maxi_s_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_b : GCCBuiltin<"__builtin_msa_maxi_u_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_h : GCCBuiltin<"__builtin_msa_maxi_u_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_w : GCCBuiltin<"__builtin_msa_maxi_u_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_d : GCCBuiltin<"__builtin_msa_maxi_u_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_min_a_b : GCCBuiltin<"__builtin_msa_min_a_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1386,22 +1393,22 @@ def int_mips_min_u_d : GCCBuiltin<"__builtin_msa_min_u_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_mini_s_b : GCCBuiltin<"__builtin_msa_mini_s_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_s_h : GCCBuiltin<"__builtin_msa_mini_s_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_s_w : GCCBuiltin<"__builtin_msa_mini_s_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_s_d : GCCBuiltin<"__builtin_msa_mini_s_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_b : GCCBuiltin<"__builtin_msa_mini_u_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_h : GCCBuiltin<"__builtin_msa_mini_u_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_w : GCCBuiltin<"__builtin_msa_mini_u_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_d : GCCBuiltin<"__builtin_msa_mini_u_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mod_s_b : GCCBuiltin<"__builtin_msa_mod_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1492,13 +1499,13 @@ def int_mips_nor_v : GCCBuiltin<"__builtin_msa_nor_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_nori_b : GCCBuiltin<"__builtin_msa_nori_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_or_v : GCCBuiltin<"__builtin_msa_or_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_ori_b : GCCBuiltin<"__builtin_msa_ori_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_pckev_b : GCCBuiltin<"__builtin_msa_pckev_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1528,29 +1535,29 @@ def int_mips_pcnt_d : GCCBuiltin<"__builtin_msa_pcnt_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_sat_s_b : GCCBuiltin<"__builtin_msa_sat_s_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_s_h : GCCBuiltin<"__builtin_msa_sat_s_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_s_w : GCCBuiltin<"__builtin_msa_sat_s_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_s_d : GCCBuiltin<"__builtin_msa_sat_s_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_b : GCCBuiltin<"__builtin_msa_sat_u_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_h : GCCBuiltin<"__builtin_msa_sat_u_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_w : GCCBuiltin<"__builtin_msa_sat_u_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_d : GCCBuiltin<"__builtin_msa_sat_u_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_shf_b : GCCBuiltin<"__builtin_msa_shf_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_shf_h : GCCBuiltin<"__builtin_msa_shf_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_shf_w : GCCBuiltin<"__builtin_msa_shf_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sld_b : GCCBuiltin<"__builtin_msa_sld_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -1563,16 +1570,16 @@ def int_mips_sld_d : GCCBuiltin<"__builtin_msa_sld_d">,
def int_mips_sldi_b : GCCBuiltin<"__builtin_msa_sldi_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sldi_h : GCCBuiltin<"__builtin_msa_sldi_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sldi_w : GCCBuiltin<"__builtin_msa_sldi_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sldi_d : GCCBuiltin<"__builtin_msa_sldi_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sll_b : GCCBuiltin<"__builtin_msa_sll_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1584,13 +1591,13 @@ def int_mips_sll_d : GCCBuiltin<"__builtin_msa_sll_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_slli_b : GCCBuiltin<"__builtin_msa_slli_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_slli_h : GCCBuiltin<"__builtin_msa_slli_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_slli_w : GCCBuiltin<"__builtin_msa_slli_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_slli_d : GCCBuiltin<"__builtin_msa_slli_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splat_b : GCCBuiltin<"__builtin_msa_splat_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -1602,13 +1609,13 @@ def int_mips_splat_d : GCCBuiltin<"__builtin_msa_splat_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_splati_b : GCCBuiltin<"__builtin_msa_splati_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splati_h : GCCBuiltin<"__builtin_msa_splati_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splati_w : GCCBuiltin<"__builtin_msa_splati_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splati_d : GCCBuiltin<"__builtin_msa_splati_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sra_b : GCCBuiltin<"__builtin_msa_sra_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1620,13 +1627,13 @@ def int_mips_sra_d : GCCBuiltin<"__builtin_msa_sra_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_srai_b : GCCBuiltin<"__builtin_msa_srai_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srai_h : GCCBuiltin<"__builtin_msa_srai_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srai_w : GCCBuiltin<"__builtin_msa_srai_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srai_d : GCCBuiltin<"__builtin_msa_srai_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srar_b : GCCBuiltin<"__builtin_msa_srar_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1638,13 +1645,13 @@ def int_mips_srar_d : GCCBuiltin<"__builtin_msa_srar_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_srari_b : GCCBuiltin<"__builtin_msa_srari_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srari_h : GCCBuiltin<"__builtin_msa_srari_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srari_w : GCCBuiltin<"__builtin_msa_srari_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srari_d : GCCBuiltin<"__builtin_msa_srari_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srl_b : GCCBuiltin<"__builtin_msa_srl_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1656,13 +1663,13 @@ def int_mips_srl_d : GCCBuiltin<"__builtin_msa_srl_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_srli_b : GCCBuiltin<"__builtin_msa_srli_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srli_h : GCCBuiltin<"__builtin_msa_srli_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srli_w : GCCBuiltin<"__builtin_msa_srli_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srli_d : GCCBuiltin<"__builtin_msa_srli_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlr_b : GCCBuiltin<"__builtin_msa_srlr_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
@@ -1674,13 +1681,13 @@ def int_mips_srlr_d : GCCBuiltin<"__builtin_msa_srlr_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_srlri_b : GCCBuiltin<"__builtin_msa_srlri_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlri_h : GCCBuiltin<"__builtin_msa_srlri_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlri_w : GCCBuiltin<"__builtin_msa_srlri_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlri_d : GCCBuiltin<"__builtin_msa_srlri_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_st_b : GCCBuiltin<"__builtin_msa_st_b">,
Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty, llvm_i32_ty],
@@ -1695,6 +1702,13 @@ def int_mips_st_d : GCCBuiltin<"__builtin_msa_st_d">,
Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
[IntrArgMemOnly]>;
+def int_mips_str_d : GCCBuiltin<"__builtin_msa_str_d">,
+ Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+def int_mips_str_w : GCCBuiltin<"__builtin_msa_str_w">,
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
def int_mips_subs_s_b : GCCBuiltin<"__builtin_msa_subs_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_subs_s_h : GCCBuiltin<"__builtin_msa_subs_s_h">,
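
The int_mips_str_d/int_mips_str_w additions above mirror the new loads on the
store side and, like the existing st_* defs, carry only IntrArgMemOnly. A
write-only store could in principle also be tagged IntrWriteMem, as some other
targets do, but these defs keep the file's existing convention. A sketch with a
hypothetical name:

def int_example_store_v4i32
    : Intrinsic<[],                                        // no result
                [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty], // value, base, offset
                // Accesses memory only through its pointer operand.
                [IntrArgMemOnly]>;
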
@@ -1741,13 +1755,13 @@ def int_mips_subv_d : GCCBuiltin<"__builtin_msa_subv_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_mips_subvi_b : GCCBuiltin<"__builtin_msa_subvi_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_subvi_h : GCCBuiltin<"__builtin_msa_subvi_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_subvi_w : GCCBuiltin<"__builtin_msa_subvi_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_subvi_d : GCCBuiltin<"__builtin_msa_subvi_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_vshf_b : GCCBuiltin<"__builtin_msa_vshf_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
@@ -1766,5 +1780,5 @@ def int_mips_xor_v : GCCBuiltin<"__builtin_msa_xor_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_xori_b : GCCBuiltin<"__builtin_msa_xori_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index ec328d69a8dd..61293418ec41 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -978,20 +978,20 @@ let TargetPrefix = "nvvm" in {
// Atomics not available as llvm intrinsics.
def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
[LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
[LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
class SCOPED_ATOMIC2_impl<LLVMType elty>
: Intrinsic<[elty],
[LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
class SCOPED_ATOMIC3_impl<LLVMType elty>
: Intrinsic<[elty],
[LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>,
LLVMMatchType<0>],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
multiclass PTXAtomicWithScope2<LLVMType elty> {
def _cta : SCOPED_ATOMIC2_impl<elty>;
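
NoCapture<ArgIndex<0>> asserts that the callee does not retain the pointer in
operand 0 beyond the call, which is what lets these atomic helpers participate
in capture-based alias analysis. The SCOPED_ATOMIC*_impl classes also show the
usual way to stamp out one definition per element type: parameterize a class
over an LLVMType. A sketch under those assumptions, with hypothetical names:

class ExampleAtomicImpl<LLVMType elty>
    : Intrinsic<[elty],
                // elty is an overloaded type here (llvm_anyint_ty below), so
                // LLVMMatchType<0> ties the stored value to the result type.
                [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
                // The pointer is dereferenced but never escapes the call.
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_example_atomic_i : ExampleAtomicImpl<llvm_anyint_ty>;
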
@@ -1063,30 +1063,30 @@ let TargetPrefix = "nvvm" in {
// pointer's alignment.
def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldu.global.i">;
def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldu.global.f">;
def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldu.global.p">;
// Generated within nvvm. Use for ldg on sm_35 or later. Second arg is the
// pointer's alignment.
def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.i">;
def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.f">;
def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.p">;
// Use for generic pointers
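
The ldu/ldg defs above also use the optional fourth Intrinsic<> parameter: an
explicit base name string, given instead of letting the name be derived from
the def name (overloaded intrinsics still get type suffixes appended on top of
whichever base name is used). A sketch of that shape, hypothetical name:

def int_example_ldx
    : Intrinsic<[llvm_anyint_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
                [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>],
                // Optional fourth parameter: spell the runtime name
                // explicitly rather than deriving it from the def name.
                "llvm.example.ldx">;
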
@@ -1143,7 +1143,7 @@ def int_nvvm_move_float : Intrinsic<[llvm_float_ty], [llvm_float_ty],
def int_nvvm_move_double : Intrinsic<[llvm_double_ty], [llvm_double_ty],
[IntrNoMem], "llvm.nvvm.move.double">;
def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
- [IntrNoMem, NoCapture<0>], "llvm.nvvm.move.ptr">;
+ [IntrNoMem, NoCapture<ArgIndex<0>>], "llvm.nvvm.move.ptr">;
// For getting the handle from a texture or surface variable
@@ -4110,7 +4110,7 @@ def int_nvvm_match_all_sync_i64p :
class NVVM_WMMA_LD<WMMA_REGS Frag, string Layout, int WithStride>
: Intrinsic<Frag.regs,
!if(WithStride, [llvm_anyptr_ty, llvm_i32_ty], [llvm_anyptr_ty]),
- [IntrReadMem, IntrArgMemOnly, ReadOnly<0>, NoCapture<0>],
+ [IntrReadMem, IntrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.intr>;
// WMMA.STORE.D
@@ -4120,7 +4120,7 @@ class NVVM_WMMA_ST<WMMA_REGS Frag, string Layout, int WithStride>
[llvm_anyptr_ty],
Frag.regs,
!if(WithStride, [llvm_i32_ty], [])),
- [IntrWriteMem, IntrArgMemOnly, WriteOnly<0>, NoCapture<0>],
+ [IntrWriteMem, IntrArgMemOnly, WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.intr>;
// Create all load/store variants
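
The WMMA load/store classes pair the per-call memory kind (IntrReadMem or
IntrWriteMem) with the matching per-argument attribute (ReadOnly or WriteOnly on
the pointer), and use !if to make the stride operand optional. A reduced sketch
of that shape, with hypothetical names and a fixed result list standing in for
Frag.regs:

class ExampleLd<int WithStride>
    : Intrinsic<[llvm_anyint_ty],
                // With a stride, the signature gains a trailing i32 operand.
                !if(WithStride, [llvm_anyptr_ty, llvm_i32_ty], [llvm_anyptr_ty]),
                // The pointer is only read from and never captured.
                [IntrReadMem, IntrArgMemOnly,
                 ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
def int_example_ld_stride : ExampleLd<1>;
def int_example_ld        : ExampleLd<0>;
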
diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index f87317445753..614a29049686 100644
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -20,28 +20,32 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_dcba : Intrinsic<[], [llvm_ptr_ty], []>;
def int_ppc_dcbf : GCCBuiltin<"__builtin_dcbf">,
Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbfl : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbflp: Intrinsic<[], [llvm_ptr_ty], []>;
def int_ppc_dcbi : Intrinsic<[], [llvm_ptr_ty], []>;
def int_ppc_dcbst : Intrinsic<[], [llvm_ptr_ty], []>;
def int_ppc_dcbt : Intrinsic<[], [llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_ppc_dcbtst: Intrinsic<[], [llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<0>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+ def int_ppc_dcbt_with_hint: Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
+ def int_ppc_dcbtst_with_hint: Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_ppc_dcbz : Intrinsic<[], [llvm_ptr_ty], []>;
def int_ppc_dcbzl : Intrinsic<[], [llvm_ptr_ty], []>;
+ // Population Count in each Byte.
+ def int_ppc_popcntb : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+
// sync instruction (i.e. sync 0, a.k.a hwsync)
def int_ppc_sync : Intrinsic<[], [], []>;
+ // isync instruction
+ def int_ppc_isync : Intrinsic<[], [], []>;
// lwsync is sync 1
def int_ppc_lwsync : Intrinsic<[], [], []>;
-
- // Intrinsics used to generate ctr-based loops. These should only be
- // generated by the PowerPC backend!
- // The branch intrinsic is marked as NoDuplicate because loop rotation will
- // attempt to duplicate it forming loops where a block reachable from one
- // instance of it can contain another.
- def int_ppc_mtctr : Intrinsic<[], [llvm_anyint_ty], []>;
- def int_ppc_is_decremented_ctr_nonzero :
- Intrinsic<[llvm_i1_ty], [], [IntrNoDuplicate]>;
+ // eieio instruction
+ def int_ppc_eieio : Intrinsic<[],[],[]>;
// Intrinsics for [double]word extended forms of divide instructions
def int_ppc_divwe : GCCBuiltin<"__builtin_divwe">,
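
The new dcbt_with_hint/dcbtst_with_hint defs stack three per-position properties
on one prefetch-style intrinsic: argument-only memory access, a non-captured
pointer, and a hint operand that must be an immediate. Callers therefore have to
pass a constant hint; frontends typically pair this with an immediate-constrained
builtin argument, though that is enforced on the frontend side, not here. A
sketch with a hypothetical name:

def int_example_prefetch_hint
    : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
                [IntrArgMemOnly,
                 NoCapture<ArgIndex<0>>,   // pointer does not escape the call
                 ImmArg<ArgIndex<1>>]>;    // hint must be a constant
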
@@ -62,6 +66,27 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
[IntrNoMem]>;
+ // Parallel Bits Deposit/Extract Doubleword Builtins.
+ def int_ppc_pdepd
+ : GCCBuiltin<"__builtin_pdepd">,
+ Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_ppc_pextd
+ : GCCBuiltin<"__builtin_pextd">,
+ Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
+ // Centrifuge Doubleword Builtin.
+ def int_ppc_cfuged
+ : GCCBuiltin<"__builtin_cfuged">,
+ Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
+ // Count Leading / Trailing Zeroes under bit Mask Builtins.
+ def int_ppc_cntlzdm
+ : GCCBuiltin<"__builtin_cntlzdm">,
+ Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_ppc_cnttzdm
+ : GCCBuiltin<"__builtin_cnttzdm">,
+ Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
def int_ppc_truncf128_round_to_odd
: GCCBuiltin<"__builtin_truncf128_round_to_odd">,
Intrinsic <[llvm_double_ty], [llvm_f128_ty], [IntrNoMem]>;
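
The scalar builtins added above (pdepd, pextd, cfuged, cntlzdm, cnttzdm) all use
the simplest binding pattern in this file: a pure two-operand i64 operation,
marked IntrNoMem and attached to its frontend builtin through the GCCBuiltin
mixin. A sketch with hypothetical names:

def int_example_bitop
    : GCCBuiltin<"__builtin_example_bitop">,   // hypothetical builtin name
      Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                [IntrNoMem]>;                  // pure: touches no memory
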
@@ -404,6 +429,108 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vprtybq : GCCBuiltin<"__builtin_altivec_vprtybq">,
Intrinsic<[llvm_v1i128_ty],[llvm_v1i128_ty],[IntrNoMem]>;
+ // P10 Vector Parallel Bits Deposit/Extract Doubleword Builtins.
+ def int_ppc_altivec_vpdepd : GCCBuiltin<"__builtin_altivec_vpdepd">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpextd : GCCBuiltin<"__builtin_altivec_vpextd">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+ // P10 Vector Centrifuge Builtin.
+ def int_ppc_altivec_vcfuged : GCCBuiltin<"__builtin_altivec_vcfuged">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+ // P10 Vector Gather Every Nth Bit Builtin.
+ def int_ppc_altivec_vgnb : GCCBuiltin<"__builtin_altivec_vgnb">,
+ Intrinsic<[llvm_i64_ty], [llvm_v1i128_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+ // P10 Vector Clear Bytes
+ def int_ppc_altivec_vclrlb : GCCBuiltin<"__builtin_altivec_vclrlb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vclrrb : GCCBuiltin<"__builtin_altivec_vclrrb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ // P10 Vector Shift Double Bit Immediate.
+ def int_ppc_altivec_vsldbi : GCCBuiltin<"__builtin_altivec_vsldbi">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+ def int_ppc_altivec_vsrdbi : GCCBuiltin<"__builtin_altivec_vsrdbi">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+ // P10 Vector Insert.
+ def int_ppc_altivec_vinsblx : GCCBuiltin<"__builtin_altivec_vinsblx">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinsbrx : GCCBuiltin<"__builtin_altivec_vinsbrx">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinshlx : GCCBuiltin<"__builtin_altivec_vinshlx">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinshrx : GCCBuiltin<"__builtin_altivec_vinshrx">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinswlx : GCCBuiltin<"__builtin_altivec_vinswlx">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinswrx : GCCBuiltin<"__builtin_altivec_vinswrx">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinsdlx : GCCBuiltin<"__builtin_altivec_vinsdlx">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinsdrx : GCCBuiltin<"__builtin_altivec_vinsdrx">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinsbvlx : GCCBuiltin<"__builtin_altivec_vinsbvlx">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i64_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinsbvrx : GCCBuiltin<"__builtin_altivec_vinsbvrx">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i64_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinshvlx : GCCBuiltin<"__builtin_altivec_vinshvlx">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i64_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinshvrx : GCCBuiltin<"__builtin_altivec_vinshvrx">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i64_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinswvlx : GCCBuiltin<"__builtin_altivec_vinswvlx">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i64_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vinswvrx : GCCBuiltin<"__builtin_altivec_vinswvrx">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i64_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ // P10 Vector Insert with immediate.
+ def int_ppc_altivec_vinsw :
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+ def int_ppc_altivec_vinsd :
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
// Vector average.
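
Note the split at the end of this hunk: the vins*lx/vins*rx variants take a
runtime index and carry plain IntrNoMem, while vinsw/vinsd take the index as an
immediate and so add ImmArg<ArgIndex<2>>. The two shapes side by side, with
hypothetical names:

def int_example_insert_var        // index may be any runtime value
    : Intrinsic<[llvm_v4i32_ty],
                [llvm_v4i32_ty, llvm_i64_ty, llvm_i64_ty],
                [IntrNoMem]>;
def int_example_insert_imm        // index must be a compile-time constant
    : Intrinsic<[llvm_v4i32_ty],
                [llvm_v4i32_ty, llvm_i64_ty, llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<2>>]>;
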
@@ -472,7 +599,7 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
- // Vector Multiply Sum Intructions.
+ // Vector Multiply Sum Instructions.
def int_ppc_altivec_vmsummbm : GCCBuiltin<"__builtin_altivec_vmsummbm">,
Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_v4i32_ty], [IntrNoMem]>;
@@ -488,11 +615,14 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vmsumuhm : GCCBuiltin<"__builtin_altivec_vmsumuhm">,
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmsumudm : GCCBuiltin<"__builtin_altivec_vmsumudm">,
+ Intrinsic<[llvm_v1i128_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v1i128_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumuhs : GCCBuiltin<"__builtin_altivec_vmsumuhs">,
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v4i32_ty], [IntrNoMem]>;
- // Vector Multiply Intructions.
+ // Vector Multiply Instructions.
def int_ppc_altivec_vmulesb : GCCBuiltin<"__builtin_altivec_vmulesb">,
Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
@@ -531,7 +661,7 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
- // Vector Sum Intructions.
+ // Vector Sum Instructions.
def int_ppc_altivec_vsumsws : GCCBuiltin<"__builtin_altivec_vsumsws">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
@@ -610,16 +740,16 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
// FP <-> integer conversion.
def int_ppc_altivec_vcfsx : GCCBuiltin<"__builtin_altivec_vcfsx">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_altivec_vcfux : GCCBuiltin<"__builtin_altivec_vcfux">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_altivec_vctsxs : GCCBuiltin<"__builtin_altivec_vctsxs">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_altivec_vctuxs : GCCBuiltin<"__builtin_altivec_vctuxs">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_altivec_vrfim : GCCBuiltin<"__builtin_altivec_vrfim">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
@@ -649,6 +779,14 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
+
+ // P10 Vector Count Leading / Trailing Zeroes under bit Mask Builtins.
+ def int_ppc_altivec_vclzdm : GCCBuiltin<"__builtin_altivec_vclzdm">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vctzdm : GCCBuiltin<"__builtin_altivec_vctzdm">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
}
def int_ppc_altivec_vsl : PowerPC_Vec_WWW_Intrinsic<"vsl">;
@@ -716,11 +854,11 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
def int_ppc_altivec_crypto_vshasigmad :
GCCBuiltin<"__builtin_altivec_crypto_vshasigmad">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
def int_ppc_altivec_crypto_vshasigmaw :
GCCBuiltin<"__builtin_altivec_crypto_vshasigmaw">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
}
def int_ppc_altivec_crypto_vcipher :
PowerPC_Vec_DDD_Intrinsic<"crypto_vcipher">;
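
The vshasigmad/vshasigmaw defs show that ImmArg is per position: an intrinsic
with two immediate operands simply lists the attribute once per operand. A
sketch with a hypothetical name:

def int_example_two_imms
    : Intrinsic<[llvm_v2i64_ty],
                [llvm_v2i64_ty, llvm_i32_ty, llvm_i32_ty],
                // Both trailing operands must be constants.
                [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
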
@@ -915,10 +1053,10 @@ def int_ppc_vsx_xvxsigsp :
[llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvtstdcdp :
PowerPC_VSX_Intrinsic<"xvtstdcdp", [llvm_v2i64_ty],
- [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_vsx_xvtstdcsp :
PowerPC_VSX_Intrinsic<"xvtstdcsp", [llvm_v4i32_ty],
- [llvm_v4f32_ty,llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ [llvm_v4f32_ty,llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_vsx_xvcvhpsp :
PowerPC_VSX_Intrinsic<"xvcvhpsp", [llvm_v4f32_ty],
[llvm_v8i16_ty],[IntrNoMem]>;
@@ -929,6 +1067,46 @@ def int_ppc_vsx_xxinsertw :
PowerPC_VSX_Intrinsic<"xxinsertw",[llvm_v4i32_ty],
[llvm_v4i32_ty,llvm_v2i64_ty,llvm_i32_ty],
[IntrNoMem]>;
+def int_ppc_vsx_xvtlsbb :
+ PowerPC_VSX_Intrinsic<"xvtlsbb", [llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i1_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxeval :
+ PowerPC_VSX_Intrinsic<"xxeval", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_ppc_vsx_xxgenpcvbm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvbm", [llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvhm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvhm", [llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvwm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvwm", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvdm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvdm", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// P10 VSX Vector permute extended.
+def int_ppc_vsx_xxpermx :
+ GCCBuiltin<"__builtin_vsx_xxpermx">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty,llvm_v16i8_ty,llvm_v16i8_ty,llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+// P10 VSX Vector Blend Variable.
+def int_ppc_vsx_xxblendvb: GCCBuiltin<"__builtin_vsx_xxblendvb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xxblendvh: GCCBuiltin<"__builtin_vsx_xxblendvh">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xxblendvw: GCCBuiltin<"__builtin_vsx_xxblendvw">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xxblendvd: GCCBuiltin<"__builtin_vsx_xxblendvd">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
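
The int_ppc_vsx_* defs above go through the PowerPC_VSX_Intrinsic helper rather
than spelling GCCBuiltin plus Intrinsic each time; a wrapper like that simply
concatenates the builtin prefix and forwards the type and property lists. A
reduced sketch of such a wrapper (hypothetical prefix and names; the real class
is defined earlier in this file):

class ExampleTargetIntrinsic<string suffix, list<LLVMType> rets,
                             list<LLVMType> params,
                             list<IntrinsicProperty> props>
    : GCCBuiltin<"__builtin_example_" # suffix>,  // paste prefix + suffix
      Intrinsic<rets, params, props>;
def int_example_eval_like
    : ExampleTargetIntrinsic<"eval", [llvm_v2i64_ty],
                             [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                              llvm_i32_ty],
                             [IntrNoMem, ImmArg<ArgIndex<3>>]>;
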
@@ -1113,9 +1291,9 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_tbegin : GCCBuiltin<"__builtin_tbegin">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<0>]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_ppc_tend : GCCBuiltin<"__builtin_tend">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<0>]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_ppc_tabort : GCCBuiltin<"__builtin_tabort">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 2039ad1a26b8..7590b568c367 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -28,11 +28,11 @@ let TargetPrefix = "riscv" in {
// T @llvm.<name>.T.<p>(any*, T, T, T imm);
class MaskedAtomicRMWFourArg<LLVMType itype>
: Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
- [IntrArgMemOnly, NoCapture<0>, ImmArg<3>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
// T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
class MaskedAtomicRMWFiveArg<LLVMType itype>
: Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
- [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>;
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
// We define 32-bit and 64-bit variants of the above, where T stands for i32
// or i64 respectively:
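
Both masked-atomic classes take the pointer as llvm_anyptr_ty, so each intrinsic
is overloaded on the pointer type (including its address space) while the
instantiating LLVMType fixes the value width, and the final operand is pinned
down with ImmArg. A sketch of the same pattern under those assumptions, with
hypothetical names:

class ExampleMaskedRMW<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                // Pointer may live in any address space; the last operand
                // (e.g. an ordering code) must be an immediate.
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
def int_example_masked_rmw_i32 : ExampleMaskedRMW<llvm_i32_ty>;
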
diff --git a/llvm/include/llvm/IR/IntrinsicsSystemZ.td b/llvm/include/llvm/IR/IntrinsicsSystemZ.td
index 40d6ba17eaf1..b0c5cf0148fe 100644
--- a/llvm/include/llvm/IR/IntrinsicsSystemZ.td
+++ b/llvm/include/llvm/IR/IntrinsicsSystemZ.td
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
class SystemZUnaryConv<string name, LLVMType result, LLVMType arg>
- : GCCBuiltin<"__builtin_s390_" ## name>,
+ : GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[result], [arg], [IntrNoMem]>;
class SystemZUnary<string name, LLVMType type>
@@ -24,14 +24,14 @@ class SystemZUnaryCC<LLVMType type>
: SystemZUnaryConvCC<type, type>;
class SystemZBinaryConv<string name, LLVMType result, LLVMType arg>
- : GCCBuiltin<"__builtin_s390_" ## name>,
+ : GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[result], [arg, arg], [IntrNoMem]>;
class SystemZBinary<string name, LLVMType type>
: SystemZBinaryConv<name, type, type>;
class SystemZBinaryInt<string name, LLVMType type>
- : GCCBuiltin<"__builtin_s390_" ## name>,
+ : GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[type], [type, llvm_i32_ty], [IntrNoMem]>;
class SystemZBinaryConvCC<LLVMType result, LLVMType arg>
@@ -39,13 +39,13 @@ class SystemZBinaryConvCC<LLVMType result, LLVMType arg>
class SystemZBinaryConvIntCC<LLVMType result, LLVMType arg>
: Intrinsic<[result, llvm_i32_ty], [arg, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
class SystemZBinaryCC<LLVMType type>
: SystemZBinaryConvCC<type, type>;
class SystemZTernaryConv<string name, LLVMType result, LLVMType arg>
- : GCCBuiltin<"__builtin_s390_" ## name>,
+ : GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[result], [arg, arg, result], [IntrNoMem]>;
class SystemZTernaryConvCC<LLVMType result, LLVMType arg>
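
SystemZBinaryConvIntCC and its relatives return two values, the main result plus
an i32 condition code, expressed as a two-element result list (lowered in IR as
an aggregate return). The attribute index still counts only the call operands,
so ImmArg<ArgIndex<1>> refers to the second parameter, unaffected by the extra
result. A sketch with a hypothetical name:

def int_example_op_cc
    : Intrinsic<[llvm_v16i8_ty, llvm_i32_ty],   // result + condition code
                [llvm_v16i8_ty, llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<1>>]>;
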
@@ -55,42 +55,42 @@ class SystemZTernary<string name, LLVMType type>
: SystemZTernaryConv<name, type, type>;
class SystemZTernaryInt<string name, LLVMType type>
- : GCCBuiltin<"__builtin_s390_" ## name>,
- Intrinsic<[type], [type, type, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ : GCCBuiltin<"__builtin_s390_" # name>,
+ Intrinsic<[type], [type, type, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
class SystemZTernaryIntCC<LLVMType type>
: Intrinsic<[type, llvm_i32_ty], [type, type, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
class SystemZQuaternaryInt<string name, LLVMType type>
- : GCCBuiltin<"__builtin_s390_" ## name>,
+ : GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[type], [type, type, type, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
class SystemZQuaternaryIntCC<LLVMType type>
: Intrinsic<[type, llvm_i32_ty], [type, type, type, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
multiclass SystemZUnaryExtBHF<string name> {
- def b : SystemZUnaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
- def h : SystemZUnaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
- def f : SystemZUnaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+ def b : SystemZUnaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def h : SystemZUnaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZUnaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZUnaryExtBHWF<string name> {
- def b : SystemZUnaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
- def hw : SystemZUnaryConv<name##"hw", llvm_v4i32_ty, llvm_v8i16_ty>;
- def f : SystemZUnaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+ def b : SystemZUnaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def hw : SystemZUnaryConv<name#"hw", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZUnaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZUnaryBHF<string name> {
- def b : SystemZUnary<name##"b", llvm_v16i8_ty>;
- def h : SystemZUnary<name##"h", llvm_v8i16_ty>;
- def f : SystemZUnary<name##"f", llvm_v4i32_ty>;
+ def b : SystemZUnary<name#"b", llvm_v16i8_ty>;
+ def h : SystemZUnary<name#"h", llvm_v8i16_ty>;
+ def f : SystemZUnary<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZUnaryBHFG<string name> : SystemZUnaryBHF<name> {
- def g : SystemZUnary<name##"g", llvm_v2i64_ty>;
+ def g : SystemZUnary<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZUnaryCCBHF {
@@ -100,9 +100,9 @@ multiclass SystemZUnaryCCBHF {
}
multiclass SystemZBinaryTruncHFG<string name> {
- def h : SystemZBinaryConv<name##"h", llvm_v16i8_ty, llvm_v8i16_ty>;
- def f : SystemZBinaryConv<name##"f", llvm_v8i16_ty, llvm_v4i32_ty>;
- def g : SystemZBinaryConv<name##"g", llvm_v4i32_ty, llvm_v2i64_ty>;
+ def h : SystemZBinaryConv<name#"h", llvm_v16i8_ty, llvm_v8i16_ty>;
+ def f : SystemZBinaryConv<name#"f", llvm_v8i16_ty, llvm_v4i32_ty>;
+ def g : SystemZBinaryConv<name#"g", llvm_v4i32_ty, llvm_v2i64_ty>;
}
multiclass SystemZBinaryTruncCCHFG {
@@ -112,30 +112,30 @@ multiclass SystemZBinaryTruncCCHFG {
}
multiclass SystemZBinaryExtBHF<string name> {
- def b : SystemZBinaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
- def h : SystemZBinaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
- def f : SystemZBinaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+ def b : SystemZBinaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def h : SystemZBinaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZBinaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZBinaryExtBHFG<string name> : SystemZBinaryExtBHF<name> {
- def g : SystemZBinaryConv<name##"g", llvm_v16i8_ty, llvm_v2i64_ty>;
+ def g : SystemZBinaryConv<name#"g", llvm_v16i8_ty, llvm_v2i64_ty>;
}
multiclass SystemZBinaryBHF<string name> {
- def b : SystemZBinary<name##"b", llvm_v16i8_ty>;
- def h : SystemZBinary<name##"h", llvm_v8i16_ty>;
- def f : SystemZBinary<name##"f", llvm_v4i32_ty>;
+ def b : SystemZBinary<name#"b", llvm_v16i8_ty>;
+ def h : SystemZBinary<name#"h", llvm_v8i16_ty>;
+ def f : SystemZBinary<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZBinaryBHFG<string name> : SystemZBinaryBHF<name> {
- def g : SystemZBinary<name##"g", llvm_v2i64_ty>;
+ def g : SystemZBinary<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZBinaryIntBHFG<string name> {
- def b : SystemZBinaryInt<name##"b", llvm_v16i8_ty>;
- def h : SystemZBinaryInt<name##"h", llvm_v8i16_ty>;
- def f : SystemZBinaryInt<name##"f", llvm_v4i32_ty>;
- def g : SystemZBinaryInt<name##"g", llvm_v2i64_ty>;
+ def b : SystemZBinaryInt<name#"b", llvm_v16i8_ty>;
+ def h : SystemZBinaryInt<name#"h", llvm_v8i16_ty>;
+ def f : SystemZBinaryInt<name#"f", llvm_v4i32_ty>;
+ def g : SystemZBinaryInt<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZBinaryCCBHF {
@@ -152,25 +152,25 @@ multiclass SystemZCompareBHFG<string name> {
}
multiclass SystemZTernaryExtBHF<string name> {
- def b : SystemZTernaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
- def h : SystemZTernaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
- def f : SystemZTernaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+ def b : SystemZTernaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def h : SystemZTernaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZTernaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZTernaryExtBHFG<string name> : SystemZTernaryExtBHF<name> {
- def g : SystemZTernaryConv<name##"g", llvm_v16i8_ty, llvm_v2i64_ty>;
+ def g : SystemZTernaryConv<name#"g", llvm_v16i8_ty, llvm_v2i64_ty>;
}
multiclass SystemZTernaryBHF<string name> {
- def b : SystemZTernary<name##"b", llvm_v16i8_ty>;
- def h : SystemZTernary<name##"h", llvm_v8i16_ty>;
- def f : SystemZTernary<name##"f", llvm_v4i32_ty>;
+ def b : SystemZTernary<name#"b", llvm_v16i8_ty>;
+ def h : SystemZTernary<name#"h", llvm_v8i16_ty>;
+ def f : SystemZTernary<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZTernaryIntBHF<string name> {
- def b : SystemZTernaryInt<name##"b", llvm_v16i8_ty>;
- def h : SystemZTernaryInt<name##"h", llvm_v8i16_ty>;
- def f : SystemZTernaryInt<name##"f", llvm_v4i32_ty>;
+ def b : SystemZTernaryInt<name#"b", llvm_v16i8_ty>;
+ def h : SystemZTernaryInt<name#"h", llvm_v8i16_ty>;
+ def f : SystemZTernaryInt<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZTernaryIntCCBHF {
@@ -180,14 +180,14 @@ multiclass SystemZTernaryIntCCBHF {
}
multiclass SystemZQuaternaryIntBHF<string name> {
- def b : SystemZQuaternaryInt<name##"b", llvm_v16i8_ty>;
- def h : SystemZQuaternaryInt<name##"h", llvm_v8i16_ty>;
- def f : SystemZQuaternaryInt<name##"f", llvm_v4i32_ty>;
+ def b : SystemZQuaternaryInt<name#"b", llvm_v16i8_ty>;
+ def h : SystemZQuaternaryInt<name#"h", llvm_v8i16_ty>;
+ def f : SystemZQuaternaryInt<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZQuaternaryIntBHFG<string name> :
SystemZQuaternaryIntBHF<name> {
- def g : SystemZQuaternaryInt<name##"g", llvm_v2i64_ty>;
+ def g : SystemZQuaternaryInt<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZQuaternaryIntCCBHF {
@@ -238,11 +238,11 @@ let TargetPrefix = "s390" in {
let TargetPrefix = "s390" in {
def int_s390_lcbb : GCCBuiltin<"__builtin_s390_lcbb">,
Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_s390_vlbb : GCCBuiltin<"__builtin_s390_vlbb">,
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly, ImmArg<ArgIndex<1>>]>;
def int_s390_vll : GCCBuiltin<"__builtin_s390_vll">,
Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
@@ -251,7 +251,7 @@ let TargetPrefix = "s390" in {
def int_s390_vpdi : GCCBuiltin<"__builtin_s390_vpdi">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vperm : GCCBuiltin<"__builtin_s390_vperm">,
Intrinsic<[llvm_v16i8_ty],
@@ -317,7 +317,7 @@ let TargetPrefix = "s390" in {
def int_s390_vsldb : GCCBuiltin<"__builtin_s390_vsldb">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
defm int_s390_vscbi : SystemZBinaryBHFG<"vscbi">;
@@ -376,7 +376,7 @@ let TargetPrefix = "s390" in {
def int_s390_vfidb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
// Instructions from the Vector Enhancements Facility 1
def int_s390_vbperm : SystemZBinaryConv<"vbperm", llvm_v2i64_ty,
@@ -385,20 +385,20 @@ let TargetPrefix = "s390" in {
def int_s390_vmslg : GCCBuiltin<"__builtin_s390_vmslg">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_v16i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_s390_vfmaxdb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfmindb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfmaxsb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfminsb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfcesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfchsbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
@@ -408,7 +408,7 @@ let TargetPrefix = "s390" in {
def int_s390_vfisb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
// Instructions from the Vector Packed Decimal Facility
def int_s390_vlrl : GCCBuiltin<"__builtin_s390_vlrl">,
@@ -423,12 +423,12 @@ let TargetPrefix = "s390" in {
def int_s390_vsld : GCCBuiltin<"__builtin_s390_vsld">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vsrd : GCCBuiltin<"__builtin_s390_vsrd">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vstrsb : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v16i8_ty>;
def int_s390_vstrsh : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
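// Note on the recurring ImmArg rewrite in this patch: intrinsic attribute
// properties now name their operand through an explicit ArgIndex<N> wrapper
// (alongside FuncIndex and RetIndex) rather than a bare integer, so return,
// function and argument positions share one unambiguous index space. A
// minimal sketch with a hypothetical intrinsic:
//
//   def int_s390_example
//       : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
//                   // operand 1 (the i32) must be a compile-time constant
//                   [IntrNoMem, ImmArg<ArgIndex<1>>]>;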
diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index e97700ad724a..7c9ceb148a47 100644
--- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -51,7 +51,7 @@ def int_wasm_trunc_saturate_unsigned : Intrinsic<[llvm_anyint_ty],
// throw / rethrow
def int_wasm_throw : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty],
- [Throws, IntrNoReturn, ImmArg<0>]>;
+ [Throws, IntrNoReturn, ImmArg<ArgIndex<0>>]>;
def int_wasm_rethrow_in_catch : Intrinsic<[], [], [Throws, IntrNoReturn]>;
// Since wasm does not use landingpad instructions, these instructions return
@@ -69,7 +69,7 @@ def int_wasm_extract_exception : Intrinsic<[llvm_ptr_ty], [],
// by WasmEHPrepare pass to generate landingpad table in EHStreamer. This is
// used in order to give them the indices in WasmEHPrepare.
def int_wasm_landingpad_index: Intrinsic<[], [llvm_token_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
// Returns LSDA address of the current function.
def int_wasm_lsda : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
@@ -82,18 +82,18 @@ def int_wasm_lsda : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_wasm_atomic_wait_i32 :
Intrinsic<[llvm_i32_ty],
[LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
- [IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
+ [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_atomic_wait_i64 :
Intrinsic<[llvm_i32_ty],
[LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
- [IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
+ [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_atomic_notify:
Intrinsic<[llvm_i32_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty],
- [IntrInaccessibleMemOnly, NoCapture<0>, IntrHasSideEffects], "",
+ [IntrInaccessibleMemOnly, NoCapture<ArgIndex<0>>, IntrHasSideEffects], "",
[SDNPMemOperand]>;
//===----------------------------------------------------------------------===//
@@ -104,6 +104,13 @@ def int_wasm_swizzle :
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem, IntrSpeculatable]>;
+def int_wasm_shuffle :
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_saturate_signed :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
@@ -116,7 +123,6 @@ def int_wasm_avgr_unsigned :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
-
def int_wasm_bitselect :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -129,6 +135,10 @@ def int_wasm_alltrue :
Intrinsic<[llvm_i32_ty],
[llvm_anyvector_ty],
[IntrNoMem, IntrSpeculatable]>;
+def int_wasm_bitmask :
+ Intrinsic<[llvm_i32_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
def int_wasm_qfma :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -166,20 +176,35 @@ def int_wasm_widen_high_unsigned :
[llvm_anyvector_ty],
[IntrNoMem, IntrSpeculatable]>;
+// TODO: Replace these intrinsics with normal ISel patterns
+def int_wasm_pmin :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_pmax :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
-//===----------------------------------------------------------------------===//
-// Bulk memory intrinsics
-//===----------------------------------------------------------------------===//
-
-def int_wasm_memory_init :
- Intrinsic<[],
- [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem, IntrInaccessibleMemOrArgMemOnly, WriteOnly<2>,
- IntrHasSideEffects, ImmArg<0>, ImmArg<1>]>;
-def int_wasm_data_drop :
- Intrinsic<[],
- [llvm_i32_ty],
- [IntrNoDuplicate, IntrHasSideEffects, ImmArg<0>]>;
+// TODO: Replace these intrinsics with normal ISel patterns once the
+// rounding instructions are merged to the proposal
+// (https://github.com/WebAssembly/simd/pull/232).
+def int_wasm_ceil :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_floor :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_trunc :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_nearest :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
// Thread-local storage intrinsics
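// Note on the SIMD definitions added above: they all use the TableGen
// overloading idiom in which llvm_anyvector_ty makes the result type
// polymorphic and LLVMMatchType<0> ties each operand to it. A minimal
// sketch with a hypothetical name:
//
//   def int_wasm_example :
//     Intrinsic<[llvm_anyvector_ty],  // overloaded result type
//               [LLVMMatchType<0>],   // operand must match the result
//               [IntrNoMem, IntrSpeculatable]>;
//
// At the IR level each instantiation is mangled with its element type,
// e.g. llvm.wasm.example.v4f32 for <4 x float>.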
diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
index 5796686dd79f..3f86fd075d3a 100644
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
// Interrupt traps
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_int : Intrinsic<[], [llvm_i8_ty], [ImmArg<0>]>;
+ def int_x86_int : Intrinsic<[], [llvm_i8_ty], [ImmArg<ArgIndex<0>>]>;
}
//===----------------------------------------------------------------------===//
@@ -203,12 +203,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_cmp_ss : GCCBuiltin<"__builtin_ia32_cmpss">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
// NOTE: This comparison intrinsic is not used by clang as long as the
// distinction in signaling behaviour is not implemented.
def int_x86_sse_cmp_ps :
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse_comieq_ss : GCCBuiltin<"__builtin_ia32_comieq">,
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
llvm_v4f32_ty], [IntrNoMem]>;
@@ -284,7 +284,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_ldmxcsr :
Intrinsic<[], [llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly, IntrHasSideEffects,
- // FIXME: LDMXCSR does not actualy write to memory,
+ // FIXME: LDMXCSR does not actually write to memory,
// but Fast and DAG Isel both use writing to memory
// as a proxy for having side effects.
IntrWriteMem]>;
@@ -319,12 +319,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_cmp_sd : GCCBuiltin<"__builtin_ia32_cmpsd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
// NOTE: This comparison intrinsic is not used by clang as long as the
// distinction in signaling behaviour is not implemented.
def int_x86_sse2_cmp_pd :
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse2_comieq_sd : GCCBuiltin<"__builtin_ia32_comisdeq">,
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
llvm_v2f64_ty], [IntrNoMem]>;
@@ -618,7 +618,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_sse_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
// Sign ops
@@ -664,16 +664,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_round_ss : GCCBuiltin<"__builtin_ia32_roundss">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse41_round_ps : GCCBuiltin<"__builtin_ia32_roundps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_sse41_round_sd : GCCBuiltin<"__builtin_ia32_roundsd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse41_round_pd : GCCBuiltin<"__builtin_ia32_roundpd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
// Vector min element
@@ -736,20 +736,20 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_aesni_aeskeygenassist :
GCCBuiltin<"__builtin_ia32_aeskeygenassist128">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
// PCLMUL instructions
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_pclmulqdq : GCCBuiltin<"__builtin_ia32_pclmulqdq128">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_pclmulqdq_256 : GCCBuiltin<"__builtin_ia32_pclmulqdq256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_pclmulqdq_512 : GCCBuiltin<"__builtin_ia32_pclmulqdq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
// Vector pack
@@ -763,7 +763,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_insertps : GCCBuiltin<"__builtin_ia32_insertps128">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
// Vector blend
@@ -783,17 +783,17 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_dppd : GCCBuiltin<"__builtin_ia32_dppd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem, Commutative, ImmArg<2>]>;
+ [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
def int_x86_sse41_dpps : GCCBuiltin<"__builtin_ia32_dpps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem, Commutative, ImmArg<2>]>;
+ [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
}
// Vector sum of absolute differences
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_i8_ty],
- [IntrNoMem, Commutative, ImmArg<2>]>;
+ [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
}
// Test instruction with bitwise comparison.
@@ -834,66 +834,66 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse42_pcmpistrm128 : GCCBuiltin<"__builtin_ia32_pcmpistrm128">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpistri128 : GCCBuiltin<"__builtin_ia32_pcmpistri128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpistria128 : GCCBuiltin<"__builtin_ia32_pcmpistria128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpistric128 : GCCBuiltin<"__builtin_ia32_pcmpistric128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpistrio128 : GCCBuiltin<"__builtin_ia32_pcmpistrio128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpistris128 : GCCBuiltin<"__builtin_ia32_pcmpistris128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpistriz128 : GCCBuiltin<"__builtin_ia32_pcmpistriz128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sse42_pcmpestrm128 : GCCBuiltin<"__builtin_ia32_pcmpestrm128">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_sse42_pcmpestri128 : GCCBuiltin<"__builtin_ia32_pcmpestri128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_sse42_pcmpestria128 : GCCBuiltin<"__builtin_ia32_pcmpestria128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_sse42_pcmpestric128 : GCCBuiltin<"__builtin_ia32_pcmpestric128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_sse42_pcmpestrio128 : GCCBuiltin<"__builtin_ia32_pcmpestrio128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_sse42_pcmpestris128 : GCCBuiltin<"__builtin_ia32_pcmpestris128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_sse42_pcmpestriz128 : GCCBuiltin<"__builtin_ia32_pcmpestriz128">,
Intrinsic<[llvm_i32_ty],
[llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
llvm_i8_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
}
//===----------------------------------------------------------------------===//
@@ -902,14 +902,14 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse4a_extrqi : GCCBuiltin<"__builtin_ia32_extrqi">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
def int_x86_sse4a_extrq : GCCBuiltin<"__builtin_ia32_extrq">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_sse4a_insertqi : GCCBuiltin<"__builtin_ia32_insertqi">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
llvm_i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_x86_sse4a_insertq : GCCBuiltin<"__builtin_ia32_insertq">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
}
@@ -946,10 +946,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_round_pd_256 : GCCBuiltin<"__builtin_ia32_roundpd256">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx_round_ps_256 : GCCBuiltin<"__builtin_ia32_roundps256">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<1>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
// Horizontal ops
@@ -1101,33 +1101,33 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v16qi">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_vgf2p8affineinvqb_256 :
GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v32qi">,
Intrinsic<[llvm_v32i8_ty],
[llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_vgf2p8affineinvqb_512 :
GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v64qi">,
Intrinsic<[llvm_v64i8_ty],
[llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_vgf2p8affineqb_128 :
GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v16qi">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_vgf2p8affineqb_256 :
GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v32qi">,
Intrinsic<[llvm_v32i8_ty],
[llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_vgf2p8affineqb_512 :
GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v64qi">,
Intrinsic<[llvm_v64i8_ty],
[llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_vgf2p8mulb_128 :
GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v16qi">,
@@ -1161,17 +1161,17 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_dp_ps_256 : GCCBuiltin<"__builtin_ia32_dpps256">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem, Commutative, ImmArg<2>]>;
+ [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
}
// Vector compare
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_cmp_pd_256 :
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx_cmp_ps_256 :
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
// Vector convert
@@ -1238,30 +1238,30 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_fpclass_pd_128 :
Intrinsic<[llvm_v2i1_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_fpclass_pd_256 :
Intrinsic<[llvm_v4i1_ty], [llvm_v4f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_fpclass_pd_512 :
Intrinsic<[llvm_v8i1_ty], [llvm_v8f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_fpclass_ps_128 :
Intrinsic<[llvm_v4i1_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_fpclass_ps_256 :
Intrinsic<[llvm_v8i1_ty], [llvm_v8f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_fpclass_ps_512 :
Intrinsic<[llvm_v16i1_ty], [llvm_v16f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_fpclass_sd :
GCCBuiltin<"__builtin_ia32_fpclasssd_mask">,
Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_fpclass_ss :
GCCBuiltin<"__builtin_ia32_fpclassss_mask">,
Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
// Vector extract sign mask
@@ -1275,9 +1275,9 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Vector zero
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_vzeroall : GCCBuiltin<"__builtin_ia32_vzeroall">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
def int_x86_avx_vzeroupper : GCCBuiltin<"__builtin_ia32_vzeroupper">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
}
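// Note on the vzeroall/vzeroupper change above (previously an empty
// attribute list): pairing IntrNoMem with IntrHasSideEffects is, as far as
// I can tell, the idiom for an intrinsic that touches no memory yet must
// not be deleted or merged, since it clobbers register state (the upper
// YMM lanes) that IR does not otherwise model:
//
//   def int_x86_example : Intrinsic<[], [],  // hypothetical
//                                   [IntrNoMem, IntrHasSideEffects]>;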
// SIMD load ops
@@ -1707,68 +1707,68 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_gather_d_pd : GCCBuiltin<"__builtin_ia32_gatherd_pd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_pd_256 : GCCBuiltin<"__builtin_ia32_gatherd_pd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_pd : GCCBuiltin<"__builtin_ia32_gatherq_pd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_pd_256 : GCCBuiltin<"__builtin_ia32_gatherq_pd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_ps : GCCBuiltin<"__builtin_ia32_gatherd_ps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_ps_256 : GCCBuiltin<"__builtin_ia32_gatherd_ps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_ps : GCCBuiltin<"__builtin_ia32_gatherq_ps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_ps_256 : GCCBuiltin<"__builtin_ia32_gatherq_ps256">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_q : GCCBuiltin<"__builtin_ia32_gatherd_q">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_q_256 : GCCBuiltin<"__builtin_ia32_gatherd_q256">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_q : GCCBuiltin<"__builtin_ia32_gatherq_q">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_q_256 : GCCBuiltin<"__builtin_ia32_gatherq_q256">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_d : GCCBuiltin<"__builtin_ia32_gatherd_d">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_d_d_256 : GCCBuiltin<"__builtin_ia32_gatherd_d256">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_d : GCCBuiltin<"__builtin_ia32_gatherq_d">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx2_gather_q_d_256 : GCCBuiltin<"__builtin_ia32_gatherq_d256">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
}
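// Note: the AVX2 gathers above all share one operand shape, sketched here
// with descriptive placeholder names; operand 4 is the scale byte, which
// the instruction encoding requires to be a literal, hence the recurring
// ImmArg<ArgIndex<4>>:
//
//   Intrinsic<[result_vec],
//             [passthru_vec, base_ptr, index_vec, mask_vec,
//              scale_i8 /* immediate */],
//             [IntrReadMem, ImmArg<ArgIndex<4>>]>;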
// Misc.
@@ -1780,42 +1780,60 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v32i8_ty], [IntrNoMem]>;
def int_x86_avx2_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw256">,
Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_i8_ty], [IntrNoMem, Commutative, ImmArg<2>]>;
+ llvm_i8_ty], [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
}
//===----------------------------------------------------------------------===//
// FMA3 and FMA4
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_fma_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+
def int_x86_avx512_vfmadd_pd_512 :
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_vfmadd_ps_512 :
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
- // TODO: Can we use 2 vfmadds+shufflevector?
def int_x86_avx512_vfmaddsub_pd_512 :
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_vfmaddsub_ps_512 :
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_vfmadd_f64 :
Intrinsic<[llvm_double_ty],
[llvm_double_ty, llvm_double_ty, llvm_double_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_vfmadd_f32 :
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_vpmadd52h_uq_128 :
GCCBuiltin<"__builtin_ia32_vpmadd52huq128">,
@@ -1905,23 +1923,23 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_xop_vpermil2pd : GCCBuiltin<"__builtin_ia32_vpermil2pd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_v2i64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_xop_vpermil2pd_256 :
GCCBuiltin<"__builtin_ia32_vpermil2pd256">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
llvm_v4i64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_xop_vpermil2ps : GCCBuiltin<"__builtin_ia32_vpermil2ps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_v4i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_xop_vpermil2ps_256 :
GCCBuiltin<"__builtin_ia32_vpermil2ps256">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
llvm_v8i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_xop_vfrcz_pd : GCCBuiltin<"__builtin_ia32_vfrczpd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
@@ -2092,19 +2110,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_lwpins32 :
GCCBuiltin<"__builtin_ia32_lwpins32">,
Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [ImmArg<2>]>;
+ [ImmArg<ArgIndex<2>>]>;
def int_x86_lwpins64 :
GCCBuiltin<"__builtin_ia32_lwpins64">,
Intrinsic<[llvm_i8_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
- [ImmArg<2>]>;
+ [ImmArg<ArgIndex<2>>]>;
def int_x86_lwpval32 :
GCCBuiltin<"__builtin_ia32_lwpval32">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [ImmArg<2>]>;
+ [ImmArg<ArgIndex<2>>]>;
def int_x86_lwpval64 :
GCCBuiltin<"__builtin_ia32_lwpval64">,
Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
- [ImmArg<2>]>;
+ [ImmArg<ArgIndex<2>>]>;
}
//===----------------------------------------------------------------------===//
@@ -2405,15 +2423,15 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
- llvm_x86mmx_ty, llvm_i8_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_x86mmx_ty, llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_mmx_pextr_w : GCCBuiltin<"__builtin_ia32_vec_ext_v4hi">,
Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_mmx_pinsr_w : GCCBuiltin<"__builtin_ia32_vec_set_v4hi">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
//===----------------------------------------------------------------------===//
@@ -2528,38 +2546,28 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Half float conversion
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
- def int_x86_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph">,
Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
- def int_x86_avx512_mask_vcvtph2ps_512 : GCCBuiltin<"__builtin_ia32_vcvtph2ps512_mask">,
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+ def int_x86_avx512_mask_vcvtph2ps_512 :
Intrinsic<[llvm_v16f32_ty], [llvm_v16i16_ty, llvm_v16f32_ty,
llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
- def int_x86_avx512_mask_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty, llvm_v8f32_ty,
- llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty, llvm_v4f32_ty,
- llvm_i8_ty], [IntrNoMem]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_vcvtps2ph_512 : GCCBuiltin<"__builtin_ia32_vcvtps2ph512_mask">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16f32_ty, llvm_i32_ty,
llvm_v16i16_ty, llvm_i16_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty,
llvm_v8i16_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty,
llvm_v8i16_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
//===----------------------------------------------------------------------===//
@@ -2568,10 +2576,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_tbm_bextri_u32 : GCCBuiltin<"__builtin_ia32_bextri_u32">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_tbm_bextri_u64 : GCCBuiltin<"__builtin_ia32_bextri_u64">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
//===----------------------------------------------------------------------===//
@@ -2617,7 +2625,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_xend : GCCBuiltin<"__builtin_ia32_xend">,
Intrinsic<[], [], []>;
def int_x86_xabort : GCCBuiltin<"__builtin_ia32_xabort">,
- Intrinsic<[], [llvm_i8_ty], [ImmArg<0>]>;
+ Intrinsic<[], [llvm_i8_ty], [ImmArg<ArgIndex<0>>]>;
def int_x86_xtest : GCCBuiltin<"__builtin_ia32_xtest">,
Intrinsic<[llvm_i32_ty], [], []>;
}
@@ -2659,70 +2667,70 @@ let TargetPrefix = "x86" in {
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_cvttss2si : GCCBuiltin<"__builtin_ia32_vcvttss2si32">,
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvttss2si64 : GCCBuiltin<"__builtin_ia32_vcvttss2si64">,
Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvttss2usi : GCCBuiltin<"__builtin_ia32_vcvttss2usi32">,
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvttss2usi64 : GCCBuiltin<"__builtin_ia32_vcvttss2usi64">,
Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvtusi2ss : GCCBuiltin<"__builtin_ia32_cvtusi2ss32">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cvtusi642ss : GCCBuiltin<"__builtin_ia32_cvtusi2ss64">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cvttsd2si : GCCBuiltin<"__builtin_ia32_vcvttsd2si32">,
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_vcvttsd2si64">,
Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvttsd2usi : GCCBuiltin<"__builtin_ia32_vcvttsd2usi32">,
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvttsd2usi64 : GCCBuiltin<"__builtin_ia32_vcvttsd2usi64">,
Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvtusi642sd : GCCBuiltin<"__builtin_ia32_cvtusi2sd64">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_vcvtss2usi32 : GCCBuiltin<"__builtin_ia32_vcvtss2usi32">,
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtss2usi64 : GCCBuiltin<"__builtin_ia32_vcvtss2usi64">,
Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtss2si32 : GCCBuiltin<"__builtin_ia32_vcvtss2si32">,
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtss2si64 : GCCBuiltin<"__builtin_ia32_vcvtss2si64">,
Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtsd2usi32 : GCCBuiltin<"__builtin_ia32_vcvtsd2usi32">,
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtsd2usi64 : GCCBuiltin<"__builtin_ia32_vcvtsd2usi64">,
Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtsd2si32 : GCCBuiltin<"__builtin_ia32_vcvtsd2si32">,
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_vcvtsd2si64 : GCCBuiltin<"__builtin_ia32_vcvtsd2si64">,
Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_cvtsi2ss32 : GCCBuiltin<"__builtin_ia32_cvtsi2ss32">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cvtsi2ss64 : GCCBuiltin<"__builtin_ia32_cvtsi2ss64">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cvtsi2sd64 : GCCBuiltin<"__builtin_ia32_cvtsi2sd64">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
// Pack ops.
@@ -2745,11 +2753,11 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_sitofp_round :
Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_uitofp_round :
Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_cvtpd2dq_128 :
GCCBuiltin<"__builtin_ia32_cvtpd2dq128_mask">,
@@ -2761,25 +2769,25 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtpd2dq512_mask">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtpd2ps_512 :
GCCBuiltin<"__builtin_ia32_cvtpd2ps512_mask">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f64_ty, llvm_v8f32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtsd2ss_round :
GCCBuiltin<"__builtin_ia32_cvtsd2ss_round_mask">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v2f64_ty, llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_cvtss2sd_round :
GCCBuiltin<"__builtin_ia32_cvtss2sd_round_mask">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v4f32_ty, llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_cvtpd2ps :
GCCBuiltin<"__builtin_ia32_cvtpd2ps_mask">,
@@ -2803,7 +2811,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtpd2qq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtpd2udq_128 :
GCCBuiltin<"__builtin_ia32_cvtpd2udq128_mask">,
@@ -2821,7 +2829,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtpd2udq512_mask">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtpd2uqq_128 :
GCCBuiltin<"__builtin_ia32_cvtpd2uqq128_mask">,
@@ -2839,7 +2847,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtpd2uqq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtps2dq_128 :
GCCBuiltin<"__builtin_ia32_cvtps2dq128_mask">,
@@ -2857,13 +2865,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtps2dq512_mask">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtps2pd_512 :
GCCBuiltin<"__builtin_ia32_cvtps2pd512_mask">,
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f32_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtps2qq_128 :
GCCBuiltin<"__builtin_ia32_cvtps2qq128_mask">,
@@ -2881,7 +2889,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtps2qq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtps2udq_128 :
GCCBuiltin<"__builtin_ia32_cvtps2udq128_mask">,
@@ -2899,7 +2907,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtps2udq512_mask">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtps2uqq_128 :
GCCBuiltin<"__builtin_ia32_cvtps2uqq128_mask">,
@@ -2917,7 +2925,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvtps2uqq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtqq2ps_128 :
GCCBuiltin<"__builtin_ia32_cvtqq2ps128_mask">,
@@ -2935,7 +2943,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttpd2dq512_mask">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttpd2qq_128 :
GCCBuiltin<"__builtin_ia32_cvttpd2qq128_mask">,
@@ -2953,7 +2961,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttpd2qq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttpd2udq_128 :
GCCBuiltin<"__builtin_ia32_cvttpd2udq128_mask">,
@@ -2971,7 +2979,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttpd2udq512_mask">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttpd2uqq_128 :
GCCBuiltin<"__builtin_ia32_cvttpd2uqq128_mask">,
@@ -2989,13 +2997,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttpd2uqq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttps2dq_512 :
GCCBuiltin<"__builtin_ia32_cvttps2dq512_mask">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttps2qq_128 :
GCCBuiltin<"__builtin_ia32_cvttps2qq128_mask">,
@@ -3013,7 +3021,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttps2qq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttps2udq_128 :
GCCBuiltin<"__builtin_ia32_cvttps2udq128_mask">,
@@ -3031,7 +3039,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttps2udq512_mask">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvttps2uqq_128 :
GCCBuiltin<"__builtin_ia32_cvttps2uqq128_mask">,
@@ -3049,7 +3057,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_cvttps2uqq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_cvtuqq2ps_128 :
GCCBuiltin<"__builtin_ia32_cvtuqq2ps128_mask">,
@@ -3060,75 +3068,75 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_rndscale_pd_128 : GCCBuiltin<"__builtin_ia32_rndscalepd_128_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i32_ty,
llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_rndscale_pd_256 : GCCBuiltin<"__builtin_ia32_rndscalepd_256_mask">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_rndscale_pd_512 : GCCBuiltin<"__builtin_ia32_rndscalepd_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_rndscale_ps_128 : GCCBuiltin<"__builtin_ia32_rndscaleps_128_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty,
llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_rndscale_ps_256 : GCCBuiltin<"__builtin_ia32_rndscaleps_256_mask">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_rndscale_ps_512 : GCCBuiltin<"__builtin_ia32_rndscaleps_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_reduce_pd_128 : GCCBuiltin<"__builtin_ia32_reducepd128_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i32_ty,
llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_reduce_pd_256 : GCCBuiltin<"__builtin_ia32_reducepd256_mask">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_reduce_pd_512 : GCCBuiltin<"__builtin_ia32_reducepd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_reduce_ps_128 : GCCBuiltin<"__builtin_ia32_reduceps128_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty,
llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_reduce_ps_256 : GCCBuiltin<"__builtin_ia32_reduceps256_mask">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_reduce_ps_512 : GCCBuiltin<"__builtin_ia32_reduceps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_range_pd_128 : GCCBuiltin<"__builtin_ia32_rangepd128_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty,
llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mask_range_pd_256 : GCCBuiltin<"__builtin_ia32_rangepd256_mask">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty,
llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mask_range_pd_512 : GCCBuiltin<"__builtin_ia32_rangepd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty,
llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_range_ps_128 : GCCBuiltin<"__builtin_ia32_rangeps128_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty,
llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mask_range_ps_256 : GCCBuiltin<"__builtin_ia32_rangeps256_mask">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty,
llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mask_range_ps_512 : GCCBuiltin<"__builtin_ia32_rangeps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty,
llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
}
// Vector load with broadcast
@@ -3158,111 +3166,111 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_add_ps_512 : GCCBuiltin<"__builtin_ia32_addps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_add_pd_512 : GCCBuiltin<"__builtin_ia32_addpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_sub_ps_512 : GCCBuiltin<"__builtin_ia32_subps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_sub_pd_512 : GCCBuiltin<"__builtin_ia32_subpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mul_ps_512 : GCCBuiltin<"__builtin_ia32_mulps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mul_pd_512 : GCCBuiltin<"__builtin_ia32_mulpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_div_ps_512 : GCCBuiltin<"__builtin_ia32_divps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_div_pd_512 : GCCBuiltin<"__builtin_ia32_divpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_max_ps_512 : GCCBuiltin<"__builtin_ia32_maxps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_max_pd_512 : GCCBuiltin<"__builtin_ia32_maxpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_min_ps_512 : GCCBuiltin<"__builtin_ia32_minps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_min_pd_512 : GCCBuiltin<"__builtin_ia32_minpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mask_add_ss_round : GCCBuiltin<"__builtin_ia32_addss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_div_ss_round : GCCBuiltin<"__builtin_ia32_divss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_mul_ss_round : GCCBuiltin<"__builtin_ia32_mulss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_sub_ss_round : GCCBuiltin<"__builtin_ia32_subss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_max_ss_round : GCCBuiltin<"__builtin_ia32_maxss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_min_ss_round : GCCBuiltin<"__builtin_ia32_minss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_add_sd_round : GCCBuiltin<"__builtin_ia32_addsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_div_sd_round : GCCBuiltin<"__builtin_ia32_divsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_mul_sd_round : GCCBuiltin<"__builtin_ia32_mulsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_sub_sd_round : GCCBuiltin<"__builtin_ia32_subsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_max_sd_round : GCCBuiltin<"__builtin_ia32_maxsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_min_sd_round : GCCBuiltin<"__builtin_ia32_minsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<4>]>;
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_rndscale_ss : GCCBuiltin<"__builtin_ia32_rndscaless_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_rndscale_sd : GCCBuiltin<"__builtin_ia32_rndscalesd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_range_ss : GCCBuiltin<"__builtin_ia32_rangess128_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_range_sd : GCCBuiltin<"__builtin_ia32_rangesd128_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_reduce_ss : GCCBuiltin<"__builtin_ia32_reducess_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_reduce_sd : GCCBuiltin<"__builtin_ia32_reducesd_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>, ImmArg<5>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_scalef_sd : GCCBuiltin<"__builtin_ia32_scalefsd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scalef_ss : GCCBuiltin<"__builtin_ia32_scalefss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scalef_pd_128 : GCCBuiltin<"__builtin_ia32_scalefpd128_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
@@ -3272,7 +3280,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_scalef_pd_512 : GCCBuiltin<"__builtin_ia32_scalefpd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scalef_ps_128 : GCCBuiltin<"__builtin_ia32_scalefps128_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
@@ -3282,103 +3290,103 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_scalef_ps_512 : GCCBuiltin<"__builtin_ia32_scalefps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_sqrt_ss :
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_sqrt_sd :
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_sqrt_pd_512 :
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_sqrt_ps_512 :
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_fixupimm_pd_128 :
GCCBuiltin<"__builtin_ia32_fixupimmpd128_mask">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_maskz_fixupimm_pd_128 :
GCCBuiltin<"__builtin_ia32_fixupimmpd128_maskz">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_fixupimm_pd_256 :
GCCBuiltin<"__builtin_ia32_fixupimmpd256_mask">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_maskz_fixupimm_pd_256 :
GCCBuiltin<"__builtin_ia32_fixupimmpd256_maskz">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_fixupimm_pd_512 :
GCCBuiltin<"__builtin_ia32_fixupimmpd512_mask">,
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_maskz_fixupimm_pd_512 :
GCCBuiltin<"__builtin_ia32_fixupimmpd512_maskz">,
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_fixupimm_ps_128 :
GCCBuiltin<"__builtin_ia32_fixupimmps128_mask">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_maskz_fixupimm_ps_128 :
GCCBuiltin<"__builtin_ia32_fixupimmps128_maskz">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_fixupimm_ps_256 :
GCCBuiltin<"__builtin_ia32_fixupimmps256_mask">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_maskz_fixupimm_ps_256 :
GCCBuiltin<"__builtin_ia32_fixupimmps256_maskz">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_fixupimm_ps_512 :
GCCBuiltin<"__builtin_ia32_fixupimmps512_mask">,
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16i32_ty, llvm_i32_ty,
- llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_maskz_fixupimm_ps_512 :
GCCBuiltin<"__builtin_ia32_fixupimmps512_maskz">,
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16i32_ty, llvm_i32_ty,
- llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_fixupimm_sd :
GCCBuiltin<"__builtin_ia32_fixupimmsd_mask">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_maskz_fixupimm_sd :
GCCBuiltin<"__builtin_ia32_fixupimmsd_maskz">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_fixupimm_ss :
GCCBuiltin<"__builtin_ia32_fixupimmss_mask">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_maskz_fixupimm_ss :
GCCBuiltin<"__builtin_ia32_fixupimmss_maskz">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>, ImmArg<5>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_getexp_pd_128 : GCCBuiltin<"__builtin_ia32_getexppd128_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i8_ty], [IntrNoMem]>;
@@ -3388,7 +3396,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_getexp_pd_512 : GCCBuiltin<"__builtin_ia32_getexppd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_getexp_ps_128 : GCCBuiltin<"__builtin_ia32_getexpps128_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i8_ty], [IntrNoMem]>;
@@ -3398,64 +3406,64 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_getexp_ps_512 : GCCBuiltin<"__builtin_ia32_getexpps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_mask_getexp_ss : GCCBuiltin<"__builtin_ia32_getexpss128_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_getexp_sd : GCCBuiltin<"__builtin_ia32_getexpsd128_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_getmant_pd_128 :
GCCBuiltin<"__builtin_ia32_getmantpd128_mask">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty,llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_getmant_pd_256 :
GCCBuiltin<"__builtin_ia32_getmantpd256_mask">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty,llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_getmant_pd_512 :
GCCBuiltin<"__builtin_ia32_getmantpd512_mask">,
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty,llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty,llvm_i32_ty ],
- [IntrNoMem, ImmArg<1>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_getmant_ps_128 :
GCCBuiltin<"__builtin_ia32_getmantps128_mask">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_getmant_ps_256 :
GCCBuiltin<"__builtin_ia32_getmantps256_mask">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<1>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_x86_avx512_mask_getmant_ps_512 :
GCCBuiltin<"__builtin_ia32_getmantps512_mask">,
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty,llvm_i32_ty, llvm_v16f32_ty,llvm_i16_ty,llvm_i32_ty],
- [IntrNoMem, ImmArg<1>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_getmant_ss :
GCCBuiltin<"__builtin_ia32_getmantss_round_mask">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,
- llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>, ImmArg<5>]>;
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_mask_getmant_sd :
GCCBuiltin<"__builtin_ia32_getmantsd_round_mask">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,
- llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<2>, ImmArg<5>]>;
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;
def int_x86_avx512_rsqrt14_ss : GCCBuiltin<"__builtin_ia32_rsqrt14ss_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
@@ -3510,41 +3518,41 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_rcp28_ps : GCCBuiltin<"__builtin_ia32_rcp28ps_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<3>]>;
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_rcp28_pd : GCCBuiltin<"__builtin_ia32_rcp28pd_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<3>]>;
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_exp2_ps : GCCBuiltin<"__builtin_ia32_exp2ps_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<3>]>;
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_exp2_pd : GCCBuiltin<"__builtin_ia32_exp2pd_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<3>]>;
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_rcp28_ss : GCCBuiltin<"__builtin_ia32_rcp28ss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_rcp28_sd : GCCBuiltin<"__builtin_ia32_rcp28sd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_rsqrt28_ps : GCCBuiltin<"__builtin_ia32_rsqrt28ps_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_i16_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_rsqrt28_pd : GCCBuiltin<"__builtin_ia32_rsqrt28pd_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_rsqrt28_ss : GCCBuiltin<"__builtin_ia32_rsqrt28ss_round_mask">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_rsqrt28_sd : GCCBuiltin<"__builtin_ia32_rsqrt28sd_round_mask">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_psad_bw_512 : GCCBuiltin<"__builtin_ia32_psadbw512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
[IntrNoMem, Commutative]>;
@@ -3574,19 +3582,19 @@ let TargetPrefix = "x86" in {
GCCBuiltin<"__builtin_ia32_dbpsadbw128">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_dbpsadbw_256 :
GCCBuiltin<"__builtin_ia32_dbpsadbw256">,
Intrinsic<[llvm_v16i16_ty],
[llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_dbpsadbw_512 :
GCCBuiltin<"__builtin_ia32_dbpsadbw512">,
Intrinsic<[llvm_v32i16_ty],
[llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
// Gather and Scatter ops
@@ -3597,117 +3605,117 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather_dpd_512 :
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_dps_512 :
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_qpd_512 :
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_qps_512 :
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_dpq_512 :
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_dpi_512 :
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_qpq_512 :
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather_qpi_512 :
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div2_df :
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div2_di :
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div4_df :
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div4_di :
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div4_sf :
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div4_si :
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div8_sf :
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3div8_si :
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv2_df :
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv2_di :
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv4_df :
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv4_di :
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv4_sf :
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv4_si :
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv8_sf :
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gather3siv8_si :
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
// scatter
// NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
@@ -3716,149 +3724,149 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_scatter_dpd_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_dps_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_qpd_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_qps_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_dpq_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_dpi_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_qpq_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,llvm_v8i64_ty, llvm_v8i64_ty,
llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatter_qpi_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i32_ty,
llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv2_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv2_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv4_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv4_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv4_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv4_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv8_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterdiv8_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv2_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv2_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv4_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv4_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv4_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv4_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv8_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scattersiv8_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
// gather prefetch
// NOTE: These can't be ArgMemOnly because you can put the address completely
// in the index register.
def int_x86_avx512_gatherpf_dpd_512 : GCCBuiltin<"__builtin_ia32_gatherpfdpd">,
Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gatherpf_dps_512 : GCCBuiltin<"__builtin_ia32_gatherpfdps">,
Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gatherpf_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherpfqpd">,
Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_gatherpf_qps_512 : GCCBuiltin<"__builtin_ia32_gatherpfqps">,
Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
// scatter prefetch
// NOTE: These can't be ArgMemOnly because you can put the address completely
// in the index register.
def int_x86_avx512_scatterpf_dpd_512 : GCCBuiltin<"__builtin_ia32_scatterpfdpd">,
Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterpf_dps_512 : GCCBuiltin<"__builtin_ia32_scatterpfdps">,
Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterpf_qpd_512 : GCCBuiltin<"__builtin_ia32_scatterpfqpd">,
Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_scatterpf_qps_512 : GCCBuiltin<"__builtin_ia32_scatterpfqps">,
Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<3>, ImmArg<4>]>;
+ llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
}
// AVX512 gather/scatter intrinsics that use vXi1 masks.
@@ -3868,134 +3876,134 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_mask_gather_dpd_512 :
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_dps_512 :
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_qpd_512 :
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_qps_512 :
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_dpq_512 :
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_dpi_512 :
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_qpq_512 :
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather_qpi_512 :
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div2_df :
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div2_di :
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div4_df :
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div4_di :
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div4_sf :
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div4_si :
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div8_sf :
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3div8_si :
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv2_df :
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv2_di :
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv4_df :
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv4_di :
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv4_sf :
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv4_si :
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv8_sf :
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_gather3siv8_si :
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
- [IntrReadMem, ImmArg<4>]>;
+ [IntrReadMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_dpd_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_dps_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_qpd_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_qps_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
// NOTE: These can't be ArgMemOnly because you can put the address completely
@@ -4003,99 +4011,99 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_mask_scatter_dpq_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_dpi_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_qpq_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,llvm_v8i64_ty, llvm_v8i64_ty,
llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatter_qpi_512 :
Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i64_ty, llvm_v8i32_ty,
llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv2_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv2_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv4_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv4_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv4_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv4_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv8_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scatterdiv8_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv2_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv2_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv4_df :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv4_di :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv4_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv4_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv8_sf :
Intrinsic<[],
[llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_scattersiv8_si :
Intrinsic<[],
[llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
- [ImmArg<4>]>;
+ [ImmArg<ArgIndex<4>>]>;
}
// AVX-512 conflict detection instruction
@@ -4128,11 +4136,11 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_vcomi_sd : GCCBuiltin<"__builtin_ia32_vcomisd">,
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_vcomi_ss : GCCBuiltin<"__builtin_ia32_vcomiss">,
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
}
// Compress, Expand
@@ -4676,37 +4684,37 @@ let TargetPrefix = "x86" in {
GCCBuiltin<"__builtin_ia32_pternlogd128">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_pternlog_d_256 :
GCCBuiltin<"__builtin_ia32_pternlogd256">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_pternlog_d_512 :
GCCBuiltin<"__builtin_ia32_pternlogd512">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<3>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_pternlog_q_128 :
GCCBuiltin<"__builtin_ia32_pternlogq128">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_pternlog_q_256 :
GCCBuiltin<"__builtin_ia32_pternlogq256">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_pternlog_q_512 :
GCCBuiltin<"__builtin_ia32_pternlogq512">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
}
// vp2intersect
@@ -4744,34 +4752,34 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_cmp_ps_512 :
Intrinsic<[llvm_v16i1_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_cmp_pd_512 :
Intrinsic<[llvm_v8i1_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<3>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_x86_avx512_cmp_ps_256 :
Intrinsic<[llvm_v8i1_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cmp_pd_256 :
Intrinsic<[llvm_v4i1_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cmp_ps_128 :
Intrinsic<[llvm_v4i1_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_cmp_pd_128 :
Intrinsic<[llvm_v2i1_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_i32_ty], [IntrNoMem, ImmArg<2>]>;
+ llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_avx512_mask_cmp_ss :
GCCBuiltin<"__builtin_ia32_cmpss_mask">,
Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
llvm_i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_mask_cmp_sd :
GCCBuiltin<"__builtin_ia32_cmpsd_mask">,
Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_i32_ty, llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<2>, ImmArg<4>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<4>>]>;
}
//===----------------------------------------------------------------------===//
@@ -4779,7 +4787,7 @@ let TargetPrefix = "x86" in {
let TargetPrefix = "x86" in {
def int_x86_sha1rnds4 : GCCBuiltin<"__builtin_ia32_sha1rnds4">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrNoMem, ImmArg<2>]>;
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_x86_sha1nexte : GCCBuiltin<"__builtin_ia32_sha1nexte">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_sha1msg1 : GCCBuiltin<"__builtin_ia32_sha1msg1">,
@@ -4922,3 +4930,50 @@ let TargetPrefix = "x86" in {
def int_x86_enqcmds : GCCBuiltin<"__builtin_ia32_enqcmds">,
Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_ptr_ty], []>;
}
+
+//===----------------------------------------------------------------------===//
+// SERIALIZE - Serialize instruction fetch and execution
+
+let TargetPrefix = "x86" in {
+ def int_x86_serialize : GCCBuiltin<"__builtin_ia32_serialize">,
+ Intrinsic<[], [], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// TSXLDTRK - TSX Suspend Load Address Tracking
+
+let TargetPrefix = "x86" in {
+ def int_x86_xsusldtrk : GCCBuiltin<"__builtin_ia32_xsusldtrk">,
+ Intrinsic<[], [], []>;
+ def int_x86_xresldtrk : GCCBuiltin<"__builtin_ia32_xresldtrk">,
+ Intrinsic<[], [], []>;
+}
+//===----------------------------------------------------------------------===//
+// AMX - Intel AMX extensions
+
+let TargetPrefix = "x86" in {
+ def int_x86_ldtilecfg : GCCBuiltin<"__builtin_ia32_tile_loadconfig">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_sttilecfg : GCCBuiltin<"__builtin_ia32_tile_storeconfig">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_tilerelease : GCCBuiltin<"__builtin_ia32_tilerelease">,
+ Intrinsic<[], [], []>;
+ def int_x86_tilezero : GCCBuiltin<"__builtin_ia32_tilezero">,
+ Intrinsic<[], [llvm_i8_ty], []>;
+ def int_x86_tileloadd64 : GCCBuiltin<"__builtin_ia32_tileloadd64">,
+ Intrinsic<[], [llvm_i8_ty, llvm_ptr_ty, llvm_i64_ty], []>;
+ def int_x86_tileloaddt164 : GCCBuiltin<"__builtin_ia32_tileloaddt164">,
+ Intrinsic<[], [llvm_i8_ty, llvm_ptr_ty, llvm_i64_ty], []>;
+ def int_x86_tilestored64 : GCCBuiltin<"__builtin_ia32_tilestored64">,
+ Intrinsic<[], [llvm_i8_ty, llvm_ptr_ty, llvm_i64_ty], []>;
+ def int_x86_tdpbssd : GCCBuiltin<"__builtin_ia32_tdpbssd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty], []>;
+ def int_x86_tdpbsud : GCCBuiltin<"__builtin_ia32_tdpbsud">,
+ Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty], []>;
+ def int_x86_tdpbusd : GCCBuiltin<"__builtin_ia32_tdpbusd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty], []>;
+ def int_x86_tdpbuud : GCCBuiltin<"__builtin_ia32_tdpbuud">,
+ Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty], []>;
+ def int_x86_tdpbf16ps : GCCBuiltin<"__builtin_ia32_tdpbf16ps">,
+ Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty], []>;
+}
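
The bulk of the X86 changes above are mechanical: every positional attribute such as ImmArg<2> is rewritten as ImmArg<ArgIndex<2>>, making explicit that the index names an argument position. Below is a minimal C++ sketch of how the resulting attribute can be checked on a call site; it assumes a CallBase obtained elsewhere and is illustrative rather than the verifier's actual code.

#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Every operand marked ImmArg in the .td definition must be an immediate
// integer constant at each call site.
static bool immArgsAreConstant(const CallBase &Call) {
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
    if (Call.paramHasAttr(I, Attribute::ImmArg) &&
        !isa<ConstantInt>(Call.getArgOperand(I)))
      return false;
  return true;
}
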
diff --git a/llvm/include/llvm/IR/IntrinsicsXCore.td b/llvm/include/llvm/IR/IntrinsicsXCore.td
index 7fe8bdfd3bd0..89dbc65fea44 100644
--- a/llvm/include/llvm/IR/IntrinsicsXCore.td
+++ b/llvm/include/llvm/IR/IntrinsicsXCore.td
@@ -38,58 +38,58 @@ let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
// Resource instructions.
def int_xcore_getr : Intrinsic<[llvm_anyptr_ty],[llvm_i32_ty]>;
def int_xcore_freer : Intrinsic<[],[llvm_anyptr_ty],
- [NoCapture<0>]>;
- def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
+ def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<ArgIndex<0>>]>;
def int_xcore_int : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_inct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_out : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_outt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_outct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_chkct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_testct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_testwct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_setd : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_setc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_inshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_outshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_setpt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_clrpt : Intrinsic<[],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_getts : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_syncr : Intrinsic<[],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_settw : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_setv : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_setev : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
- def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
- def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
+ def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
+ def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_setclk : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
- [NoCapture<0>, NoCapture<1>]>;
+ [NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>]>;
def int_xcore_setrdy : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
- [NoCapture<0>, NoCapture<1>]>;
+ [NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>]>;
def int_xcore_setpsc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_peek : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_endin : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
// Intrinsics for events.
def int_xcore_waitevent : Intrinsic<[llvm_ptr_ty],[], [IntrReadMem]>;
@@ -103,18 +103,18 @@ let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
// Intrinsics for threads.
def int_xcore_getst : Intrinsic <[llvm_anyptr_ty],[llvm_anyptr_ty],
- [NoCapture<0>]>;
- def int_xcore_msync : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
+ def int_xcore_msync : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_ssync : Intrinsic <[],[]>;
- def int_xcore_mjoin : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ def int_xcore_mjoin : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_initsp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_initpc : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_initlr : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_initcp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
def int_xcore_initdp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
- [NoCapture<0>]>;
+ [NoCapture<ArgIndex<0>>]>;
}
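
The XCore updates follow the same scheme, wrapping pointer-argument attributes as NoCapture<ArgIndex<0>>. A short hedged sketch of what that surfaces as at the IR level, namely the nocapture attribute on the corresponding call operand:

#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// NoCapture<ArgIndex<0>> in the .td file marks parameter 0 nocapture on the
// generated intrinsic declaration; call sites can be queried directly.
static bool firstArgIsNoCapture(const CallBase &Call) {
  return Call.doesNotCapture(0);
}
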
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index 39d19b7cffd9..c465e02c2fc5 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -31,12 +31,17 @@ class LLVMContextImpl;
class Module;
class OptPassGate;
template <typename T> class SmallVectorImpl;
+template <typename T> class StringMapEntry;
class SMDiagnostic;
class StringRef;
class Twine;
-class RemarkStreamer;
+class LLVMRemarkStreamer;
class raw_ostream;
+namespace remarks {
+class RemarkStreamer;
+}
+
namespace SyncScope {
typedef uint8_t ID;
@@ -79,12 +84,15 @@ public:
/// Known operand bundle tag IDs, which always have the same value. All
/// operand bundle tags that LLVM has special knowledge of are listed here.
/// Additionally, this scheme allows LLVM to efficiently check for specific
- /// operand bundle tags without comparing strings.
+ /// operand bundle tags without comparing strings. Keep this in sync with
+ /// LLVMContext::LLVMContext().
enum : unsigned {
OB_deopt = 0, // "deopt"
OB_funclet = 1, // "funclet"
OB_gc_transition = 2, // "gc-transition"
OB_cfguardtarget = 3, // "cfguardtarget"
+ OB_preallocated = 4, // "preallocated"
+ OB_gc_live = 5, // "gc-live"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
@@ -101,6 +109,10 @@ public:
/// \see LLVMContext::getOperandBundleTagID
void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
+ /// getOrInsertBundleTag - Returns the Tag to use for an operand bundle of
+ /// name TagName.
+ StringMapEntry<uint32_t> *getOrInsertBundleTag(StringRef TagName) const;
+
/// getOperandBundleTagID - Maps a bundle tag to an integer ID. Every bundle
/// tag registered with an LLVMContext has an unique ID.
uint32_t getOperandBundleTagID(StringRef Tag) const;
@@ -218,23 +230,27 @@ public:
/// included in optimization diagnostics.
void setDiagnosticsHotnessThreshold(uint64_t Threshold);
- /// Return the streamer used by the backend to save remark diagnostics. If it
- /// does not exist, diagnostics are not saved in a file but only emitted via
- /// the diagnostic handler.
- RemarkStreamer *getRemarkStreamer();
- const RemarkStreamer *getRemarkStreamer() const;
-
- /// Set the diagnostics output used for optimization diagnostics.
- /// This filename may be embedded in a section for tools to find the
- /// diagnostics whenever they're needed.
+ /// The "main remark streamer" used by all the specialized remark streamers.
+ /// This streamer keeps generic remark metadata in memory throughout the life
+ /// of the LLVMContext. This metadata may be emitted in a section in object
+ /// files depending on the format requirements.
///
- /// If a remark streamer is already set, it will be replaced with
- /// \p RemarkStreamer.
+ /// All specialized remark streamers should convert remarks to
+ /// llvm::remarks::Remark and emit them through this streamer.
+ remarks::RemarkStreamer *getMainRemarkStreamer();
+ const remarks::RemarkStreamer *getMainRemarkStreamer() const;
+ void setMainRemarkStreamer(
+ std::unique_ptr<remarks::RemarkStreamer> MainRemarkStreamer);
+
+ /// The "LLVM remark streamer" used by LLVM to serialize remark diagnostics
+ /// coming from IR and MIR passes.
///
- /// By default, diagnostics are not saved in a file but only emitted via the
- /// diagnostic handler. Even if an output file is set, the handler is invoked
- /// for each diagnostic message.
- void setRemarkStreamer(std::unique_ptr<RemarkStreamer> RemarkStreamer);
+ /// If it does not exist, diagnostics are not saved in a file but only emitted
+ /// via the diagnostic handler.
+ LLVMRemarkStreamer *getLLVMRemarkStreamer();
+ const LLVMRemarkStreamer *getLLVMRemarkStreamer() const;
+ void
+ setLLVMRemarkStreamer(std::unique_ptr<LLVMRemarkStreamer> RemarkStreamer);
/// Get the prefix that should be printed in front of a diagnostic of
/// the given \p Severity
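
Among the LLVMContext changes, two new fixed operand bundle tag IDs are added (OB_preallocated, OB_gc_live) so passes can test for them without string comparisons. A hedged sketch of the intended query pattern, assuming a CallBase in hand:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// A fixed tag ID allows an integer lookup instead of a string compare.
static bool usesPreallocatedBundle(const CallBase &Call) {
  return Call.getOperandBundle(LLVMContext::OB_preallocated).hasValue();
}
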
diff --git a/llvm/include/llvm/IR/LLVMRemarkStreamer.h b/llvm/include/llvm/IR/LLVMRemarkStreamer.h
new file mode 100644
index 000000000000..97082a44e62f
--- /dev/null
+++ b/llvm/include/llvm/IR/LLVMRemarkStreamer.h
@@ -0,0 +1,95 @@
+//===- llvm/IR/LLVMRemarkStreamer.h - Streamer for LLVM remarks--*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the conversion between IR Diagnostics and
+// serializable remarks::Remark objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LLVMREMARKSTREAMER_H
+#define LLVM_IR_LLVMREMARKSTREAMER_H
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Remarks/RemarkStreamer.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+/// Streamer for LLVM remarks which has logic for dealing with DiagnosticInfo
+/// objects.
+class LLVMRemarkStreamer {
+ remarks::RemarkStreamer &RS;
+ /// Convert diagnostics into remark objects.
+ /// The lifetime of the members of the result is bound to the lifetime of
+ /// the LLVM diagnostics.
+ remarks::Remark toRemark(const DiagnosticInfoOptimizationBase &Diag) const;
+
+public:
+ LLVMRemarkStreamer(remarks::RemarkStreamer &RS) : RS(RS) {}
+ /// Emit a diagnostic through the streamer.
+ void emit(const DiagnosticInfoOptimizationBase &Diag);
+};
+
+template <typename ThisError>
+struct LLVMRemarkSetupErrorInfo : public ErrorInfo<ThisError> {
+ std::string Msg;
+ std::error_code EC;
+
+ LLVMRemarkSetupErrorInfo(Error E) {
+ handleAllErrors(std::move(E), [&](const ErrorInfoBase &EIB) {
+ Msg = EIB.message();
+ EC = EIB.convertToErrorCode();
+ });
+ }
+
+ void log(raw_ostream &OS) const override { OS << Msg; }
+ std::error_code convertToErrorCode() const override { return EC; }
+};
+
+struct LLVMRemarkSetupFileError
+ : LLVMRemarkSetupErrorInfo<LLVMRemarkSetupFileError> {
+ static char ID;
+ using LLVMRemarkSetupErrorInfo<
+ LLVMRemarkSetupFileError>::LLVMRemarkSetupErrorInfo;
+};
+
+struct LLVMRemarkSetupPatternError
+ : LLVMRemarkSetupErrorInfo<LLVMRemarkSetupPatternError> {
+ static char ID;
+ using LLVMRemarkSetupErrorInfo<
+ LLVMRemarkSetupPatternError>::LLVMRemarkSetupErrorInfo;
+};
+
+struct LLVMRemarkSetupFormatError
+ : LLVMRemarkSetupErrorInfo<LLVMRemarkSetupFormatError> {
+ static char ID;
+ using LLVMRemarkSetupErrorInfo<
+ LLVMRemarkSetupFormatError>::LLVMRemarkSetupErrorInfo;
+};
+
+/// Setup optimization remarks that output to a file.
+Expected<std::unique_ptr<ToolOutputFile>>
+setupLLVMOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
+ StringRef RemarksPasses, StringRef RemarksFormat,
+ bool RemarksWithHotness,
+ unsigned RemarksHotnessThreshold = 0);
+
+/// Setup optimization remarks that output directly to a raw_ostream.
+/// \p OS is managed by the caller and should be open for writing as long as \p
+/// Context is streaming remarks to it.
+Error setupLLVMOptimizationRemarks(LLVMContext &Context, raw_ostream &OS,
+ StringRef RemarksPasses,
+ StringRef RemarksFormat,
+ bool RemarksWithHotness,
+ unsigned RemarksHotnessThreshold = 0);
+
+} // end namespace llvm
+
+#endif // LLVM_IR_LLVMREMARKSTREAMER_H
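
A minimal caller-side sketch for the file-based setup entry point above (not
part of the patch; assumes an existing LLVMContext, the usual Support headers,
and illustrative file-name and format strings):

    LLVMContext Ctx;
    Expected<std::unique_ptr<ToolOutputFile>> RemarksFileOrErr =
        setupLLVMOptimizationRemarks(Ctx, "remarks.yaml", /*RemarksPasses=*/"",
                                     /*RemarksFormat=*/"yaml",
                                     /*RemarksWithHotness=*/false);
    if (!RemarksFileOrErr) {
      logAllUnhandledErrors(RemarksFileOrErr.takeError(), errs(), "remarks: ");
      return 1;
    }
    std::unique_ptr<ToolOutputFile> RemarksFile = std::move(*RemarksFileOrErr);
    // ... build modules and run passes; remark diagnostics are serialized
    // through the context's remark streamer ...
    if (RemarksFile)
      RemarksFile->keep(); // Otherwise the output file is removed on exit.

On failure, the returned Error is one of the LLVMRemarkSetup*Error types
defined above, so a caller can distinguish file, pass-regex, and format
problems.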
diff --git a/llvm/include/llvm/IR/LegacyPassManagers.h b/llvm/include/llvm/IR/LegacyPassManagers.h
index 5044c1f6ed31..6b1ddd4d79f8 100644
--- a/llvm/include/llvm/IR/LegacyPassManagers.h
+++ b/llvm/include/llvm/IR/LegacyPassManagers.h
@@ -330,7 +330,8 @@ public:
/// through getAnalysis interface.
virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
- virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F);
+ virtual std::tuple<Pass *, bool> getOnTheFlyPass(Pass *P, AnalysisID PI,
+ Function &F);
/// Initialize available analysis information.
void initializeAnalysisInfo() {
diff --git a/llvm/include/llvm/IR/LegacyPassNameParser.h b/llvm/include/llvm/IR/LegacyPassNameParser.h
index 30820e750350..c33b9fc40472 100644
--- a/llvm/include/llvm/IR/LegacyPassNameParser.h
+++ b/llvm/include/llvm/IR/LegacyPassNameParser.h
@@ -92,47 +92,6 @@ private:
}
};
-///===----------------------------------------------------------------------===//
-/// FilteredPassNameParser class - Make use of the pass registration
-/// mechanism to automatically add a command line argument to opt for
-/// each pass that satisfies a filter criteria. Filter should return
-/// true for passes to be registered as command-line options.
-///
-template<typename Filter>
-class FilteredPassNameParser : public PassNameParser {
-private:
- Filter filter;
-
-public:
- bool ignorablePassImpl(const PassInfo *P) const override {
- return !filter(*P);
- }
-};
-
-///===----------------------------------------------------------------------===//
-/// PassArgFilter - A filter for use with PassNameFilterParser that only
-/// accepts a Pass whose Arg matches certain strings.
-///
-/// Use like this:
-///
-/// extern const char AllowedPassArgs[] = "-anders_aa -dse";
-///
-/// static cl::list<
-/// const PassInfo*,
-/// bool,
-/// FilteredPassNameParser<PassArgFilter<AllowedPassArgs> > >
-/// PassList(cl::desc("Passes available:"));
-///
-/// Only the -anders_aa and -dse options will be available to the user.
-///
-template<const char *Args>
-class PassArgFilter {
-public:
- bool operator()(const PassInfo &P) const {
- return StringRef(Args).contains(P.getPassArgument());
- }
-};
-
} // End llvm namespace
#endif
diff --git a/llvm/include/llvm/IR/Mangler.h b/llvm/include/llvm/IR/Mangler.h
index e4a05ab46a65..747a4085235c 100644
--- a/llvm/include/llvm/IR/Mangler.h
+++ b/llvm/include/llvm/IR/Mangler.h
@@ -14,11 +14,11 @@
#define LLVM_IR_MANGLER_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/IR/GlobalValue.h"
namespace llvm {
class DataLayout;
+class GlobalValue;
template <typename T> class SmallVectorImpl;
class Triple;
class Twine;
diff --git a/llvm/include/llvm/IR/MatrixBuilder.h b/llvm/include/llvm/IR/MatrixBuilder.h
new file mode 100644
index 000000000000..5d04b3563dd5
--- /dev/null
+++ b/llvm/include/llvm/IR/MatrixBuilder.h
@@ -0,0 +1,221 @@
+//===- llvm/MatrixBuilder.h - Builder to lower matrix ops -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MatrixBuilder class, which is used as a convenient way
+// to lower matrix operations to LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MATRIXBUILDER_H
+#define LLVM_IR_MATRIXBUILDER_H
+
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Alignment.h"
+
+namespace llvm {
+
+class Function;
+class Twine;
+class Module;
+
+template <class IRBuilderTy> class MatrixBuilder {
+ IRBuilderTy &B;
+ Module *getModule() { return B.GetInsertBlock()->getParent()->getParent(); }
+
+ std::pair<Value *, Value *> splatScalarOperandIfNeeded(Value *LHS,
+ Value *RHS) {
+ assert((LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy()) &&
+ "One of the operands must be a matrix (embedded in a vector)");
+ if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy())
+ RHS = B.CreateVectorSplat(
+ cast<VectorType>(LHS->getType())->getNumElements(), RHS,
+ "scalar.splat");
+ else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy())
+ LHS = B.CreateVectorSplat(
+ cast<VectorType>(RHS->getType())->getNumElements(), LHS,
+ "scalar.splat");
+ return {LHS, RHS};
+ }
+
+public:
+ MatrixBuilder(IRBuilderTy &Builder) : B(Builder) {}
+
+ /// Create a column major, strided matrix load.
+ /// \p DataPtr - Start address of the matrix read
+ /// \p Rows - Number of rows in matrix (must be a constant)
+ /// \p Columns - Number of columns in matrix (must be a constant)
+ /// \p Stride - Space between columns
+ CallInst *CreateColumnMajorLoad(Value *DataPtr, Align Alignment,
+ Value *Stride, bool IsVolatile, unsigned Rows,
+ unsigned Columns, const Twine &Name = "") {
+
+ // Deal with the pointer
+ PointerType *PtrTy = cast<PointerType>(DataPtr->getType());
+ Type *EltTy = PtrTy->getElementType();
+
+ auto *RetType = FixedVectorType::get(EltTy, Rows * Columns);
+
+ Value *Ops[] = {DataPtr, Stride, B.getInt1(IsVolatile), B.getInt32(Rows),
+ B.getInt32(Columns)};
+ Type *OverloadedTypes[] = {RetType};
+
+ Function *TheFn = Intrinsic::getDeclaration(
+ getModule(), Intrinsic::matrix_column_major_load, OverloadedTypes);
+
+ CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
+ Attribute AlignAttr =
+ Attribute::getWithAlignment(Call->getContext(), Alignment);
+ Call->addAttribute(1, AlignAttr);
+ return Call;
+ }
+
+ /// Create a column major, strided matrix store.
+ /// \p Matrix - Matrix to store
+ /// \p Ptr - Pointer to write back to
+ /// \p Stride - Space between columns
+ CallInst *CreateColumnMajorStore(Value *Matrix, Value *Ptr, Align Alignment,
+ Value *Stride, bool IsVolatile,
+ unsigned Rows, unsigned Columns,
+ const Twine &Name = "") {
+ Value *Ops[] = {Matrix, Ptr,
+ Stride, B.getInt1(IsVolatile),
+ B.getInt32(Rows), B.getInt32(Columns)};
+ Type *OverloadedTypes[] = {Matrix->getType()};
+
+ Function *TheFn = Intrinsic::getDeclaration(
+ getModule(), Intrinsic::matrix_column_major_store, OverloadedTypes);
+
+ CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
+ Attribute AlignAttr =
+ Attribute::getWithAlignment(Call->getContext(), Alignment);
+ Call->addAttribute(2, AlignAttr);
+ return Call;
+ }
+
+ /// Create a llvm.matrix.transpose call, transposing \p Matrix with \p Rows
+ /// rows and \p Columns columns.
+ CallInst *CreateMatrixTranspose(Value *Matrix, unsigned Rows,
+ unsigned Columns, const Twine &Name = "") {
+ auto *OpType = cast<VectorType>(Matrix->getType());
+ auto *ReturnType =
+ FixedVectorType::get(OpType->getElementType(), Rows * Columns);
+
+ Type *OverloadedTypes[] = {ReturnType};
+ Value *Ops[] = {Matrix, B.getInt32(Rows), B.getInt32(Columns)};
+ Function *TheFn = Intrinsic::getDeclaration(
+ getModule(), Intrinsic::matrix_transpose, OverloadedTypes);
+
+ return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
+ }
+
+ /// Create a llvm.matrix.multiply call, multiplying matrices \p LHS and \p
+ /// RHS.
+ CallInst *CreateMatrixMultiply(Value *LHS, Value *RHS, unsigned LHSRows,
+ unsigned LHSColumns, unsigned RHSColumns,
+ const Twine &Name = "") {
+ auto *LHSType = cast<VectorType>(LHS->getType());
+ auto *RHSType = cast<VectorType>(RHS->getType());
+
+ auto *ReturnType =
+ FixedVectorType::get(LHSType->getElementType(), LHSRows * RHSColumns);
+
+ Value *Ops[] = {LHS, RHS, B.getInt32(LHSRows), B.getInt32(LHSColumns),
+ B.getInt32(RHSColumns)};
+ Type *OverloadedTypes[] = {ReturnType, LHSType, RHSType};
+
+ Function *TheFn = Intrinsic::getDeclaration(
+ getModule(), Intrinsic::matrix_multiply, OverloadedTypes);
+ return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
+ }
+
+ /// Insert a single element \p NewVal into \p Matrix at indices (\p RowIdx, \p
+ /// ColumnIdx).
+ Value *CreateMatrixInsert(Value *Matrix, Value *NewVal, Value *RowIdx,
+ Value *ColumnIdx, unsigned NumRows) {
+ return B.CreateInsertElement(
+ Matrix, NewVal,
+ B.CreateAdd(B.CreateMul(ColumnIdx, ConstantInt::get(
+ ColumnIdx->getType(), NumRows)),
+ RowIdx));
+ }
+
+ /// Add matrices \p LHS and \p RHS. Supports both integer and floating point
+ /// matrices.
+ Value *CreateAdd(Value *LHS, Value *RHS) {
+ assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
+ if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy())
+ RHS = B.CreateVectorSplat(
+ cast<VectorType>(LHS->getType())->getNumElements(), RHS,
+ "scalar.splat");
+ else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy())
+ LHS = B.CreateVectorSplat(
+ cast<VectorType>(RHS->getType())->getNumElements(), LHS,
+ "scalar.splat");
+
+ return cast<VectorType>(LHS->getType())
+ ->getElementType()
+ ->isFloatingPointTy()
+ ? B.CreateFAdd(LHS, RHS)
+ : B.CreateAdd(LHS, RHS);
+ }
+
+ /// Subtract matrices \p LHS and \p RHS. Supports both integer and floating
+ /// point matrices.
+ Value *CreateSub(Value *LHS, Value *RHS) {
+ assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
+ if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy())
+ RHS = B.CreateVectorSplat(
+ cast<VectorType>(LHS->getType())->getNumElements(), RHS,
+ "scalar.splat");
+ else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy())
+ LHS = B.CreateVectorSplat(
+ cast<VectorType>(RHS->getType())->getNumElements(), LHS,
+ "scalar.splat");
+
+ return cast<VectorType>(LHS->getType())
+ ->getElementType()
+ ->isFloatingPointTy()
+ ? B.CreateFSub(LHS, RHS)
+ : B.CreateSub(LHS, RHS);
+ }
+
+ /// Multiply matrix \p LHS with scalar \p RHS or scalar \p LHS with matrix \p
+ /// RHS.
+ Value *CreateScalarMultiply(Value *LHS, Value *RHS) {
+ std::tie(LHS, RHS) = splatScalarOperandIfNeeded(LHS, RHS);
+ if (LHS->getType()->getScalarType()->isFloatingPointTy())
+ return B.CreateFMul(LHS, RHS);
+ return B.CreateMul(LHS, RHS);
+ }
+
+ /// Extracts the element at (\p RowIdx, \p ColumnIdx) from \p Matrix.
+ Value *CreateExtractElement(Value *Matrix, Value *RowIdx, Value *ColumnIdx,
+ unsigned NumRows, Twine const &Name = "") {
+
+ unsigned MaxWidth = std::max(RowIdx->getType()->getScalarSizeInBits(),
+ ColumnIdx->getType()->getScalarSizeInBits());
+ Type *IntTy = IntegerType::get(RowIdx->getType()->getContext(), MaxWidth);
+ RowIdx = B.CreateZExt(RowIdx, IntTy);
+ ColumnIdx = B.CreateZExt(ColumnIdx, IntTy);
+ Value *NumRowsV = B.getIntN(MaxWidth, NumRows);
+ return B.CreateExtractElement(
+ Matrix, B.CreateAdd(B.CreateMul(ColumnIdx, NumRowsV), RowIdx),
+ "matext");
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_MATRIXBUILDER_H
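
A short, illustrative sketch of driving the builder (assumes an IRBuilder<> B
positioned inside a function, and flattened column-major matrix values A (2x3)
and C (3x2) of vector type):

    MatrixBuilder<IRBuilder<>> MB(B);
    // 2x3 * 3x2 -> 2x2: two <6 x float> operands produce a <4 x float>.
    Value *Prod = MB.CreateMatrixMultiply(A, C, /*LHSRows=*/2,
                                          /*LHSColumns=*/3, /*RHSColumns=*/2);
    // Read element (row 1, column 0) of the 2x2 result.
    Value *Elt = MB.CreateExtractElement(Prod, B.getInt32(1), B.getInt32(0),
                                         /*NumRows=*/2);

The element access lowers to an extractelement at index
column * NumRows + row, matching the column-major layout assumed throughout
the class.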
diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index dda939b97575..46526c70ea3b 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -527,7 +527,7 @@ template <class V, class M> struct IsValidReference {
/// As an analogue to \a isa(), check whether \c MD has an \a Value inside of
/// type \c X.
template <class X, class Y>
-inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, bool>::type
+inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, bool>
hasa(Y &&MD) {
assert(MD && "Null pointer sent into hasa");
if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
@@ -535,9 +535,8 @@ hasa(Y &&MD) {
return false;
}
template <class X, class Y>
-inline
- typename std::enable_if<detail::IsValidReference<X, Y &>::value, bool>::type
- hasa(Y &MD) {
+inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, bool>
+hasa(Y &MD) {
return hasa(&MD);
}
@@ -545,14 +544,13 @@ inline
///
/// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD.
template <class X, class Y>
-inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
extract(Y &&MD) {
return cast<X>(cast<ConstantAsMetadata>(MD)->getValue());
}
template <class X, class Y>
-inline
- typename std::enable_if<detail::IsValidReference<X, Y &>::value, X *>::type
- extract(Y &MD) {
+inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, X *>
+extract(Y &MD) {
return extract(&MD);
}
@@ -561,7 +559,7 @@ inline
/// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, allowing \c MD to be null.
template <class X, class Y>
-inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
extract_or_null(Y &&MD) {
if (auto *V = cast_or_null<ConstantAsMetadata>(MD))
return cast<X>(V->getValue());
@@ -574,7 +572,7 @@ extract_or_null(Y &&MD) {
/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass.
template <class X, class Y>
-inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
dyn_extract(Y &&MD) {
if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
return dyn_cast<X>(V->getValue());
@@ -587,7 +585,7 @@ dyn_extract(Y &&MD) {
/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass, allowing \c MD to be null.
template <class X, class Y>
-inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
dyn_extract_or_null(Y &&MD) {
if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD))
return dyn_cast<X>(V->getValue());
@@ -976,7 +974,7 @@ public:
/// Try to create a uniqued version of \c N -- in place, if possible -- and
/// return it. If \c N cannot be uniqued, return a distinct node instead.
template <class T>
- static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+ static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) {
return cast<T>(N.release()->replaceWithPermanentImpl());
}
@@ -988,7 +986,7 @@ public:
///
/// \pre N does not self-reference.
template <class T>
- static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+ static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) {
return cast<T>(N.release()->replaceWithUniquedImpl());
}
@@ -998,7 +996,7 @@ public:
/// Create a distinct version of \c N -- in place, if possible -- and return
/// it. Takes ownership of the temporary node.
template <class T>
- static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+ static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) {
return cast<T>(N.release()->replaceWithDistinctImpl());
}
@@ -1237,15 +1235,13 @@ public:
template <class U>
MDTupleTypedArrayWrapper(
const MDTupleTypedArrayWrapper<U> &Other,
- typename std::enable_if<std::is_convertible<U *, T *>::value>::type * =
- nullptr)
+ std::enable_if_t<std::is_convertible<U *, T *>::value> * = nullptr)
: N(Other.get()) {}
template <class U>
explicit MDTupleTypedArrayWrapper(
const MDTupleTypedArrayWrapper<U> &Other,
- typename std::enable_if<!std::is_convertible<U *, T *>::value>::type * =
- nullptr)
+ std::enable_if_t<!std::is_convertible<U *, T *>::value> * = nullptr)
: N(Other.get()) {}
explicit operator bool() const { return get(); }
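
For reference, a brief sketch of the extraction helpers these signatures
belong to (assumes a Metadata pointer MD and the mdconst namespace from the
surrounding header):

    // Unwrap a ConstantInt stored as ConstantAsMetadata; yields null if MD
    // wraps a different Value subclass.
    if (ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD))
      consume(CI->getZExtValue()); // consume() is a placeholder.

The std::enable_if_t spelling is a drop-in replacement for
typename std::enable_if<...>::type and changes no overload behavior.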
diff --git a/llvm/include/llvm/IR/Module.h b/llvm/include/llvm/IR/Module.h
index 68cd583c136c..3f97d048f862 100644
--- a/llvm/include/llvm/IR/Module.h
+++ b/llvm/include/llvm/IR/Module.h
@@ -46,6 +46,7 @@ class FunctionType;
class GVMaterializer;
class LLVMContext;
class MemoryBuffer;
+class ModuleSummaryIndex;
class Pass;
class RandomNumberGenerator;
template <class PtrType> class SmallPtrSetImpl;
@@ -79,6 +80,8 @@ public:
using NamedMDListType = ilist<NamedMDNode>;
/// The type of the comdat "symbol" table.
using ComdatSymTabType = StringMap<Comdat>;
+ /// The type for mapping names to named metadata.
+ using NamedMDSymTabType = StringMap<NamedMDNode *>;
/// The Global Variable iterator.
using global_iterator = GlobalListType::iterator;
@@ -154,6 +157,11 @@ public:
/// converted result in MFB.
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB);
+ /// Check if the given module flag metadata represents a valid module flag,
+ /// and store the flag behavior, the key string and the value metadata.
+ static bool isValidModuleFlag(const MDNode &ModFlag, ModFlagBehavior &MFB,
+ MDString *&Key, Metadata *&Val);
+
struct ModuleFlagEntry {
ModFlagBehavior Behavior;
MDString *Key;
@@ -175,7 +183,7 @@ private:
IFuncListType IFuncList; ///< The IFuncs in the module
NamedMDListType NamedMDList; ///< The named metadata in the module
std::string GlobalScopeAsm; ///< Inline Asm at global scope.
- ValueSymbolTable *ValSymTab; ///< Symbol table for values
+ std::unique_ptr<ValueSymbolTable> ValSymTab; ///< Symbol table for values
ComdatSymTabType ComdatSymTab; ///< Symbol table for COMDATs
std::unique_ptr<MemoryBuffer>
OwnedMemoryBuffer; ///< Memory buffer directly owned by this
@@ -187,7 +195,7 @@ private:
///< recorded in bitcode.
std::string TargetTriple; ///< Platform target triple Module compiled on
///< Format: (arch)(sub)-(vendor)-(sys)-(abi)
- void *NamedMDSymTab; ///< NamedMDNode names.
+ NamedMDSymTabType NamedMDSymTab; ///< NamedMDNode names.
DataLayout DL; ///< DataLayout associated with the module
friend class Constant;
@@ -257,7 +265,7 @@ public:
/// when other randomness consuming passes are added or removed. In
/// addition, the random stream will be reproducible across LLVM
/// versions when the pass does not change.
- std::unique_ptr<RandomNumberGenerator> createRNG(const Pass* P) const;
+ std::unique_ptr<RandomNumberGenerator> createRNG(const StringRef Name) const;
/// Return true if size-info optimization remark is enabled, false
/// otherwise.
@@ -271,22 +279,22 @@ public:
/// @{
/// Set the module identifier.
- void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
+ void setModuleIdentifier(StringRef ID) { ModuleID = std::string(ID); }
/// Set the module's original source file name.
- void setSourceFileName(StringRef Name) { SourceFileName = Name; }
+ void setSourceFileName(StringRef Name) { SourceFileName = std::string(Name); }
/// Set the data layout
void setDataLayout(StringRef Desc);
void setDataLayout(const DataLayout &Other);
/// Set the target triple.
- void setTargetTriple(StringRef T) { TargetTriple = T; }
+ void setTargetTriple(StringRef T) { TargetTriple = std::string(T); }
/// Set the module-scope inline assembly blocks.
/// A trailing newline is added if the input doesn't have one.
void setModuleInlineAsm(StringRef Asm) {
- GlobalScopeAsm = Asm;
+ GlobalScopeAsm = std::string(Asm);
if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
GlobalScopeAsm += '\n';
}
@@ -491,10 +499,12 @@ public:
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Constant *Val);
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
void addModuleFlag(MDNode *Node);
+ /// Like addModuleFlag but replaces the old module flag if it already exists.
+ void setModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
-/// @}
-/// @name Materialization
-/// @{
+ /// @}
+ /// @name Materialization
+ /// @{
/// Sets the GVMaterializer to GVM. This module must not yet have a
/// Materializer. To reset the materializer for a module that already has one,
@@ -583,6 +593,7 @@ public:
const_global_iterator global_begin() const { return GlobalList.begin(); }
global_iterator global_end () { return GlobalList.end(); }
const_global_iterator global_end () const { return GlobalList.end(); }
+ size_t global_size () const { return GlobalList.size(); }
bool global_empty() const { return GlobalList.empty(); }
iterator_range<global_iterator> globals() {
@@ -846,6 +857,13 @@ public:
Metadata *getProfileSummary(bool IsCS);
/// @}
+ /// Returns whether semantic interposition is to be respected.
+ bool getSemanticInterposition() const;
+ bool noSemanticInterposition() const;
+
+ /// Set whether semantic interposition is to be respected.
+ void setSemanticInterposition(bool);
+
/// Returns true if PLT should be avoided for RTLib calls.
bool getRtLibUseGOT() const;
@@ -866,6 +884,10 @@ public:
/// Take ownership of the given memory buffer.
void setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB);
+
+ /// Set the partial sample profile ratio in the profile summary module flag,
+ /// if applicable.
+ void setPartialSampleProfileRatio(const ModuleSummaryIndex &Index);
};
/// Given "llvm.used" or "llvm.compiler.used" as a global name, collect
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndex.h b/llvm/include/llvm/IR/ModuleSummaryIndex.h
index aa4054c8409e..12a829b14e36 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Allocator.h"
@@ -552,6 +553,41 @@ public:
unsigned AlwaysInline : 1;
};
+ /// Describes the uses of a parameter by the function.
+ struct ParamAccess {
+ static constexpr uint32_t RangeWidth = 64;
+
+ /// Describes the use of a value in a call instruction, specifying the
+ /// call's target, the value's parameter number, and the possible range of
+ /// offsets from the beginning of the value that are passed.
+ struct Call {
+ uint64_t ParamNo = 0;
+ GlobalValue::GUID Callee = 0;
+ ConstantRange Offsets{/*BitWidth=*/RangeWidth, /*isFullSet=*/true};
+
+ Call() = default;
+ Call(uint64_t ParamNo, GlobalValue::GUID Callee,
+ const ConstantRange &Offsets)
+ : ParamNo(ParamNo), Callee(Callee), Offsets(Offsets) {}
+ };
+
+ uint64_t ParamNo = 0;
+ /// The range contains byte offsets from the parameter pointer which are
+ /// accessed by the function. In the per-module summary, it only includes
+ /// accesses made by the function instructions. In the combined summary, it
+ /// also includes accesses by nested function calls.
+ ConstantRange Use{/*BitWidth=*/RangeWidth, /*isFullSet=*/true};
+ /// In the per-module summary, this summarizes the byte offset applied to
+ /// each pointer parameter before it is passed to each corresponding callee.
+ /// In the combined summary, it is empty, and the information is propagated
+ /// by inter-procedural analysis and applied to the Use field.
+ std::vector<Call> Calls;
+
+ ParamAccess() = default;
+ ParamAccess(uint64_t ParamNo, const ConstantRange &Use)
+ : ParamNo(ParamNo), Use(Use) {}
+ };
+
/// Create an empty FunctionSummary (with specified call edges).
/// Used to represent external nodes and the dummy root node.
static FunctionSummary
@@ -567,7 +603,8 @@ public:
std::vector<FunctionSummary::VFuncId>(),
std::vector<FunctionSummary::VFuncId>(),
std::vector<FunctionSummary::ConstVCall>(),
- std::vector<FunctionSummary::ConstVCall>());
+ std::vector<FunctionSummary::ConstVCall>(),
+ std::vector<FunctionSummary::ParamAccess>());
}
/// A dummy node to reference external functions that aren't in the index
@@ -591,6 +628,10 @@ private:
std::unique_ptr<TypeIdInfo> TIdInfo;
+ /// Uses for every parameter to this function.
+ using ParamAccessesTy = std::vector<ParamAccess>;
+ std::unique_ptr<ParamAccessesTy> ParamAccesses;
+
public:
FunctionSummary(GVFlags Flags, unsigned NumInsts, FFlags FunFlags,
uint64_t EntryCount, std::vector<ValueInfo> Refs,
@@ -599,18 +640,21 @@ public:
std::vector<VFuncId> TypeTestAssumeVCalls,
std::vector<VFuncId> TypeCheckedLoadVCalls,
std::vector<ConstVCall> TypeTestAssumeConstVCalls,
- std::vector<ConstVCall> TypeCheckedLoadConstVCalls)
+ std::vector<ConstVCall> TypeCheckedLoadConstVCalls,
+ std::vector<ParamAccess> Params)
: GlobalValueSummary(FunctionKind, Flags, std::move(Refs)),
InstCount(NumInsts), FunFlags(FunFlags), EntryCount(EntryCount),
CallGraphEdgeList(std::move(CGEdges)) {
if (!TypeTests.empty() || !TypeTestAssumeVCalls.empty() ||
!TypeCheckedLoadVCalls.empty() || !TypeTestAssumeConstVCalls.empty() ||
!TypeCheckedLoadConstVCalls.empty())
- TIdInfo = std::make_unique<TypeIdInfo>(TypeIdInfo{
- std::move(TypeTests), std::move(TypeTestAssumeVCalls),
- std::move(TypeCheckedLoadVCalls),
- std::move(TypeTestAssumeConstVCalls),
- std::move(TypeCheckedLoadConstVCalls)});
+ TIdInfo = std::make_unique<TypeIdInfo>(
+ TypeIdInfo{std::move(TypeTests), std::move(TypeTestAssumeVCalls),
+ std::move(TypeCheckedLoadVCalls),
+ std::move(TypeTestAssumeConstVCalls),
+ std::move(TypeCheckedLoadConstVCalls)});
+ if (!Params.empty())
+ ParamAccesses = std::make_unique<ParamAccessesTy>(std::move(Params));
}
// Gets the number of readonly and writeonly refs in RefEdgeList
std::pair<unsigned, unsigned> specialRefCounts() const;
@@ -681,6 +725,23 @@ public:
return {};
}
+ /// Returns the list of known uses of pointer parameters.
+ ArrayRef<ParamAccess> paramAccesses() const {
+ if (ParamAccesses)
+ return *ParamAccesses;
+ return {};
+ }
+
+ /// Sets the list of known uses of pointer parameters.
+ void setParamAccesses(std::vector<ParamAccess> NewParams) {
+ if (NewParams.empty())
+ ParamAccesses.reset();
+ else if (ParamAccesses)
+ *ParamAccesses = std::move(NewParams);
+ else
+ ParamAccesses = std::make_unique<ParamAccessesTy>(std::move(NewParams));
+ }
+
/// Add a type test to the summary. This is used by WholeProgramDevirt if we
/// were unable to devirtualize a checked call.
void addTypeTest(GlobalValue::GUID Guid) {
@@ -757,14 +818,33 @@ private:
public:
struct GVarFlags {
- GVarFlags(bool ReadOnly, bool WriteOnly)
- : MaybeReadOnly(ReadOnly), MaybeWriteOnly(WriteOnly) {}
-
- // In permodule summaries both MaybeReadOnly and MaybeWriteOnly
- // bits are set, because attribute propagation occurs later on
- // thin link phase.
+ GVarFlags(bool ReadOnly, bool WriteOnly, bool Constant,
+ GlobalObject::VCallVisibility Vis)
+ : MaybeReadOnly(ReadOnly), MaybeWriteOnly(WriteOnly),
+ Constant(Constant), VCallVisibility(Vis) {}
+
+ // If true, indicates that this global variable might be accessed
+ // purely by non-volatile load instructions. This in turn means it
+ // can be internalized in source and destination modules during
+ // thin LTO import, because it is neither modified nor is its
+ // address taken.
unsigned MaybeReadOnly : 1;
+ // If true, indicates that the variable is possibly only written to,
+ // so its value isn't loaded and its address isn't taken anywhere.
+ // False when the 'Constant' attribute is set.
unsigned MaybeWriteOnly : 1;
+ // Indicates that the value is a compile-time constant. A global
+ // variable can be 'Constant' while not being 'ReadOnly' in several
+ // cases:
+ // - it is volatile (e.g. a mapped device address);
+ // - its address is taken, meaning that, unlike 'ReadOnly' variables,
+ // we can't internalize it.
+ // Constant variables are always imported, thus giving the compiler
+ // an opportunity to perform extra optimizations. Readonly constants
+ // are also internalized.
+ unsigned Constant : 1;
+ // Set from metadata on vtable definitions during the module summary
+ // analysis.
+ unsigned VCallVisibility : 2;
} VarFlags;
GlobalVarSummary(GVFlags Flags, GVarFlags VarFlags,
@@ -782,6 +862,13 @@ public:
void setWriteOnly(bool WO) { VarFlags.MaybeWriteOnly = WO; }
bool maybeReadOnly() const { return VarFlags.MaybeReadOnly; }
bool maybeWriteOnly() const { return VarFlags.MaybeWriteOnly; }
+ bool isConstant() const { return VarFlags.Constant; }
+ void setVCallVisibility(GlobalObject::VCallVisibility Vis) {
+ VarFlags.VCallVisibility = Vis;
+ }
+ GlobalObject::VCallVisibility getVCallVisibility() const {
+ return (GlobalObject::VCallVisibility)VarFlags.VCallVisibility;
+ }
void setVTableFuncs(VTableFuncList Funcs) {
assert(!VTableFuncs);
@@ -807,7 +894,8 @@ struct TypeTestResolution {
Single, ///< Single element (last example in "Short Inline Bit Vectors")
AllOnes, ///< All-ones bit vector ("Eliminating Bit Vector Checks for
/// All-Ones Bit Vectors")
- } TheKind = Unsat;
+ Unknown, ///< Unknown (analysis not performed, don't lower)
+ } TheKind = Unknown;
/// Range of size-1 expressed as a bit width. For example, if the size is in
/// range [1,256], this number will be 8. This helps generate the most compact
@@ -933,7 +1021,8 @@ private:
/// with that type identifier's metadata. Produced by per module summary
/// analysis and consumed by thin link. For more information, see description
/// above where TypeIdCompatibleVtableInfo is defined.
- std::map<std::string, TypeIdCompatibleVtableInfo> TypeIdCompatibleVtableMap;
+ std::map<std::string, TypeIdCompatibleVtableInfo, std::less<>>
+ TypeIdCompatibleVtableMap;
/// Mapping from original ID to GUID. If original ID can map to multiple
/// GUIDs, it will be mapped to 0.
@@ -980,6 +1069,10 @@ private:
StringSaver Saver;
BumpPtrAllocator Alloc;
+ // The total number of basic blocks in the module in the per-module summary or
+ // the total number of basic blocks in the LTO unit in the combined index.
+ uint64_t BlockCount;
+
// YAML I/O support.
friend yaml::MappingTraits<ModuleSummaryIndex>;
@@ -992,18 +1085,30 @@ private:
public:
// See HaveGVs variable comment.
ModuleSummaryIndex(bool HaveGVs, bool EnableSplitLTOUnit = false)
- : HaveGVs(HaveGVs), EnableSplitLTOUnit(EnableSplitLTOUnit), Saver(Alloc) {
- }
+ : HaveGVs(HaveGVs), EnableSplitLTOUnit(EnableSplitLTOUnit), Saver(Alloc),
+ BlockCount(0) {}
// Current version for the module summary in bitcode files.
// The BitcodeSummaryVersion should be bumped whenever we introduce changes
// in the way some record are interpreted, like flags for instance.
// Note that incrementing this may require changes in both BitcodeReader.cpp
// and BitcodeWriter.cpp.
- static constexpr uint64_t BitcodeSummaryVersion = 8;
+ static constexpr uint64_t BitcodeSummaryVersion = 9;
+
+ // Regular LTO module name for ASM writer
+ static constexpr const char *getRegularLTOModuleName() {
+ return "[Regular LTO]";
+ }
bool haveGVs() const { return HaveGVs; }
+ uint64_t getFlags() const;
+ void setFlags(uint64_t Flags);
+
+ uint64_t getBlockCount() const { return BlockCount; }
+ void addBlockCount(uint64_t C) { BlockCount += C; }
+ void setBlockCount(uint64_t C) { BlockCount = C; }
+
gvsummary_iterator begin() { return GlobalValueMap.begin(); }
const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
gvsummary_iterator end() { return GlobalValueMap.end(); }
@@ -1264,7 +1369,7 @@ public:
NewName += ".llvm.";
NewName += utostr((uint64_t(ModHash[0]) << 32) |
ModHash[1]); // Take the first 64 bits
- return NewName.str();
+ return std::string(NewName.str());
}
/// Helper to obtain the unpromoted name for a global value (or the original
@@ -1310,7 +1415,7 @@ public:
if (It->second.first == TypeId)
return It->second.second;
auto It = TypeIdMap.insert(
- {GlobalValue::getGUID(TypeId), {TypeId, TypeIdSummary()}});
+ {GlobalValue::getGUID(TypeId), {std::string(TypeId), TypeIdSummary()}});
return It->second.second;
}
@@ -1330,8 +1435,7 @@ public:
TypeId));
}
- const std::map<std::string, TypeIdCompatibleVtableInfo> &
- typeIdCompatibleVtableMap() const {
+ const auto &typeIdCompatibleVtableMap() const {
return TypeIdCompatibleVtableMap;
}
@@ -1340,7 +1444,7 @@ public:
/// the ThinLTO backends.
TypeIdCompatibleVtableInfo &
getOrInsertTypeIdCompatibleVtableSummary(StringRef TypeId) {
- return TypeIdCompatibleVtableMap[TypeId];
+ return TypeIdCompatibleVtableMap[std::string(TypeId)];
}
/// For the given \p TypeId, this returns the TypeIdCompatibleVtableMap
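
An illustrative construction of the new parameter-access summaries (FS and
CalleeGUID are assumed to exist; offsets use the fixed 64-bit RangeWidth):

    // Parameter 0 is dereferenced at byte offsets [0, 8) and also forwarded
    // as argument 1 of a callee.
    FunctionSummary::ParamAccess PA(
        /*ParamNo=*/0, ConstantRange(APInt(64, 0), APInt(64, 8)));
    PA.Calls.emplace_back(/*ParamNo=*/1, CalleeGUID,
                          ConstantRange(APInt(64, 0), APInt(64, 8)));
    FS->setParamAccesses({PA});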
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h b/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
index 4d4a67c75172..f7fa16df1100 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -17,6 +17,7 @@ namespace yaml {
template <> struct ScalarEnumerationTraits<TypeTestResolution::Kind> {
static void enumeration(IO &io, TypeTestResolution::Kind &value) {
+ io.enumCase(value, "Unknown", TypeTestResolution::Unknown);
io.enumCase(value, "Unsat", TypeTestResolution::Unsat);
io.enumCase(value, "ByteArray", TypeTestResolution::ByteArray);
io.enumCase(value, "Inline", TypeTestResolution::Inline);
@@ -223,13 +224,15 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
Elem.SummaryList.push_back(std::make_unique<FunctionSummary>(
GlobalValueSummary::GVFlags(
static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
- FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal, FSum.CanAutoHide),
+ FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal,
+ FSum.CanAutoHide),
/*NumInsts=*/0, FunctionSummary::FFlags{}, /*EntryCount=*/0, Refs,
ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
std::move(FSum.TypeTestAssumeVCalls),
std::move(FSum.TypeCheckedLoadVCalls),
std::move(FSum.TypeTestAssumeConstVCalls),
- std::move(FSum.TypeCheckedLoadConstVCalls)));
+ std::move(FSum.TypeCheckedLoadConstVCalls),
+ ArrayRef<FunctionSummary::ParamAccess>{}));
}
}
static void output(IO &io, GlobalValueSummaryMapTy &V) {
@@ -262,7 +265,7 @@ template <> struct CustomMappingTraits<TypeIdSummaryMapTy> {
static void inputOne(IO &io, StringRef Key, TypeIdSummaryMapTy &V) {
TypeIdSummary TId;
io.mapRequired(Key.str().c_str(), TId);
- V.insert({GlobalValue::getGUID(Key), {Key, TId}});
+ V.insert({GlobalValue::getGUID(Key), {std::string(Key), TId}});
}
static void output(IO &io, TypeIdSummaryMapTy &V) {
for (auto TidIter = V.begin(); TidIter != V.end(); TidIter++)
diff --git a/llvm/include/llvm/IR/NoFolder.h b/llvm/include/llvm/IR/NoFolder.h
index 835236b1eac0..dcffa6b2f9da 100644
--- a/llvm/include/llvm/IR/NoFolder.h
+++ b/llvm/include/llvm/IR/NoFolder.h
@@ -26,11 +26,14 @@
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IRBuilderFolder.h"
namespace llvm {
/// NoFolder - Create "constants" (actually, instructions) with no folding.
-class NoFolder {
+class NoFolder final : public IRBuilderFolder {
+ virtual void anchor();
+
public:
explicit NoFolder() = default;
@@ -39,105 +42,76 @@ public:
//===--------------------------------------------------------------------===//
Instruction *CreateAdd(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false,
+ bool HasNSW = false) const override {
BinaryOperator *BO = BinaryOperator::CreateAdd(LHS, RHS);
if (HasNUW) BO->setHasNoUnsignedWrap();
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
- Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateNSWAdd(LHS, RHS);
- }
-
- Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateNUWAdd(LHS, RHS);
- }
-
- Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateFAdd(LHS, RHS);
}
Instruction *CreateSub(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false,
+ bool HasNSW = false) const override {
BinaryOperator *BO = BinaryOperator::CreateSub(LHS, RHS);
if (HasNUW) BO->setHasNoUnsignedWrap();
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
- Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateNSWSub(LHS, RHS);
- }
-
- Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateNUWSub(LHS, RHS);
- }
-
- Instruction *CreateFSub(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFSub(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateFSub(LHS, RHS);
}
Instruction *CreateMul(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false,
+ bool HasNSW = false) const override {
BinaryOperator *BO = BinaryOperator::CreateMul(LHS, RHS);
if (HasNUW) BO->setHasNoUnsignedWrap();
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
- Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateNSWMul(LHS, RHS);
- }
-
- Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateNUWMul(LHS, RHS);
- }
-
- Instruction *CreateFMul(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFMul(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateFMul(LHS, RHS);
}
Instruction *CreateUDiv(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
if (!isExact)
return BinaryOperator::CreateUDiv(LHS, RHS);
return BinaryOperator::CreateExactUDiv(LHS, RHS);
}
- Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateExactUDiv(LHS, RHS);
- }
-
Instruction *CreateSDiv(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
if (!isExact)
return BinaryOperator::CreateSDiv(LHS, RHS);
return BinaryOperator::CreateExactSDiv(LHS, RHS);
}
- Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
- return BinaryOperator::CreateExactSDiv(LHS, RHS);
- }
-
- Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateFDiv(LHS, RHS);
}
- Instruction *CreateURem(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateURem(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateURem(LHS, RHS);
}
- Instruction *CreateSRem(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateSRem(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateSRem(LHS, RHS);
}
- Instruction *CreateFRem(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFRem(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateFRem(LHS, RHS);
}
Instruction *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
- bool HasNSW = false) const {
+ bool HasNSW = false) const override {
BinaryOperator *BO = BinaryOperator::CreateShl(LHS, RHS);
if (HasNUW) BO->setHasNoUnsignedWrap();
if (HasNSW) BO->setHasNoSignedWrap();
@@ -145,33 +119,33 @@ public:
}
Instruction *CreateLShr(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
if (!isExact)
return BinaryOperator::CreateLShr(LHS, RHS);
return BinaryOperator::CreateExactLShr(LHS, RHS);
}
Instruction *CreateAShr(Constant *LHS, Constant *RHS,
- bool isExact = false) const {
+ bool isExact = false) const override {
if (!isExact)
return BinaryOperator::CreateAShr(LHS, RHS);
return BinaryOperator::CreateExactAShr(LHS, RHS);
}
- Instruction *CreateAnd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateAnd(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateAnd(LHS, RHS);
}
- Instruction *CreateOr(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateOr(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateOr(LHS, RHS);
}
- Instruction *CreateXor(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateXor(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateXor(LHS, RHS);
}
Instruction *CreateBinOp(Instruction::BinaryOps Opc,
- Constant *LHS, Constant *RHS) const {
+ Constant *LHS, Constant *RHS) const override {
return BinaryOperator::Create(Opc, LHS, RHS);
}
@@ -180,30 +154,24 @@ public:
//===--------------------------------------------------------------------===//
Instruction *CreateNeg(Constant *C,
- bool HasNUW = false, bool HasNSW = false) const {
+ bool HasNUW = false,
+ bool HasNSW = false) const override {
BinaryOperator *BO = BinaryOperator::CreateNeg(C);
if (HasNUW) BO->setHasNoUnsignedWrap();
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
- Instruction *CreateNSWNeg(Constant *C) const {
- return BinaryOperator::CreateNSWNeg(C);
- }
-
- Instruction *CreateNUWNeg(Constant *C) const {
- return BinaryOperator::CreateNUWNeg(C);
- }
-
- Instruction *CreateFNeg(Constant *C) const {
+ Instruction *CreateFNeg(Constant *C) const override {
return UnaryOperator::CreateFNeg(C);
}
- Instruction *CreateNot(Constant *C) const {
+ Instruction *CreateNot(Constant *C) const override {
return BinaryOperator::CreateNot(C);
}
- Instruction *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const {
+ Instruction *CreateUnOp(Instruction::UnaryOps Opc,
+ Constant *C) const override {
return UnaryOperator::Create(Opc, C);
}
@@ -212,11 +180,12 @@ public:
//===--------------------------------------------------------------------===//
Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const {
+ ArrayRef<Constant *> IdxList) const override {
return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
}
- Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const override {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
@@ -224,25 +193,25 @@ public:
}
Instruction *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const {
+ ArrayRef<Value *> IdxList) const override {
return GetElementPtrInst::Create(Ty, C, IdxList);
}
- Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const {
+ Constant *CreateInBoundsGetElementPtr(
+ Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const override {
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
}
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const {
+ Constant *Idx) const override {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
}
- Instruction *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const {
+ Instruction *CreateInBoundsGetElementPtr(
+ Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const override {
return GetElementPtrInst::CreateInBounds(Ty, C, IdxList);
}
@@ -251,44 +220,49 @@ public:
//===--------------------------------------------------------------------===//
Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
- Type *DestTy) const {
+ Type *DestTy) const override {
return CastInst::Create(Op, C, DestTy);
}
- Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
+ Instruction *CreatePointerCast(Constant *C, Type *DestTy) const override {
return CastInst::CreatePointerCast(C, DestTy);
}
+ Instruction *CreatePointerBitCastOrAddrSpaceCast(
+ Constant *C, Type *DestTy) const override {
+ return CastInst::CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
+ }
+
Instruction *CreateIntCast(Constant *C, Type *DestTy,
- bool isSigned) const {
+ bool isSigned) const override {
return CastInst::CreateIntegerCast(C, DestTy, isSigned);
}
- Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
+ Instruction *CreateFPCast(Constant *C, Type *DestTy) const override {
return CastInst::CreateFPCast(C, DestTy);
}
- Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
+ Instruction *CreateBitCast(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::BitCast, C, DestTy);
}
- Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
- Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
- Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
return CastInst::CreateZExtOrBitCast(C, DestTy);
}
- Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
return CastInst::CreateSExtOrBitCast(C, DestTy);
}
- Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
return CastInst::CreateTruncOrBitCast(C, DestTy);
}
@@ -297,12 +271,12 @@ public:
//===--------------------------------------------------------------------===//
Instruction *CreateICmp(CmpInst::Predicate P,
- Constant *LHS, Constant *RHS) const {
+ Constant *LHS, Constant *RHS) const override {
return new ICmpInst(P, LHS, RHS);
}
Instruction *CreateFCmp(CmpInst::Predicate P,
- Constant *LHS, Constant *RHS) const {
+ Constant *LHS, Constant *RHS) const override {
return new FCmpInst(P, LHS, RHS);
}
@@ -311,31 +285,32 @@ public:
//===--------------------------------------------------------------------===//
Instruction *CreateSelect(Constant *C,
- Constant *True, Constant *False) const {
+ Constant *True, Constant *False) const override {
return SelectInst::Create(C, True, False);
}
- Instruction *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ Instruction *CreateExtractElement(Constant *Vec,
+ Constant *Idx) const override {
return ExtractElementInst::Create(Vec, Idx);
}
Instruction *CreateInsertElement(Constant *Vec, Constant *NewElt,
- Constant *Idx) const {
+ Constant *Idx) const override {
return InsertElementInst::Create(Vec, NewElt, Idx);
}
Instruction *CreateShuffleVector(Constant *V1, Constant *V2,
- Constant *Mask) const {
+ ArrayRef<int> Mask) const override {
return new ShuffleVectorInst(V1, V2, Mask);
}
Instruction *CreateExtractValue(Constant *Agg,
- ArrayRef<unsigned> IdxList) const {
+ ArrayRef<unsigned> IdxList) const override {
return ExtractValueInst::Create(Agg, IdxList);
}
Instruction *CreateInsertValue(Constant *Agg, Constant *Val,
- ArrayRef<unsigned> IdxList) const {
+ ArrayRef<unsigned> IdxList) const override {
return InsertValueInst::Create(Agg, Val, IdxList);
}
};
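
With NoFolder now implementing the common IRBuilderFolder interface, it plugs
into IRBuilder as before; a small sketch (BB is an assumed insertion block):

    IRBuilder<NoFolder> Builder(BB);
    // Emits `add i32 1, 2` as a real instruction instead of folding it to 3,
    // which is useful when testing passes against unfolded IR.
    Value *Sum = Builder.CreateAdd(Builder.getInt32(1), Builder.getInt32(2));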
diff --git a/llvm/include/llvm/IR/Operator.h b/llvm/include/llvm/IR/Operator.h
index 35e08d9215e2..acfacbd6c74e 100644
--- a/llvm/include/llvm/IR/Operator.h
+++ b/llvm/include/llvm/IR/Operator.h
@@ -545,15 +545,29 @@ public:
});
}
+ /// Compute the maximum alignment that this GEP is guaranteed to preserve.
+ Align getMaxPreservedAlignment(const DataLayout &DL) const;
+
/// Accumulate the constant address offset of this GEP if possible.
///
- /// This routine accepts an APInt into which it will accumulate the constant
- /// offset of this GEP if the GEP is in fact constant. If the GEP is not
- /// all-constant, it returns false and the value of the offset APInt is
- /// undefined (it is *not* preserved!). The APInt passed into this routine
- /// must be at exactly as wide as the IntPtr type for the address space of the
- /// base GEP pointer.
- bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
+ /// This routine accepts an APInt into which it will try to accumulate the
+ /// constant offset of this GEP.
+ ///
+ /// If \p ExternalAnalysis is provided, it will be used to calculate an
+ /// offset when an operand of the GEP is not constant.
+ /// For example, for a value, \p ExternalAnalysis might try to calculate a
+ /// lower bound. If \p ExternalAnalysis is successful, it should return true.
+ ///
+ /// If \p ExternalAnalysis returns false, or the value returned by \p
+ /// ExternalAnalysis results in an overflow/underflow, this routine returns
+ /// false and the value of the offset APInt is undefined (it is *not*
+ /// preserved!).
+ ///
+ /// The APInt passed into this routine must be exactly as wide as the
+ /// IntPtr type for the address space of the base GEP pointer.
+ bool accumulateConstantOffset(
+ const DataLayout &DL, APInt &Offset,
+ function_ref<bool(Value &, APInt &)> ExternalAnalysis = nullptr) const;
};
class PtrToIntOperator
@@ -599,6 +613,25 @@ public:
}
};
+class AddrSpaceCastOperator
+ : public ConcreteOperator<Operator, Instruction::AddrSpaceCast> {
+ friend class AddrSpaceCastInst;
+ friend class ConstantExpr;
+
+public:
+ Value *getPointerOperand() { return getOperand(0); }
+
+ const Value *getPointerOperand() const { return getOperand(0); }
+
+ unsigned getSrcAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ unsigned getDestAddressSpace() const {
+ return getType()->getPointerAddressSpace();
+ }
+};
+
} // end namespace llvm
#endif // LLVM_IR_OPERATOR_H
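
A sketch of the extended offset accumulation (GEP and DL are assumed; this
callback declines every non-constant index, reproducing the old all-constant
behavior):

    APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
    bool AllConstant = GEP->accumulateConstantOffset(
        DL, Offset, [](Value &V, APInt &Off) {
          // A real ExternalAnalysis could store a known bound for V in Off
          // and return true.
          return false;
        });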
diff --git a/llvm/include/llvm/IR/PassInstrumentation.h b/llvm/include/llvm/IR/PassInstrumentation.h
index f8a1196871cf..bcc434548e67 100644
--- a/llvm/include/llvm/IR/PassInstrumentation.h
+++ b/llvm/include/llvm/IR/PassInstrumentation.h
@@ -56,12 +56,12 @@
#include "llvm/ADT/Any.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/TypeName.h"
#include <type_traits>
namespace llvm {
class PreservedAnalyses;
+class StringRef;
/// This class manages callbacks registration, as well as provides a way for
/// PassInstrumentation to pass control to the registered callbacks.
diff --git a/llvm/include/llvm/IR/PassManager.h b/llvm/include/llvm/IR/PassManager.h
index 58591ab380cc..4d5f292ba9a1 100644
--- a/llvm/include/llvm/IR/PassManager.h
+++ b/llvm/include/llvm/IR/PassManager.h
@@ -47,8 +47,8 @@
#include "llvm/IR/PassManagerInternal.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/TypeName.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstring>
@@ -503,9 +503,6 @@ public:
for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
auto *P = Passes[Idx].get();
- if (DebugLogging)
- dbgs() << "Running pass: " << P->name() << " on " << IR.getName()
- << "\n";
// Check the PassInstrumentation's BeforePass callbacks before running the
// pass, skip its execution completely if asked to (callback returns
@@ -513,7 +510,15 @@ public:
if (!PI.runBeforePass<IRUnitT>(*P, IR))
continue;
- PreservedAnalyses PassPA = P->run(IR, AM, ExtraArgs...);
+ if (DebugLogging)
+ dbgs() << "Running pass: " << P->name() << " on " << IR.getName()
+ << "\n";
+
+ PreservedAnalyses PassPA;
+ {
+ TimeTraceScope TimeScope(P->name(), IR.getName());
+ PassPA = P->run(IR, AM, ExtraArgs...);
+ }
// Call onto PassInstrumentation's AfterPass callbacks immediately after
// running the pass.
@@ -727,9 +732,9 @@ public:
/// Construct an empty analysis manager.
///
/// If \p DebugLogging is true, we'll log our progress to llvm::dbgs().
- AnalysisManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
- AnalysisManager(AnalysisManager &&) = default;
- AnalysisManager &operator=(AnalysisManager &&) = default;
+ AnalysisManager(bool DebugLogging = false);
+ AnalysisManager(AnalysisManager &&);
+ AnalysisManager &operator=(AnalysisManager &&);
/// Returns true if the analysis manager has an empty results cache.
bool empty() const {
@@ -744,20 +749,7 @@ public:
/// This doesn't invalidate, but instead simply deletes, the relevant results.
/// It is useful when the IR is being removed and we want to clear out all the
/// memory pinned for it.
- void clear(IRUnitT &IR, llvm::StringRef Name) {
- if (DebugLogging)
- dbgs() << "Clearing all analysis results for: " << Name << "\n";
-
- auto ResultsListI = AnalysisResultLists.find(&IR);
- if (ResultsListI == AnalysisResultLists.end())
- return;
- // Delete the map entries that point into the results list.
- for (auto &IDAndResult : ResultsListI->second)
- AnalysisResults.erase({IDAndResult.first, &IR});
-
- // And actually destroy and erase the results associated with this IR.
- AnalysisResultLists.erase(ResultsListI);
- }
+ void clear(IRUnitT &IR, llvm::StringRef Name);
/// Clear all analysis results cached by this AnalysisManager.
///
@@ -808,6 +800,16 @@ public:
return &static_cast<ResultModelT *>(ResultConcept)->Result;
}
+ /// Verify that the given Result cannot be invalidated, assert otherwise.
+ template <typename PassT>
+ void verifyNotInvalidated(IRUnitT &IR, typename PassT::Result *Result) const {
+ PreservedAnalyses PA = PreservedAnalyses::none();
+ SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
+ Invalidator Inv(IsResultInvalidated, AnalysisResults);
+ assert(!Result->invalidate(IR, PA, Inv) &&
+ "Cached result cannot be invalidated");
+ }
+
/// Register an analysis pass with the manager.
///
/// The parameter is a callable whose result is an analysis pass. This allows
@@ -856,67 +858,7 @@ public:
///
/// Walk through all of the analyses pertaining to this unit of IR and
/// invalidate them, unless they are preserved by the PreservedAnalyses set.
- void invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
- // We're done if all analyses on this IR unit are preserved.
- if (PA.allAnalysesInSetPreserved<AllAnalysesOn<IRUnitT>>())
- return;
-
- if (DebugLogging)
- dbgs() << "Invalidating all non-preserved analyses for: " << IR.getName()
- << "\n";
-
- // Track whether each analysis's result is invalidated in
- // IsResultInvalidated.
- SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
- Invalidator Inv(IsResultInvalidated, AnalysisResults);
- AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
- for (auto &AnalysisResultPair : ResultsList) {
- // This is basically the same thing as Invalidator::invalidate, but we
- // can't call it here because we're operating on the type-erased result.
- // Moreover if we instead called invalidate() directly, it would do an
- // unnecessary look up in ResultsList.
- AnalysisKey *ID = AnalysisResultPair.first;
- auto &Result = *AnalysisResultPair.second;
-
- auto IMapI = IsResultInvalidated.find(ID);
- if (IMapI != IsResultInvalidated.end())
- // This result was already handled via the Invalidator.
- continue;
-
- // Try to invalidate the result, giving it the Invalidator so it can
- // recursively query for any dependencies it has and record the result.
- // Note that we cannot reuse 'IMapI' here or pre-insert the ID, as
- // Result.invalidate may insert things into the map, invalidating our
- // iterator.
- bool Inserted =
- IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, Inv)})
- .second;
- (void)Inserted;
- assert(Inserted && "Should never have already inserted this ID, likely "
- "indicates a cycle!");
- }
-
- // Now erase the results that were marked above as invalidated.
- if (!IsResultInvalidated.empty()) {
- for (auto I = ResultsList.begin(), E = ResultsList.end(); I != E;) {
- AnalysisKey *ID = I->first;
- if (!IsResultInvalidated.lookup(ID)) {
- ++I;
- continue;
- }
-
- if (DebugLogging)
- dbgs() << "Invalidating analysis: " << this->lookUpPass(ID).name()
- << " on " << IR.getName() << "\n";
-
- I = ResultsList.erase(I);
- AnalysisResults.erase({ID, &IR});
- }
- }
-
- if (ResultsList.empty())
- AnalysisResultLists.erase(&IR);
- }
+ void invalidate(IRUnitT &IR, const PreservedAnalyses &PA);
private:
/// Look up a registered analysis pass.
@@ -937,41 +879,7 @@ private:
/// Get an analysis result, running the pass if necessary.
ResultConceptT &getResultImpl(AnalysisKey *ID, IRUnitT &IR,
- ExtraArgTs... ExtraArgs) {
- typename AnalysisResultMapT::iterator RI;
- bool Inserted;
- std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
- std::make_pair(ID, &IR), typename AnalysisResultListT::iterator()));
-
- // If we don't have a cached result for this function, look up the pass and
- // run it to produce a result, which we then add to the cache.
- if (Inserted) {
- auto &P = this->lookUpPass(ID);
- if (DebugLogging)
- dbgs() << "Running analysis: " << P.name() << " on " << IR.getName()
- << "\n";
-
- PassInstrumentation PI;
- if (ID != PassInstrumentationAnalysis::ID()) {
- PI = getResult<PassInstrumentationAnalysis>(IR, ExtraArgs...);
- PI.runBeforeAnalysis(P, IR);
- }
-
- AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
- ResultList.emplace_back(ID, P.run(IR, *this, ExtraArgs...));
-
- PI.runAfterAnalysis(P, IR);
-
- // P.run may have inserted elements into AnalysisResults and invalidated
- // RI.
- RI = AnalysisResults.find({ID, &IR});
- assert(RI != AnalysisResults.end() && "we just inserted it!");
-
- RI->second = std::prev(ResultList.end());
- }
-
- return *RI->second->second;
- }
+ ExtraArgTs... ExtraArgs);
/// Get a cached analysis result or return null.
ResultConceptT *getCachedResultImpl(AnalysisKey *ID, IRUnitT &IR) const {
@@ -1167,7 +1075,24 @@ public:
public:
explicit Result(const AnalysisManagerT &OuterAM) : OuterAM(&OuterAM) {}
- const AnalysisManagerT &getManager() const { return *OuterAM; }
+ /// Get a cached analysis. If the analysis can be invalidated, this will
+ /// assert.
+ template <typename PassT, typename IRUnitTParam>
+ typename PassT::Result *getCachedResult(IRUnitTParam &IR) const {
+ typename PassT::Result *Res =
+ OuterAM->template getCachedResult<PassT>(IR);
+ if (Res)
+ OuterAM->template verifyNotInvalidated<PassT>(IR, Res);
+ return Res;
+ }
+
+ /// Method provided for unit testing, not intended for general use.
+ template <typename PassT, typename IRUnitTParam>
+ bool cachedResultExists(IRUnitTParam &IR) const {
+ typename PassT::Result *Res =
+ OuterAM->template getCachedResult<PassT>(IR);
+ return Res != nullptr;
+ }
/// When invalidation occurs, remove any registered invalidation events.
bool invalidate(
@@ -1306,7 +1231,12 @@ public:
// false).
if (!PI.runBeforePass<Function>(Pass, F))
continue;
- PreservedAnalyses PassPA = Pass.run(F, FAM);
+
+ PreservedAnalyses PassPA;
+ {
+ TimeTraceScope TimeScope(Pass.name(), F.getName());
+ PassPA = Pass.run(F, FAM);
+ }
PI.runAfterPass(Pass, F);
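
TimeTraceScope is the RAII helper from llvm/Support/TimeProfiler.h; it emits an event only when the time profiler has been initialized (clang and opt do this for -time-trace). A sketch of the same pattern in isolation:

    #include "llvm/Support/TimeProfiler.h"

    template <typename PassT>
    PreservedAnalyses runTimed(PassT &Pass, Function &F,
                               FunctionAnalysisManager &FAM) {
      // One event per pass run, named by the pass, detailed by the function.
      TimeTraceScope Scope(Pass.name(), F.getName());
      return Pass.run(F, FAM);
    }
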
diff --git a/llvm/include/llvm/IR/PassManagerImpl.h b/llvm/include/llvm/IR/PassManagerImpl.h
new file mode 100644
index 000000000000..978655ac69c4
--- /dev/null
+++ b/llvm/include/llvm/IR/PassManagerImpl.h
@@ -0,0 +1,157 @@
+//===- PassManagerImpl.h - Pass management infrastructure -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Provides implementations for PassManager and AnalysisManager template
+/// methods. These classes should be explicitly instantiated for any IR unit,
+/// and files doing the explicit instantiation should include this header.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSMANAGERIMPL_H
+#define LLVM_IR_PASSMANAGERIMPL_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+template <typename IRUnitT, typename... ExtraArgTs>
+inline AnalysisManager<IRUnitT, ExtraArgTs...>::AnalysisManager(
+ bool DebugLogging)
+ : DebugLogging(DebugLogging) {}
+
+template <typename IRUnitT, typename... ExtraArgTs>
+inline AnalysisManager<IRUnitT, ExtraArgTs...>::AnalysisManager(
+ AnalysisManager &&) = default;
+
+template <typename IRUnitT, typename... ExtraArgTs>
+inline AnalysisManager<IRUnitT, ExtraArgTs...> &
+AnalysisManager<IRUnitT, ExtraArgTs...>::operator=(AnalysisManager &&) =
+ default;
+
+template <typename IRUnitT, typename... ExtraArgTs>
+inline void
+AnalysisManager<IRUnitT, ExtraArgTs...>::clear(IRUnitT &IR,
+ llvm::StringRef Name) {
+ if (DebugLogging)
+ dbgs() << "Clearing all analysis results for: " << Name << "\n";
+
+ auto ResultsListI = AnalysisResultLists.find(&IR);
+ if (ResultsListI == AnalysisResultLists.end())
+ return;
+ // Delete the map entries that point into the results list.
+ for (auto &IDAndResult : ResultsListI->second)
+ AnalysisResults.erase({IDAndResult.first, &IR});
+
+ // And actually destroy and erase the results associated with this IR.
+ AnalysisResultLists.erase(ResultsListI);
+}
+
+template <typename IRUnitT, typename... ExtraArgTs>
+inline typename AnalysisManager<IRUnitT, ExtraArgTs...>::ResultConceptT &
+AnalysisManager<IRUnitT, ExtraArgTs...>::getResultImpl(
+ AnalysisKey *ID, IRUnitT &IR, ExtraArgTs... ExtraArgs) {
+ typename AnalysisResultMapT::iterator RI;
+ bool Inserted;
+ std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
+ std::make_pair(ID, &IR), typename AnalysisResultListT::iterator()));
+
+ // If we don't have a cached result for this function, look up the pass and
+ // run it to produce a result, which we then add to the cache.
+ if (Inserted) {
+ auto &P = this->lookUpPass(ID);
+ if (DebugLogging)
+ dbgs() << "Running analysis: " << P.name() << " on " << IR.getName()
+ << "\n";
+
+ PassInstrumentation PI;
+ if (ID != PassInstrumentationAnalysis::ID()) {
+ PI = getResult<PassInstrumentationAnalysis>(IR, ExtraArgs...);
+ PI.runBeforeAnalysis(P, IR);
+ }
+
+ AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
+ ResultList.emplace_back(ID, P.run(IR, *this, ExtraArgs...));
+
+ PI.runAfterAnalysis(P, IR);
+
+ // P.run may have inserted elements into AnalysisResults and invalidated
+ // RI.
+ RI = AnalysisResults.find({ID, &IR});
+ assert(RI != AnalysisResults.end() && "we just inserted it!");
+
+ RI->second = std::prev(ResultList.end());
+ }
+
+ return *RI->second->second;
+}
+
+template <typename IRUnitT, typename... ExtraArgTs>
+inline void AnalysisManager<IRUnitT, ExtraArgTs...>::invalidate(
+ IRUnitT &IR, const PreservedAnalyses &PA) {
+ // We're done if all analyses on this IR unit are preserved.
+ if (PA.allAnalysesInSetPreserved<AllAnalysesOn<IRUnitT>>())
+ return;
+
+ if (DebugLogging)
+ dbgs() << "Invalidating all non-preserved analyses for: " << IR.getName()
+ << "\n";
+
+ // Track whether each analysis's result is invalidated in
+ // IsResultInvalidated.
+ SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
+ Invalidator Inv(IsResultInvalidated, AnalysisResults);
+ AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
+ for (auto &AnalysisResultPair : ResultsList) {
+ // This is basically the same thing as Invalidator::invalidate, but we
+ // can't call it here because we're operating on the type-erased result.
+ // Moreover if we instead called invalidate() directly, it would do an
+ // unnecessary look up in ResultsList.
+ AnalysisKey *ID = AnalysisResultPair.first;
+ auto &Result = *AnalysisResultPair.second;
+
+ auto IMapI = IsResultInvalidated.find(ID);
+ if (IMapI != IsResultInvalidated.end())
+ // This result was already handled via the Invalidator.
+ continue;
+
+ // Try to invalidate the result, giving it the Invalidator so it can
+ // recursively query for any dependencies it has and record the result.
+ // Note that we cannot reuse 'IMapI' here or pre-insert the ID, as
+ // Result.invalidate may insert things into the map, invalidating our
+ // iterator.
+ bool Inserted =
+ IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, Inv)}).second;
+ (void)Inserted;
+ assert(Inserted && "Should never have already inserted this ID, likely "
+ "indicates a cycle!");
+ }
+
+ // Now erase the results that were marked above as invalidated.
+ if (!IsResultInvalidated.empty()) {
+ for (auto I = ResultsList.begin(), E = ResultsList.end(); I != E;) {
+ AnalysisKey *ID = I->first;
+ if (!IsResultInvalidated.lookup(ID)) {
+ ++I;
+ continue;
+ }
+
+ if (DebugLogging)
+ dbgs() << "Invalidating analysis: " << this->lookUpPass(ID).name()
+ << " on " << IR.getName() << "\n";
+
+ I = ResultsList.erase(I);
+ AnalysisResults.erase({ID, &IR});
+ }
+ }
+
+ if (ResultsList.empty())
+ AnalysisResultLists.erase(&IR);
+}
+} // end namespace llvm
+
+#endif // LLVM_IR_PASSMANAGERIMPL_H
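
Because the bodies are now out of line, each IR unit needs exactly one translation unit that includes this header and explicitly instantiates the templates; everything else keeps including PassManager.h. A sketch of such an instantiation site (file name assumed):

    // lib/IR/PassManager.cpp (sketch)
    #include "llvm/IR/PassManagerImpl.h"

    namespace llvm {
    template class AnalysisManager<Module>;
    template class PassManager<Module>;
    } // namespace llvm
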
diff --git a/llvm/include/llvm/IR/PassTimingInfo.h b/llvm/include/llvm/IR/PassTimingInfo.h
index b8d8f117f73d..b70850fd64d7 100644
--- a/llvm/include/llvm/IR/PassTimingInfo.h
+++ b/llvm/include/llvm/IR/PassTimingInfo.h
@@ -55,11 +55,9 @@ class TimePassesHandler {
/// A group of all pass-timing timers.
TimerGroup TG;
+ using TimerVector = llvm::SmallVector<std::unique_ptr<Timer>, 4>;
/// Map of timers for pass invocations
- DenseMap<PassInvocationID, std::unique_ptr<Timer>> TimingData;
-
- /// Map that counts invocations of passes, for use in UniqPassID construction.
- StringMap<unsigned> PassIDCountMap;
+ StringMap<TimerVector> TimingData;
/// Stack of currently active timers.
SmallVector<Timer *, 8> TimerStack;
@@ -96,9 +94,6 @@ private:
/// Returns the new timer for each new run of the pass.
Timer &getPassTimer(StringRef PassID);
- /// Returns the incremented counter for the next invocation of \p PassID.
- unsigned nextPassID(StringRef PassID) { return ++PassIDCountMap[PassID]; }
-
void startTimer(StringRef PassID);
void stopTimer(StringRef PassID);
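
Keying by pass name and keeping a vector means each run of a pass can own its own Timer inside the shared TimerGroup, without the old invocation-counter bookkeeping. A sketch of the lookup this enables (assumed logic; the real body lives in PassTimingInfo.cpp):

    #include "llvm/ADT/StringMap.h"
    #include "llvm/Support/Timer.h"
    #include <memory>

    llvm::Timer &freshPassTimer(
        llvm::StringMap<llvm::SmallVector<std::unique_ptr<llvm::Timer>, 4>>
            &TimingData,
        llvm::TimerGroup &TG, llvm::StringRef PassID) {
      auto &Timers = TimingData[PassID];
      // One timer per invocation; TG aggregates them for the final report.
      Timers.emplace_back(std::make_unique<llvm::Timer>(PassID, PassID, TG));
      return *Timers.back();
    }
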
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 6621fc9f819c..4c11bc82510b 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -32,6 +32,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
@@ -49,6 +50,10 @@ template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
return const_cast<Pattern &>(P).match(V);
}
+template <typename Pattern> bool match(ArrayRef<int> Mask, const Pattern &P) {
+ return const_cast<Pattern &>(P).match(Mask);
+}
+
template <typename SubPattern_t> struct OneUse_match {
SubPattern_t SubPattern;
@@ -70,6 +75,11 @@ template <typename Class> struct class_match {
/// Match an arbitrary value and ignore it.
inline class_match<Value> m_Value() { return class_match<Value>(); }
+/// Match an arbitrary unary operation and ignore it.
+inline class_match<UnaryOperator> m_UnOp() {
+ return class_match<UnaryOperator>();
+}
+
/// Match an arbitrary binary operation and ignore it.
inline class_match<BinaryOperator> m_BinOp() {
return class_match<BinaryOperator>();
@@ -152,8 +162,10 @@ inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
struct apint_match {
const APInt *&Res;
+ bool AllowUndef;
- apint_match(const APInt *&R) : Res(R) {}
+ apint_match(const APInt *&Res, bool AllowUndef)
+ : Res(Res), AllowUndef(AllowUndef) {}
template <typename ITy> bool match(ITy *V) {
if (auto *CI = dyn_cast<ConstantInt>(V)) {
@@ -162,7 +174,8 @@ struct apint_match {
}
if (V->getType()->isVectorTy())
if (const auto *C = dyn_cast<Constant>(V))
- if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(
+ C->getSplatValue(AllowUndef))) {
Res = &CI->getValue();
return true;
}
@@ -174,7 +187,11 @@ struct apint_match {
// function for both apint/apfloat.
struct apfloat_match {
const APFloat *&Res;
- apfloat_match(const APFloat *&R) : Res(R) {}
+ bool AllowUndef;
+
+ apfloat_match(const APFloat *&Res, bool AllowUndef)
+ : Res(Res), AllowUndef(AllowUndef) {}
+
template <typename ITy> bool match(ITy *V) {
if (auto *CI = dyn_cast<ConstantFP>(V)) {
Res = &CI->getValueAPF();
@@ -182,7 +199,8 @@ struct apfloat_match {
}
if (V->getType()->isVectorTy())
if (const auto *C = dyn_cast<Constant>(V))
- if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) {
+ if (auto *CI = dyn_cast_or_null<ConstantFP>(
+ C->getSplatValue(AllowUndef))) {
Res = &CI->getValueAPF();
return true;
}
@@ -192,11 +210,37 @@ struct apfloat_match {
/// Match a ConstantInt or splatted ConstantVector, binding the
/// specified pointer to the contained APInt.
-inline apint_match m_APInt(const APInt *&Res) { return Res; }
+inline apint_match m_APInt(const APInt *&Res) {
+ // Forbid undefs by default to maintain previous behavior.
+ return apint_match(Res, /* AllowUndef */ false);
+}
+
+/// Match APInt while allowing undefs in splat vector constants.
+inline apint_match m_APIntAllowUndef(const APInt *&Res) {
+ return apint_match(Res, /* AllowUndef */ true);
+}
+
+/// Match APInt while forbidding undefs in splat vector constants.
+inline apint_match m_APIntForbidUndef(const APInt *&Res) {
+ return apint_match(Res, /* AllowUndef */ false);
+}
/// Match a ConstantFP or splatted ConstantVector, binding the
/// specified pointer to the contained APFloat.
-inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; }
+inline apfloat_match m_APFloat(const APFloat *&Res) {
+ // Forbid undefs by default to maintain previous behavior.
+ return apfloat_match(Res, /* AllowUndef */ false);
+}
+
+/// Match APFloat while allowing undefs in splat vector constants.
+inline apfloat_match m_APFloatAllowUndef(const APFloat *&Res) {
+ return apfloat_match(Res, /* AllowUndef */ true);
+}
+
+/// Match APFloat while forbidding undefs in splat vector constants.
+inline apfloat_match m_APFloatForbidUndef(const APFloat *&Res) {
+ return apfloat_match(Res, /* AllowUndef */ false);
+}
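
The split only matters for partially-undef splats. Assuming Cst is the constant <4 x i32> <i32 7, i32 undef, i32 7, i32 7>:

    const APInt *C;
    bool Strict = match(Cst, m_APInt(C));            // false: undef lane rejected
    bool Relaxed = match(Cst, m_APIntAllowUndef(C)); // true: C binds to 7
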
template <int64_t Val> struct constantint_match {
template <typename ITy> bool match(ITy *V) {
@@ -218,20 +262,26 @@ template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
return constantint_match<Val>();
}
-/// This helper class is used to match scalar and vector integer constants that
-/// satisfy a specified predicate.
-/// For vector constants, undefined elements are ignored.
-template <typename Predicate> struct cst_pred_ty : public Predicate {
+/// This helper class is used to match constant scalars, vector splats,
+/// and fixed width vectors that satisfy a specified predicate.
+/// For fixed width vector constants, undefined elements are ignored.
+template <typename Predicate, typename ConstantVal>
+struct cstval_pred_ty : public Predicate {
template <typename ITy> bool match(ITy *V) {
- if (const auto *CI = dyn_cast<ConstantInt>(V))
- return this->isValue(CI->getValue());
- if (V->getType()->isVectorTy()) {
+ if (const auto *CV = dyn_cast<ConstantVal>(V))
+ return this->isValue(CV->getValue());
+ if (const auto *VTy = dyn_cast<VectorType>(V->getType())) {
if (const auto *C = dyn_cast<Constant>(V)) {
- if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
- return this->isValue(CI->getValue());
+ if (const auto *CV = dyn_cast_or_null<ConstantVal>(C->getSplatValue()))
+ return this->isValue(CV->getValue());
+
+ // The number of elements of a scalable vector is unknown at compile time.
+ auto *FVTy = dyn_cast<FixedVectorType>(VTy);
+ if (!FVTy)
+ return false;
// Non-splat vector constant: check each element for a match.
- unsigned NumElts = V->getType()->getVectorNumElements();
+ unsigned NumElts = FVTy->getNumElements();
assert(NumElts != 0 && "Constant vector with no elements?");
bool HasNonUndefElements = false;
for (unsigned i = 0; i != NumElts; ++i) {
@@ -240,8 +290,8 @@ template <typename Predicate> struct cst_pred_ty : public Predicate {
return false;
if (isa<UndefValue>(Elt))
continue;
- auto *CI = dyn_cast<ConstantInt>(Elt);
- if (!CI || !this->isValue(CI->getValue()))
+ auto *CV = dyn_cast<ConstantVal>(Elt);
+ if (!CV || !this->isValue(CV->getValue()))
return false;
HasNonUndefElements = true;
}
@@ -252,6 +302,14 @@ template <typename Predicate> struct cst_pred_ty : public Predicate {
}
};
+/// Specialization of cstval_pred_ty for ConstantInt.
+template <typename Predicate>
+using cst_pred_ty = cstval_pred_ty<Predicate, ConstantInt>;
+
+/// Specialization of cstval_pred_ty for ConstantFP.
+template <typename Predicate>
+using cstfp_pred_ty = cstval_pred_ty<Predicate, ConstantFP>;
+
/// This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate, and bind them to an APInt.
template <typename Predicate> struct api_pred_ty : public Predicate {
@@ -277,40 +335,6 @@ template <typename Predicate> struct api_pred_ty : public Predicate {
}
};
-/// This helper class is used to match scalar and vector floating-point
-/// constants that satisfy a specified predicate.
-/// For vector constants, undefined elements are ignored.
-template <typename Predicate> struct cstfp_pred_ty : public Predicate {
- template <typename ITy> bool match(ITy *V) {
- if (const auto *CF = dyn_cast<ConstantFP>(V))
- return this->isValue(CF->getValueAPF());
- if (V->getType()->isVectorTy()) {
- if (const auto *C = dyn_cast<Constant>(V)) {
- if (const auto *CF = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
- return this->isValue(CF->getValueAPF());
-
- // Non-splat vector constant: check each element for a match.
- unsigned NumElts = V->getType()->getVectorNumElements();
- assert(NumElts != 0 && "Constant vector with no elements?");
- bool HasNonUndefElements = false;
- for (unsigned i = 0; i != NumElts; ++i) {
- Constant *Elt = C->getAggregateElement(i);
- if (!Elt)
- return false;
- if (isa<UndefValue>(Elt))
- continue;
- auto *CF = dyn_cast<ConstantFP>(Elt);
- if (!CF || !this->isValue(CF->getValueAPF()))
- return false;
- HasNonUndefElements = true;
- }
- return HasNonUndefElements;
- }
- }
- return false;
- }
-};
-
///////////////////////////////////////////////////////////////////////////////
//
// Encapsulate constant value queries for use in templated predicate matchers.
@@ -418,6 +442,7 @@ inline cst_pred_ty<is_zero_int> m_ZeroInt() {
struct is_zero {
template <typename ITy> bool match(ITy *V) {
auto *C = dyn_cast<Constant>(V);
+ // FIXME: this should be able to do something for scalable vectors
return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C));
}
};
@@ -530,6 +555,15 @@ inline cstfp_pred_ty<is_nan> m_NaN() {
return cstfp_pred_ty<is_nan>();
}
+struct is_inf {
+ bool isValue(const APFloat &C) { return C.isInfinity(); }
+};
+/// Match a positive or negative infinity FP constant.
+/// For vectors, this includes constants with undefined elements.
+inline cstfp_pred_ty<is_inf> m_Inf() {
+ return cstfp_pred_ty<is_inf>();
+}
+
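
A sketch of typical use, assuming I is the Value of an fdiv and the code already has `using namespace PatternMatch`:

    Value *X;
    if (match(I, m_FDiv(m_Value(X), m_Inf())))
      ; // divisor is +Inf or -Inf (undef lanes tolerated in vector splats)
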
struct is_any_zero_fp {
bool isValue(const APFloat &C) { return C.isZero(); }
};
@@ -579,6 +613,8 @@ inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
/// Match an instruction, capturing it if we match.
inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
+/// Match a unary operator, capturing it if we match.
+inline bind_ty<UnaryOperator> m_UnOp(UnaryOperator *&I) { return I; }
/// Match a binary operator, capturing it if we match.
inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
/// Match a with overflow intrinsic, capturing it if we match.
@@ -751,6 +787,26 @@ inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
}
//===----------------------------------------------------------------------===//
+// Matcher for any unary operator.
+// TODO: fuse the unary and binary matchers into an n-ary matcher.
+//
+template <typename OP_t> struct AnyUnaryOp_match {
+ OP_t X;
+
+ AnyUnaryOp_match(const OP_t &X) : X(X) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *I = dyn_cast<UnaryOperator>(V))
+ return X.match(I->getOperand(0));
+ return false;
+ }
+};
+
+template <typename OP_t> inline AnyUnaryOp_match<OP_t> m_UnOp(const OP_t &X) {
+ return AnyUnaryOp_match<OP_t>(X);
+}
+
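
FNeg is currently the only UnaryOperator, so these matchers mainly let code stay generic over future unary ops; a short sketch (V assumed to be a Value*):

    Value *X;
    if (match(V, m_UnOp(m_Value(X))))
      ; // V is any unary operator, X its sole operand
    UnaryOperator *UO;
    if (match(V, m_UnOp(UO)))
      ; // capture the operator itself instead
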
+//===----------------------------------------------------------------------===//
// Matchers for specific binary operators.
//
@@ -1155,13 +1211,16 @@ struct CmpClass_match {
: Predicate(Pred), L(LHS), R(RHS) {}
template <typename OpTy> bool match(OpTy *V) {
- if (auto *I = dyn_cast<Class>(V))
- if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
- (Commutable && L.match(I->getOperand(1)) &&
- R.match(I->getOperand(0)))) {
+ if (auto *I = dyn_cast<Class>(V)) {
+ if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
Predicate = I->getPredicate();
return true;
+ } else if (Commutable && L.match(I->getOperand(1)) &&
+ R.match(I->getOperand(0))) {
+ Predicate = I->getSwappedPredicate();
+ return true;
}
+ }
return false;
}
};
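
The behavioral change is that a commuted match now reports the swapped predicate rather than the original one. Given %c = icmp slt i32 %a, %b bound to C, A, B (assumed setup):

    ICmpInst::Predicate Pred;
    if (match(C, m_c_ICmp(Pred, m_Specific(B), m_Specific(A))))
      assert(Pred == ICmpInst::ICMP_SGT &&
             "predicate is reported relative to the swapped operand order");
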
@@ -1264,7 +1323,7 @@ inline OneOps_match<OpTy, Instruction::Freeze> m_Freeze(const OpTy &Op) {
/// Matches InsertElementInst.
template <typename Val_t, typename Elt_t, typename Idx_t>
inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
-m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
+m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
Val, Elt, Idx);
}
@@ -1272,16 +1331,73 @@ m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
/// Matches ExtractElementInst.
template <typename Val_t, typename Idx_t>
inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
-m_ExtractElement(const Val_t &Val, const Idx_t &Idx) {
+m_ExtractElt(const Val_t &Val, const Idx_t &Idx) {
return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
}
-/// Matches ShuffleVectorInst.
+/// Matches shuffle.
+template <typename T0, typename T1, typename T2> struct Shuffle_match {
+ T0 Op1;
+ T1 Op2;
+ T2 Mask;
+
+ Shuffle_match(const T0 &Op1, const T1 &Op2, const T2 &Mask)
+ : Op1(Op1), Op2(Op2), Mask(Mask) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *I = dyn_cast<ShuffleVectorInst>(V)) {
+ return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
+ Mask.match(I->getShuffleMask());
+ }
+ return false;
+ }
+};
+
+struct m_Mask {
+ ArrayRef<int> &MaskRef;
+ m_Mask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
+ bool match(ArrayRef<int> Mask) {
+ MaskRef = Mask;
+ return true;
+ }
+};
+
+struct m_ZeroMask {
+ bool match(ArrayRef<int> Mask) {
+ return all_of(Mask, [](int Elem) { return Elem == 0 || Elem == -1; });
+ }
+};
+
+struct m_SpecificMask {
+ ArrayRef<int> &MaskRef;
+ m_SpecificMask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
+ bool match(ArrayRef<int> Mask) { return MaskRef == Mask; }
+};
+
+struct m_SplatOrUndefMask {
+ int &SplatIndex;
+ m_SplatOrUndefMask(int &SplatIndex) : SplatIndex(SplatIndex) {}
+ bool match(ArrayRef<int> Mask) {
+ auto First = find_if(Mask, [](int Elem) { return Elem != -1; });
+ if (First == Mask.end())
+ return false;
+ SplatIndex = *First;
+ return all_of(Mask,
+ [First](int Elem) { return Elem == *First || Elem == -1; });
+ }
+};
+
+/// Matches ShuffleVectorInst independently of mask value.
+template <typename V1_t, typename V2_t>
+inline TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>
+m_Shuffle(const V1_t &v1, const V2_t &v2) {
+ return TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>(v1, v2);
+}
+
template <typename V1_t, typename V2_t, typename Mask_t>
-inline ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>
-m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m) {
- return ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>(v1, v2,
- m);
+inline Shuffle_match<V1_t, V2_t, Mask_t>
+m_Shuffle(const V1_t &v1, const V2_t &v2, const Mask_t &mask) {
+ return Shuffle_match<V1_t, V2_t, Mask_t>(v1, v2, mask);
}
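
Since the mask is now matched as data rather than as a third value operand, callers combine the two-operand form with one of the mask helpers above; a sketch (V assumed to be a Value*):

    Value *V1, *V2;
    ArrayRef<int> Mask;
    if (match(V, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))))
      ; // any shuffle, with Mask bound to its constant mask
    if (match(V, m_Shuffle(m_Value(V1), m_Undef(), m_ZeroMask())))
      ; // splat of lane 0 of V1, undef lanes allowed
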
/// Matches LoadInst.
@@ -1378,25 +1494,31 @@ m_ZExtOrSExtOrSelf(const OpTy &Op) {
return m_CombineOr(m_ZExtOrSExt(Op), Op);
}
-/// Matches UIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
return CastClass_match<OpTy, Instruction::UIToFP>(Op);
}
-/// Matches SIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
return CastClass_match<OpTy, Instruction::SIToFP>(Op);
}
-/// Matches FPTrunc
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::FPToUI> m_FPToUI(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::FPToUI>(Op);
+}
+
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::FPToSI> m_FPToSI(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::FPToSI>(Op);
+}
+
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
}
-/// Matches FPExt
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
return CastClass_match<OpTy, Instruction::FPExt>(Op);
@@ -1636,7 +1758,8 @@ m_UnordFMin(const LHS &L, const RHS &R) {
}
//===----------------------------------------------------------------------===//
-// Matchers for overflow check patterns: e.g. (a + b) u< a
+// Matchers for overflow check patterns: e.g. (a + b) u< a, (a ^ -1) u< b
+// Note that S may be matched to instructions other than a binary add.
//
template <typename LHS_t, typename RHS_t, typename Sum_t>
@@ -1667,6 +1790,19 @@ struct UAddWithOverflow_match {
if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
+ Value *Op1;
+ auto XorExpr = m_OneUse(m_Xor(m_Value(Op1), m_AllOnes()));
+ // (a ^ -1) <u b
+ if (Pred == ICmpInst::ICMP_ULT) {
+ if (XorExpr.match(ICmpLHS))
+ return L.match(Op1) && R.match(ICmpRHS) && S.match(ICmpLHS);
+ }
+ // b u> (a ^ -1)
+ if (Pred == ICmpInst::ICMP_UGT) {
+ if (XorExpr.match(ICmpRHS))
+ return L.match(Op1) && R.match(ICmpLHS) && S.match(ICmpRHS);
+ }
+
// Match special-case for increment-by-1.
if (Pred == ICmpInst::ICMP_EQ) {
// (a + 1) == 0
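
The xor form is an overflow check because a + b wraps unsigned exactly when b > UINT_MAX - a, and UINT_MAX - a is ~a, i.e. a ^ -1. A sketch showing what S binds to in that case (Cmp assumed to be the icmp's Value*):

    Value *A, *B, *Sum;
    if (match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum))))
      ; // for (~a) u< b, Sum binds to the xor itself rather than an add
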
@@ -1764,6 +1900,12 @@ struct m_Intrinsic_Ty<T0, T1, T2, T3, T4> {
Argument_match<T4>>;
};
+template <typename T0, typename T1, typename T2, typename T3, typename T4,
+          typename T5>
+struct m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5> {
+ using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty,
+ Argument_match<T5>>;
+};
+
/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
@@ -1803,6 +1945,15 @@ m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
m_Argument<4>(Op4));
}
+template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
+ typename T3, typename T4, typename T5>
+inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
+ const T4 &Op4, const T5 &Op5) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3, Op4),
+ m_Argument<5>(Op5));
+}
+
// Helper intrinsic matching specializations.
template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
@@ -1847,7 +1998,7 @@ inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
}
/// Matches an ICmp with a predicate over LHS and RHS in either order.
-/// Does not swap the predicate.
+/// Swaps the predicate if operands are commuted.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
@@ -2002,6 +2153,42 @@ inline ExtractValue_match<Ind, Val_t> m_ExtractValue(const Val_t &V) {
return ExtractValue_match<Ind, Val_t>(V);
}
+/// Matches patterns for `vscale`. This can either be a call to `llvm.vscale` or
+/// the constant expression
+/// `ptrtoint(gep <vscale x 1 x i8>, <vscale x 1 x i8>* null, i32 1)`
+/// under the right conditions determined by DataLayout.
+struct VScaleVal_match {
+private:
+ template <typename Base, typename Offset>
+ inline BinaryOp_match<Base, Offset, Instruction::GetElementPtr>
+ m_OffsetGep(const Base &B, const Offset &O) {
+ return BinaryOp_match<Base, Offset, Instruction::GetElementPtr>(B, O);
+ }
+
+public:
+ const DataLayout &DL;
+ VScaleVal_match(const DataLayout &DL) : DL(DL) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ if (m_Intrinsic<Intrinsic::vscale>().match(V))
+ return true;
+
+ if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
+ Type *PtrTy = cast<Operator>(V)->getOperand(0)->getType();
+ auto *DerefTy = PtrTy->getPointerElementType();
+ if (isa<ScalableVectorType>(DerefTy) &&
+ DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
+ return true;
+ }
+
+ return false;
+ }
+};
+
+inline VScaleVal_match m_VScale(const DataLayout &DL) {
+ return VScaleVal_match(DL);
+}
+
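
Use only requires threading the DataLayout through; a minimal sketch:

    static bool computesVScale(Value *V, const DataLayout &DL) {
      using namespace PatternMatch;
      // Accepts a call to llvm.vscale as well as the equivalent
      // ptrtoint(gep <vscale x 1 x i8>* null, 1) constant expression.
      return match(V, m_VScale(DL));
    }
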
} // end namespace PatternMatch
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/ProfileSummary.h b/llvm/include/llvm/IR/ProfileSummary.h
index 78635ec4386c..889568e7946b 100644
--- a/llvm/include/llvm/IR/ProfileSummary.h
+++ b/llvm/include/llvm/IR/ProfileSummary.h
@@ -14,6 +14,7 @@
#define LLVM_IR_PROFILESUMMARY_H
#include <algorithm>
+#include <cassert>
#include <cstdint>
#include <vector>
@@ -21,6 +22,7 @@ namespace llvm {
class LLVMContext;
class Metadata;
+class raw_ostream;
// The profile summary is one or more (Cutoff, MinCount, NumCounts) triplets.
// The semantics of counts depend on the type of profile. For instrumentation
@@ -49,6 +51,17 @@ private:
SummaryEntryVector DetailedSummary;
uint64_t TotalCount, MaxCount, MaxInternalCount, MaxFunctionCount;
uint32_t NumCounts, NumFunctions;
+ /// If 'Partial' is false, the profile being used to optimize a target was
+ /// collected from that same target.
+ /// If 'Partial' is true, the profile is for common/shared code. Such a
+ /// common profile is usually merged from profiles collected from running
+ /// other targets.
+ bool Partial = false;
+ /// This approximately represents the ratio of the number of profile counters
+ /// of the program being built to the number of profile counters in the
+ /// partial sample profile. When 'Partial' is false, it is undefined. This is
+ /// currently only available under thin LTO mode.
+ double PartialProfileRatio = 0;
/// Return detailed summary as metadata.
Metadata *getDetailedSummaryMD(LLVMContext &Context);
@@ -58,15 +71,18 @@ public:
ProfileSummary(Kind K, SummaryEntryVector DetailedSummary,
uint64_t TotalCount, uint64_t MaxCount,
uint64_t MaxInternalCount, uint64_t MaxFunctionCount,
- uint32_t NumCounts, uint32_t NumFunctions)
+ uint32_t NumCounts, uint32_t NumFunctions,
+ bool Partial = false, double PartialProfileRatio = 0)
: PSK(K), DetailedSummary(std::move(DetailedSummary)),
TotalCount(TotalCount), MaxCount(MaxCount),
MaxInternalCount(MaxInternalCount), MaxFunctionCount(MaxFunctionCount),
- NumCounts(NumCounts), NumFunctions(NumFunctions) {}
+ NumCounts(NumCounts), NumFunctions(NumFunctions), Partial(Partial),
+ PartialProfileRatio(PartialProfileRatio) {}
Kind getKind() const { return PSK; }
/// Return summary information as metadata.
- Metadata *getMD(LLVMContext &Context);
+ Metadata *getMD(LLVMContext &Context, bool AddPartialField = true,
+ bool AddPartialProfileRatioField = true);
/// Construct profile summary from metadata.
static ProfileSummary *getFromMD(Metadata *MD);
SummaryEntryVector &getDetailedSummary() { return DetailedSummary; }
@@ -76,6 +92,15 @@ public:
uint64_t getTotalCount() { return TotalCount; }
uint64_t getMaxCount() { return MaxCount; }
uint64_t getMaxInternalCount() { return MaxInternalCount; }
+ void setPartialProfile(bool PP) { Partial = PP; }
+ bool isPartialProfile() { return Partial; }
+ double getPartialProfileRatio() { return PartialProfileRatio; }
+ void setPartialProfileRatio(double R) {
+ assert(isPartialProfile() && "Unexpected when not partial profile");
+ PartialProfileRatio = R;
+ }
+ void printSummary(raw_ostream &OS);
+ void printDetailedSummary(raw_ostream &OS);
};
} // end namespace llvm
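
Building a partial-profile summary just threads the two new fields through the constructor; the counts and the ratio below are illustrative only:

    SummaryEntryVector Entries; // (Cutoff, MinCount, NumCounts) triples
    ProfileSummary PS(ProfileSummary::PSK_Sample, std::move(Entries),
                      /*TotalCount=*/1000000, /*MaxCount=*/50000,
                      /*MaxInternalCount=*/0, /*MaxFunctionCount=*/50000,
                      /*NumCounts=*/12000, /*NumFunctions=*/300,
                      /*Partial=*/true, /*PartialProfileRatio=*/0.33);
    assert(PS.isPartialProfile());
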
diff --git a/llvm/include/llvm/IR/RemarkStreamer.h b/llvm/include/llvm/IR/RemarkStreamer.h
deleted file mode 100644
index 9ea12e8389f0..000000000000
--- a/llvm/include/llvm/IR/RemarkStreamer.h
+++ /dev/null
@@ -1,108 +0,0 @@
-//===- llvm/IR/RemarkStreamer.h - Remark Streamer ---------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the main interface for outputting remarks.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_IR_REMARKSTREAMER_H
-#define LLVM_IR_REMARKSTREAMER_H
-
-#include "llvm/IR/DiagnosticInfo.h"
-#include "llvm/Remarks/RemarkSerializer.h"
-#include "llvm/Support/Error.h"
-#include "llvm/Support/Regex.h"
-#include "llvm/Support/ToolOutputFile.h"
-#include "llvm/Support/raw_ostream.h"
-#include <string>
-#include <vector>
-
-namespace llvm {
-/// Streamer for remarks.
-class RemarkStreamer {
- /// The regex used to filter remarks based on the passes that emit them.
- Optional<Regex> PassFilter;
- /// The object used to serialize the remarks to a specific format.
- std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer;
- /// The filename that the remark diagnostics are emitted to.
- const Optional<std::string> Filename;
-
- /// Convert diagnostics into remark objects.
- /// The lifetime of the members of the result is bound to the lifetime of
- /// the LLVM diagnostics.
- remarks::Remark toRemark(const DiagnosticInfoOptimizationBase &Diag);
-
-public:
- RemarkStreamer(std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer,
- Optional<StringRef> Filename = None);
- /// Return the filename that the remark diagnostics are emitted to.
- Optional<StringRef> getFilename() const {
- return Filename ? Optional<StringRef>(*Filename) : None;
- }
- /// Return stream that the remark diagnostics are emitted to.
- raw_ostream &getStream() { return RemarkSerializer->OS; }
- /// Return the serializer used for this stream.
- remarks::RemarkSerializer &getSerializer() { return *RemarkSerializer; }
- /// Set a pass filter based on a regex \p Filter.
- /// Returns an error if the regex is invalid.
- Error setFilter(StringRef Filter);
- /// Emit a diagnostic through the streamer.
- void emit(const DiagnosticInfoOptimizationBase &Diag);
- /// Check if the remarks also need to have associated metadata in a section.
- bool needsSection() const;
-};
-
-template <typename ThisError>
-struct RemarkSetupErrorInfo : public ErrorInfo<ThisError> {
- std::string Msg;
- std::error_code EC;
-
- RemarkSetupErrorInfo(Error E) {
- handleAllErrors(std::move(E), [&](const ErrorInfoBase &EIB) {
- Msg = EIB.message();
- EC = EIB.convertToErrorCode();
- });
- }
-
- void log(raw_ostream &OS) const override { OS << Msg; }
- std::error_code convertToErrorCode() const override { return EC; }
-};
-
-struct RemarkSetupFileError : RemarkSetupErrorInfo<RemarkSetupFileError> {
- static char ID;
- using RemarkSetupErrorInfo<RemarkSetupFileError>::RemarkSetupErrorInfo;
-};
-
-struct RemarkSetupPatternError : RemarkSetupErrorInfo<RemarkSetupPatternError> {
- static char ID;
- using RemarkSetupErrorInfo<RemarkSetupPatternError>::RemarkSetupErrorInfo;
-};
-
-struct RemarkSetupFormatError : RemarkSetupErrorInfo<RemarkSetupFormatError> {
- static char ID;
- using RemarkSetupErrorInfo<RemarkSetupFormatError>::RemarkSetupErrorInfo;
-};
-
-/// Setup optimization remarks that output to a file.
-Expected<std::unique_ptr<ToolOutputFile>>
-setupOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
- StringRef RemarksPasses, StringRef RemarksFormat,
- bool RemarksWithHotness,
- unsigned RemarksHotnessThreshold = 0);
-
-/// Setup optimization remarks that output directly to a raw_ostream.
-/// \p OS is managed by the caller and should be open for writing as long as \p
-/// Context is streaming remarks to it.
-Error setupOptimizationRemarks(LLVMContext &Context, raw_ostream &OS,
- StringRef RemarksPasses, StringRef RemarksFormat,
- bool RemarksWithHotness,
- unsigned RemarksHotnessThreshold = 0);
-
-} // end namespace llvm
-
-#endif // LLVM_IR_REMARKSTREAMER_H
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.def b/llvm/include/llvm/IR/RuntimeLibcalls.def
index fe2c32e3c975..903db6c70498 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.def
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.def
@@ -234,6 +234,11 @@ HANDLE_LIBCALL(ROUND_F64, "round")
HANDLE_LIBCALL(ROUND_F80, "roundl")
HANDLE_LIBCALL(ROUND_F128, "roundl")
HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
+HANDLE_LIBCALL(ROUNDEVEN_F32, "roundevenf")
+HANDLE_LIBCALL(ROUNDEVEN_F64, "roundeven")
+HANDLE_LIBCALL(ROUNDEVEN_F80, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_PPCF128, "roundevenl")
HANDLE_LIBCALL(FLOOR_F32, "floorf")
HANDLE_LIBCALL(FLOOR_F64, "floor")
HANDLE_LIBCALL(FLOOR_F80, "floorl")
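
Consumers pick the new entries up automatically through the usual X-macro expansion of the .def file; a sketch:

    static const char *LibcallRoutineNames[] = {
    #define HANDLE_LIBCALL(code, name) name,
    #include "llvm/IR/RuntimeLibcalls.def"
    #undef HANDLE_LIBCALL
    };
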
diff --git a/llvm/include/llvm/IR/Statepoint.h b/llvm/include/llvm/IR/Statepoint.h
index 89f130bc3351..1ace39c10701 100644
--- a/llvm/include/llvm/IR/Statepoint.h
+++ b/llvm/include/llvm/IR/Statepoint.h
@@ -55,37 +55,25 @@ enum class StatepointFlags {
class GCRelocateInst;
class GCResultInst;
-bool isStatepoint(const CallBase *Call);
-bool isStatepoint(const Value *V);
-bool isStatepoint(const Value &V);
-
-bool isGCRelocate(const CallBase *Call);
-bool isGCRelocate(const Value *V);
-
-bool isGCResult(const CallBase *Call);
-bool isGCResult(const Value *V);
-
-/// A wrapper around a GC intrinsic call, this provides most of the actual
-/// functionality for Statepoint and ImmutableStatepoint. It is
-/// templatized to allow easily specializing of const and non-const
-/// concrete subtypes.
-template <typename FunTy, typename InstructionTy, typename ValueTy,
- typename CallBaseTy>
-class StatepointBase {
- CallBaseTy *StatepointCall;
+/// Represents a gc.statepoint intrinsic call. This extends directly from
+/// CallBase because IntrinsicInst only supports calls, while gc.statepoint is
+/// invokable.
+class GCStatepointInst : public CallBase {
+public:
+ GCStatepointInst() = delete;
+ GCStatepointInst(const GCStatepointInst &) = delete;
+ GCStatepointInst &operator=(const GCStatepointInst &) = delete;
-protected:
- explicit StatepointBase(InstructionTy *I) {
- StatepointCall = isStatepoint(I) ? cast<CallBaseTy>(I) : nullptr;
+ static bool classof(const CallBase *I) {
+ if (const Function *CF = I->getCalledFunction())
+ return CF->getIntrinsicID() == Intrinsic::experimental_gc_statepoint;
+ return false;
}
- explicit StatepointBase(CallBaseTy *Call) {
- StatepointCall = isStatepoint(Call) ? Call : nullptr;
+ static bool classof(const Value *V) {
+ return isa<CallBase>(V) && classof(cast<CallBase>(V));
}
-public:
- using arg_iterator = typename CallBaseTy::const_op_iterator;
-
enum {
IDPos = 0,
NumPatchBytesPos = 1,
@@ -95,220 +83,172 @@ public:
CallArgsBeginPos = 5,
};
- void *operator new(size_t, unsigned) = delete;
- void *operator new(size_t s) = delete;
-
- explicit operator bool() const {
- // We do not assign non-statepoint call instructions to StatepointCall.
- return (bool)StatepointCall;
- }
-
- /// Return the underlying call instruction.
- CallBaseTy *getCall() const {
- assert(*this && "check validity first!");
- return StatepointCall;
- }
-
- uint64_t getFlags() const {
- return cast<ConstantInt>(getCall()->getArgOperand(FlagsPos))
- ->getZExtValue();
- }
-
/// Return the ID associated with this statepoint.
uint64_t getID() const {
- const Value *IDVal = getCall()->getArgOperand(IDPos);
- return cast<ConstantInt>(IDVal)->getZExtValue();
+ return cast<ConstantInt>(getArgOperand(IDPos))->getZExtValue();
}
/// Return the number of patchable bytes associated with this statepoint.
uint32_t getNumPatchBytes() const {
- const Value *NumPatchBytesVal = getCall()->getArgOperand(NumPatchBytesPos);
+ const Value *NumPatchBytesVal = getArgOperand(NumPatchBytesPos);
uint64_t NumPatchBytes =
cast<ConstantInt>(NumPatchBytesVal)->getZExtValue();
assert(isInt<32>(NumPatchBytes) && "should fit in 32 bits!");
return NumPatchBytes;
}
- /// Return the value actually being called or invoked.
- ValueTy *getCalledValue() const {
- return getCall()->getArgOperand(CalledFunctionPos);
+ /// Number of arguments to be passed to the actual callee.
+ int getNumCallArgs() const {
+ return cast<ConstantInt>(getArgOperand(NumCallArgsPos))->getZExtValue();
}
- // FIXME: Migrate users of this to `getCall` and remove it.
- InstructionTy *getInstruction() const { return getCall(); }
-
- /// Return the function being called if this is a direct call, otherwise
- /// return null (if it's an indirect call).
- FunTy *getCalledFunction() const {
- return dyn_cast<Function>(getCalledValue());
+ uint64_t getFlags() const {
+ return cast<ConstantInt>(getArgOperand(FlagsPos))->getZExtValue();
}
- /// Return the caller function for this statepoint.
- FunTy *getCaller() const { return getCall()->getCaller(); }
+ /// Return the value actually being called or invoked.
+ Value *getActualCalledOperand() const {
+ return getArgOperand(CalledFunctionPos);
+ }
- /// Determine if the statepoint cannot unwind.
- bool doesNotThrow() const {
- Function *F = getCalledFunction();
- return getCall()->doesNotThrow() || (F ? F->doesNotThrow() : false);
+ /// Returns the function called if this statepoint wraps a direct call, and
+ /// null otherwise.
+ Function *getActualCalledFunction() const {
+ return dyn_cast_or_null<Function>(getActualCalledOperand());
}
/// Return the type of the value returned by the call underlying the
/// statepoint.
Type *getActualReturnType() const {
- auto *FTy = cast<FunctionType>(
- cast<PointerType>(getCalledValue()->getType())->getElementType());
- return FTy->getReturnType();
+ auto *CalleeTy =
+ cast<PointerType>(getActualCalledOperand()->getType())->getElementType();
+ return cast<FunctionType>(CalleeTy)->getReturnType();
}
- /// Number of arguments to be passed to the actual callee.
- int getNumCallArgs() const {
- const Value *NumCallArgsVal = getCall()->getArgOperand(NumCallArgsPos);
- return cast<ConstantInt>(NumCallArgsVal)->getZExtValue();
- }
- size_t arg_size() const { return getNumCallArgs(); }
- arg_iterator arg_begin() const {
- assert(CallArgsBeginPos <= (int)getCall()->arg_size());
- return getCall()->arg_begin() + CallArgsBeginPos;
+ /// Return the number of arguments to the underlying call.
+ size_t actual_arg_size() const { return getNumCallArgs(); }
+ /// Return an iterator to the beginning of the arguments to the underlying call.
+ const_op_iterator actual_arg_begin() const {
+ assert(CallArgsBeginPos <= (int)arg_size());
+ return arg_begin() + CallArgsBeginPos;
}
- arg_iterator arg_end() const {
- auto I = arg_begin() + arg_size();
- assert((getCall()->arg_end() - I) >= 0);
+ /// Return an end iterator for the arguments to the underlying call.
+ const_op_iterator actual_arg_end() const {
+ auto I = actual_arg_begin() + actual_arg_size();
+ assert((arg_end() - I) >= 0);
return I;
}
-
- ValueTy *getArgument(unsigned Index) {
- assert(Index < arg_size() && "out of bounds!");
- return *(arg_begin() + Index);
+ /// range adapter for actual call arguments
+ iterator_range<const_op_iterator> actual_args() const {
+ return make_range(actual_arg_begin(), actual_arg_end());
}
- /// range adapter for call arguments
- iterator_range<arg_iterator> call_args() const {
- return make_range(arg_begin(), arg_end());
- }
-
- /// Return true if the call or the callee has the given attribute.
- bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
- Function *F = getCalledFunction();
- return getCall()->paramHasAttr(i + CallArgsBeginPos, A) ||
- (F ? F->getAttributes().hasAttribute(i, A) : false);
- }
-
- /// Number of GC transition args.
- int getNumTotalGCTransitionArgs() const {
- const Value *NumGCTransitionArgs = *arg_end();
- return cast<ConstantInt>(NumGCTransitionArgs)->getZExtValue();
- }
- arg_iterator gc_transition_args_begin() const {
- auto I = arg_end() + 1;
- assert((getCall()->arg_end() - I) >= 0);
+ const_op_iterator gc_transition_args_begin() const {
+ if (auto Opt = getOperandBundle(LLVMContext::OB_gc_transition))
+ return Opt->Inputs.begin();
+ auto I = actual_arg_end() + 1;
+ assert((arg_end() - I) >= 0);
return I;
}
- arg_iterator gc_transition_args_end() const {
- auto I = gc_transition_args_begin() + getNumTotalGCTransitionArgs();
- assert((getCall()->arg_end() - I) >= 0);
+ const_op_iterator gc_transition_args_end() const {
+ if (auto Opt = getOperandBundle(LLVMContext::OB_gc_transition))
+ return Opt->Inputs.end();
+ auto I = gc_transition_args_begin() + getNumGCTransitionArgs();
+ assert((arg_end() - I) >= 0);
return I;
}
/// range adapter for GC transition arguments
- iterator_range<arg_iterator> gc_transition_args() const {
+ iterator_range<const_op_iterator> gc_transition_args() const {
return make_range(gc_transition_args_begin(), gc_transition_args_end());
}
- /// Number of additional arguments excluding those intended
- /// for garbage collection.
- int getNumTotalVMSArgs() const {
- const Value *NumVMSArgs = *gc_transition_args_end();
- return cast<ConstantInt>(NumVMSArgs)->getZExtValue();
- }
-
- arg_iterator deopt_begin() const {
- auto I = gc_transition_args_end() + 1;
- assert((getCall()->arg_end() - I) >= 0);
+ const_op_iterator deopt_begin() const {
+ if (auto Opt = getOperandBundle(LLVMContext::OB_deopt))
+ return Opt->Inputs.begin();
+ // The current format has two length-prefix operands between call args and
+ // start of gc args. This will be removed in the near future.
+ uint64_t NumTrans = getNumGCTransitionArgs();
+ const_op_iterator I = actual_arg_end() + 2 + NumTrans;
+ assert((arg_end() - I) >= 0);
return I;
}
- arg_iterator deopt_end() const {
- auto I = deopt_begin() + getNumTotalVMSArgs();
- assert((getCall()->arg_end() - I) >= 0);
+ const_op_iterator deopt_end() const {
+ if (auto Opt = getOperandBundle(LLVMContext::OB_deopt))
+ return Opt->Inputs.end();
+ auto I = deopt_begin() + getNumDeoptArgs();
+ assert((arg_end() - I) >= 0);
return I;
}
/// range adapter for vm state arguments
- iterator_range<arg_iterator> deopt_operands() const {
+ iterator_range<const_op_iterator> deopt_operands() const {
return make_range(deopt_begin(), deopt_end());
}
- arg_iterator gc_args_begin() const { return deopt_end(); }
- arg_iterator gc_args_end() const { return getCall()->arg_end(); }
+ /// Returns an iterator to the beginning of the argument range describing gc
+ /// values for the statepoint.
+ const_op_iterator gc_args_begin() const {
+ if (auto Opt = getOperandBundle(LLVMContext::OB_gc_live))
+ return Opt->Inputs.begin();
+
+ // The current format has two length-prefix operands between call args and
+ // start of gc args. This will be removed in the near future.
+ uint64_t NumTrans = getNumGCTransitionArgs();
+ uint64_t NumDeopt = getNumDeoptArgs();
+ auto I = actual_arg_end() + 2 + NumTrans + NumDeopt;
+ assert((arg_end() - I) >= 0);
+ return I;
+ }
+
+ /// Return an end iterator for the gc argument range
+ const_op_iterator gc_args_end() const {
+ if (auto Opt = getOperandBundle(LLVMContext::OB_gc_live))
+ return Opt->Inputs.end();
+ return arg_end();
+ }
+
+ /// Return the operand index at which the gc args begin
unsigned gcArgsStartIdx() const {
- return gc_args_begin() - getCall()->op_begin();
+ assert(!getOperandBundle(LLVMContext::OB_gc_live));
+ return gc_args_begin() - op_begin();
}
/// range adapter for gc arguments
- iterator_range<arg_iterator> gc_args() const {
+ iterator_range<const_op_iterator> gc_args() const {
return make_range(gc_args_begin(), gc_args_end());
}
+
/// Get the list of all gc.relocates linked to this statepoint.
/// May contain several relocations for the same base/derived pair.
/// For example this could happen due to relocations on the unwinding
/// path of an invoke.
- std::vector<const GCRelocateInst *> getRelocates() const;
+ inline std::vector<const GCRelocateInst *> getGCRelocates() const;
- /// Get the experimental_gc_result call tied to this statepoint. Can be
- /// nullptr if there isn't a gc_result tied to this statepoint. Guaranteed to
- /// be a CallInst if non-null.
+ /// Get the experimental_gc_result call tied to this statepoint if there is
+ /// one, otherwise return nullptr.
const GCResultInst *getGCResult() const {
- for (auto *U : getInstruction()->users())
+ for (auto *U : users())
if (auto *GRI = dyn_cast<GCResultInst>(U))
return GRI;
return nullptr;
}
-#ifndef NDEBUG
- /// Asserts if this statepoint is malformed. Common cases for failure
- /// include incorrect length prefixes for variable length sections or
- /// illegal values for parameters.
- void verify() {
- assert(getNumCallArgs() >= 0 &&
- "number of arguments to actually callee can't be negative");
-
- // The internal asserts in the iterator accessors do the rest.
- (void)arg_begin();
- (void)arg_end();
- (void)gc_transition_args_begin();
- (void)gc_transition_args_end();
- (void)deopt_begin();
- (void)deopt_end();
- (void)gc_args_begin();
- (void)gc_args_end();
- }
-#endif
-};
-
-/// A specialization of it's base class for read only access
-/// to a gc.statepoint.
-class ImmutableStatepoint
- : public StatepointBase<const Function, const Instruction, const Value,
- const CallBase> {
- using Base = StatepointBase<const Function, const Instruction, const Value,
- const CallBase>;
-
-public:
- explicit ImmutableStatepoint(const Instruction *I) : Base(I) {}
- explicit ImmutableStatepoint(const CallBase *Call) : Base(Call) {}
-};
-
-/// A specialization of it's base class for read-write access
-/// to a gc.statepoint.
-class Statepoint
- : public StatepointBase<Function, Instruction, Value, CallBase> {
- using Base = StatepointBase<Function, Instruction, Value, CallBase>;
+private:
+ int getNumGCTransitionArgs() const {
+ const Value *NumGCTransitionArgs = *actual_arg_end();
+ return cast<ConstantInt>(NumGCTransitionArgs)->getZExtValue();
+ }
-public:
- explicit Statepoint(Instruction *I) : Base(I) {}
- explicit Statepoint(CallBase *Call) : Base(Call) {}
+ int getNumDeoptArgs() const {
+ uint64_t NumTrans = getNumGCTransitionArgs();
+ const_op_iterator trans_end = actual_arg_end() + 1 + NumTrans;
+ const Value *NumDeoptArgs = *trans_end;
+ return cast<ConstantInt>(NumDeoptArgs)->getZExtValue();
+ }
};
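
With classof in place, the class participates in the normal casting machinery; a sketch of typical traversal:

    void visitStatepoint(const CallBase &CB) {
      if (const auto *SP = dyn_cast<GCStatepointInst>(&CB)) {
        Function *Callee = SP->getActualCalledFunction(); // null if indirect
        (void)Callee;
        for (const Value *Arg : SP->actual_args())
          (void)Arg;   // arguments of the wrapped call, not the statepoint
        for (const Value *GCPtr : SP->gc_args())
          (void)GCPtr; // live GC pointers, bundle or legacy layout alike
      }
    }
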
/// Common base class for representing values projected from a statepoint.
@@ -333,15 +273,13 @@ public:
}
/// The statepoint with which this gc.relocate is associated.
- const CallBase *getStatepoint() const {
+ const GCStatepointInst *getStatepoint() const {
const Value *Token = getArgOperand(0);
// This takes care both of relocates for call statepoints and relocates
// on normal path of invoke statepoint.
- if (!isa<LandingPadInst>(Token)) {
- assert(isStatepoint(Token));
- return cast<CallBase>(Token);
- }
+ if (!isa<LandingPadInst>(Token))
+ return cast<GCStatepointInst>(Token);
// This relocate is on exceptional path of an invoke statepoint
const BasicBlock *InvokeBB =
@@ -350,9 +288,8 @@ public:
assert(InvokeBB && "safepoints should have unique landingpads");
assert(InvokeBB->getTerminator() &&
"safepoint block should be well formed");
- assert(isStatepoint(InvokeBB->getTerminator()));
- return cast<CallBase>(InvokeBB->getTerminator());
+ return cast<GCStatepointInst>(InvokeBB->getTerminator());
}
};
@@ -381,10 +318,14 @@ public:
}
Value *getBasePtr() const {
+ if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
+ return *(Opt->Inputs.begin() + getBasePtrIndex());
return *(getStatepoint()->arg_begin() + getBasePtrIndex());
}
Value *getDerivedPtr() const {
+ if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
+ return *(Opt->Inputs.begin() + getDerivedPtrIndex());
return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
}
};
@@ -401,21 +342,17 @@ public:
}
};
-template <typename FunTy, typename InstructionTy, typename ValueTy,
- typename CallBaseTy>
-std::vector<const GCRelocateInst *>
-StatepointBase<FunTy, InstructionTy, ValueTy, CallBaseTy>::getRelocates()
- const {
+std::vector<const GCRelocateInst *> GCStatepointInst::getGCRelocates() const {
std::vector<const GCRelocateInst *> Result;
// Search for relocated pointers. Note that working backwards from the
// gc_relocates ensures that we only get pairs which are actually relocated
// and used after the statepoint.
- for (const User *U : StatepointCall->users())
+ for (const User *U : users())
if (auto *Relocate = dyn_cast<GCRelocateInst>(U))
Result.push_back(Relocate);
- auto *StatepointInvoke = dyn_cast<InvokeInst>(StatepointCall);
+ auto *StatepointInvoke = dyn_cast<InvokeInst>(this);
if (!StatepointInvoke)
return Result;
diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
index d0961dac833d..1f546884b924 100644
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -53,27 +53,28 @@ public:
/// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding.
///
enum TypeID {
- // PrimitiveTypes - make sure LastPrimitiveTyID stays up to date.
- VoidTyID = 0, ///< 0: type with no size
- HalfTyID, ///< 1: 16-bit floating point type
- FloatTyID, ///< 2: 32-bit floating point type
- DoubleTyID, ///< 3: 64-bit floating point type
- X86_FP80TyID, ///< 4: 80-bit floating point type (X87)
- FP128TyID, ///< 5: 128-bit floating point type (112-bit mantissa)
- PPC_FP128TyID, ///< 6: 128-bit floating point type (two 64-bits, PowerPC)
- LabelTyID, ///< 7: Labels
- MetadataTyID, ///< 8: Metadata
- X86_MMXTyID, ///< 9: MMX vectors (64 bits, X86 specific)
- TokenTyID, ///< 10: Tokens
+ // PrimitiveTypes
+ HalfTyID = 0, ///< 16-bit floating point type
+ BFloatTyID, ///< 16-bit floating point type (7-bit significand)
+ FloatTyID, ///< 32-bit floating point type
+ DoubleTyID, ///< 64-bit floating point type
+ X86_FP80TyID, ///< 80-bit floating point type (X87)
+ FP128TyID, ///< 128-bit floating point type (112-bit significand)
+ PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
+ VoidTyID, ///< type with no size
+ LabelTyID, ///< Labels
+ MetadataTyID, ///< Metadata
+ X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific)
+ TokenTyID, ///< Tokens
// Derived types... see DerivedTypes.h file.
- // Make sure FirstDerivedTyID stays up to date!
- IntegerTyID, ///< 11: Arbitrary bit width integers
- FunctionTyID, ///< 12: Functions
- StructTyID, ///< 13: Structures
- ArrayTyID, ///< 14: Arrays
- PointerTyID, ///< 15: Pointers
- VectorTyID ///< 16: SIMD 'packed' format, or other vector type
+ IntegerTyID, ///< Arbitrary bit width integers
+ FunctionTyID, ///< Functions
+ PointerTyID, ///< Pointers
+ StructTyID, ///< Structures
+ ArrayTyID, ///< Arrays
+ FixedVectorTyID, ///< Fixed width SIMD vector type
+ ScalableVectorTyID ///< Scalable SIMD vector type
};
private:
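
Client code reaches the new entries through the usual factories and predicates; a short sketch using names from this patch:

    bool isBFloatOrBFloatVector(Type *Ty) {
      // Covers scalar bfloat plus fixed and scalable bfloat vectors.
      return Ty->getScalarType()->isBFloatTy();
    }
    // Creation goes through the factory: Type *BF = Type::getBFloatTy(Ctx);
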
@@ -110,10 +111,6 @@ protected:
/// Float).
Type * const *ContainedTys = nullptr;
- static bool isSequentialType(TypeID TyID) {
- return TyID == ArrayTyID || TyID == VectorTyID;
- }
-
public:
/// Print the current type.
/// Omit the type details if \p NoDetails == true.
@@ -143,6 +140,9 @@ public:
/// Return true if this is 'half', a 16-bit IEEE fp type.
bool isHalfTy() const { return getTypeID() == HalfTyID; }
+ /// Return true if this is 'bfloat', a 16-bit bfloat type.
+ bool isBFloatTy() const { return getTypeID() == BFloatTyID; }
+
/// Return true if this is 'float', a 32-bit IEEE fp type.
bool isFloatTy() const { return getTypeID() == FloatTyID; }
@@ -160,8 +160,8 @@ public:
/// Return true if this is one of the floating-point types
bool isFloatingPointTy() const {
- return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
- getTypeID() == DoubleTyID ||
+ return getTypeID() == HalfTyID || getTypeID() == BFloatTyID ||
+ getTypeID() == FloatTyID || getTypeID() == DoubleTyID ||
getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
getTypeID() == PPC_FP128TyID;
}
@@ -169,6 +169,7 @@ public:
const fltSemantics &getFltSemantics() const {
switch (getTypeID()) {
case HalfTyID: return APFloat::IEEEhalf();
+ case BFloatTyID: return APFloat::BFloat();
case FloatTyID: return APFloat::IEEEsingle();
case DoubleTyID: return APFloat::IEEEdouble();
case X86_FP80TyID: return APFloat::x87DoubleExtended();
@@ -227,7 +228,9 @@ public:
bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
/// True if this is an instance of VectorType.
- bool isVectorTy() const { return getTypeID() == VectorTyID; }
+ inline bool isVectorTy() const {
+ return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
+ }
/// Return true if this type could be converted with a lossless BitCast to
/// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
@@ -270,8 +273,7 @@ public:
return true;
// If it is not something that can have a size (e.g. a function or label),
// it doesn't have a size.
- if (getTypeID() != StructTyID && getTypeID() != ArrayTyID &&
- getTypeID() != VectorTyID)
+ if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy())
return false;
// Otherwise we have to try harder to decide.
return isSizedDerivedType(Visited);
@@ -304,10 +306,10 @@ public:
/// If this is a vector type, return the element type, otherwise return
/// 'this'.
- Type *getScalarType() const {
+ inline Type *getScalarType() const {
if (isVectorTy())
- return getVectorElementType();
- return const_cast<Type*>(this);
+ return getContainedType(0);
+ return const_cast<Type *>(this);
}
//===--------------------------------------------------------------------===//
@@ -343,8 +345,8 @@ public:
//===--------------------------------------------------------------------===//
// Helper methods corresponding to subclass methods. This forces a cast to
- // the specified subclass and calls its accessor. "getVectorNumElements" (for
- // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is
+ // the specified subclass and calls its accessor. "getArrayNumElements" (for
+ // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
// only intended to cover the core methods that are frequently used, helper
// methods should not be added here.
@@ -358,11 +360,6 @@ public:
inline unsigned getStructNumElements() const;
inline Type *getStructElementType(unsigned N) const;
- inline Type *getSequentialElementType() const {
- assert(isSequentialType(getTypeID()) && "Not a sequential type!");
- return ContainedTys[0];
- }
-
inline uint64_t getArrayNumElements() const;
Type *getArrayElementType() const {
@@ -370,14 +367,6 @@ public:
return ContainedTys[0];
}
- inline bool getVectorIsScalable() const;
- inline unsigned getVectorNumElements() const;
- inline ElementCount getVectorElementCount() const;
- Type *getVectorElementType() const {
- assert(getTypeID() == VectorTyID);
- return ContainedTys[0];
- }
-
Type *getPointerElementType() const {
assert(getTypeID() == PointerTyID);
return ContainedTys[0];
@@ -408,6 +397,7 @@ public:
static Type *getVoidTy(LLVMContext &C);
static Type *getLabelTy(LLVMContext &C);
static Type *getHalfTy(LLVMContext &C);
+ static Type *getBFloatTy(LLVMContext &C);
static Type *getFloatTy(LLVMContext &C);
static Type *getDoubleTy(LLVMContext &C);
static Type *getMetadataTy(LLVMContext &C);
@@ -443,6 +433,7 @@ public:
// types as pointee.
//
static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
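The new bfloat accessors mirror the existing half/float/double ones. A short, illustrative sketch of materializing a bfloat constant with them:

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  static Constant *makeBFloatOne(LLVMContext &Ctx) {
    Type *BF = Type::getBFloatTy(Ctx); // 16-bit bfloat type
    return ConstantFP::get(BF, 1.0);   // folded via APFloat::BFloat()
  }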
diff --git a/llvm/include/llvm/IR/Use.h b/llvm/include/llvm/IR/Use.h
index 034ca2c8ac23..917db2679c55 100644
--- a/llvm/include/llvm/IR/Use.h
+++ b/llvm/include/llvm/IR/Use.h
@@ -41,17 +41,6 @@ class Value;
/// all of the uses for a particular value definition. It also supports jumping
/// directly to the used value when we arrive from the User's operands, and
/// jumping directly to the User when we arrive from the Value's uses.
-///
-/// The pointer to the used Value is explicit, and the pointer to the User is
-/// implicit. The implicit pointer is found via a waymarking algorithm
-/// described in the programmer's manual:
-///
-/// http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
-///
-/// This is essentially the single most memory intensive object in LLVM because
-/// of the number of uses in the system. At the same time, the constant time
-/// operations it allows are essential to many optimizations having reasonable
-/// time complexity.
class Use {
public:
Use(const Use &U) = delete;
@@ -60,34 +49,6 @@ public:
/// that also works with less standard-compliant compilers
void swap(Use &RHS);
- /// Pointer traits for the UserRef PointerIntPair. This ensures we always
- /// use the LSB regardless of pointer alignment on different targets.
- struct UserRefPointerTraits {
- static inline void *getAsVoidPointer(User *P) { return P; }
-
- static inline User *getFromVoidPointer(void *P) {
- return (User *)P;
- }
-
- enum { NumLowBitsAvailable = 1 };
- };
-
- // A type for the word following an array of hung-off Uses in memory, which is
- // a pointer back to their User with the bottom bit set.
- using UserRef = PointerIntPair<User *, 1, unsigned, UserRefPointerTraits>;
-
- /// Pointer traits for the Prev PointerIntPair. This ensures we always use
- /// the two LSBs regardless of pointer alignment on different targets.
- struct PrevPointerTraits {
- static inline void *getAsVoidPointer(Use **P) { return P; }
-
- static inline Use **getFromVoidPointer(void *P) {
- return (Use **)P;
- }
-
- enum { NumLowBitsAvailable = 2 };
- };
-
private:
/// Destructor - Only for zap()
~Use() {
@@ -95,13 +56,12 @@ private:
removeFromList();
}
- enum PrevPtrTag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };
-
/// Constructor
- Use(PrevPtrTag tag) { Prev.setInt(tag); }
+ Use(User *Parent) : Parent(Parent) {}
public:
friend class Value;
+ friend class User;
operator Value *() const { return Val; }
Value *get() const { return Val; }
@@ -110,7 +70,7 @@ public:
///
/// For an instruction operand, for example, this will return the
/// instruction.
- User *getUser() const LLVM_READONLY;
+  User *getUser() const { return Parent; }
inline void set(Value *Val);
@@ -125,38 +85,29 @@ public:
/// Return the operand # of this use in its User.
unsigned getOperandNo() const;
- /// Initializes the waymarking tags on an array of Uses.
- ///
- /// This sets up the array of Uses such that getUser() can find the User from
- /// any of those Uses.
- static Use *initTags(Use *Start, Use *Stop);
-
/// Destroys Use operands when the number of operands of
/// a User changes.
static void zap(Use *Start, const Use *Stop, bool del = false);
private:
- const Use *getImpliedUser() const LLVM_READONLY;
Value *Val = nullptr;
Use *Next = nullptr;
- PointerIntPair<Use **, 2, PrevPtrTag, PrevPointerTraits> Prev;
-
- void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }
+ Use **Prev = nullptr;
+ User *Parent = nullptr;
void addToList(Use **List) {
Next = *List;
if (Next)
- Next->setPrev(&Next);
- setPrev(List);
- *List = this;
+ Next->Prev = &Next;
+ Prev = List;
+ *Prev = this;
}
void removeFromList() {
- Use **StrippedPrev = Prev.getPointer();
- *StrippedPrev = Next;
+ *Prev = Next;
if (Next)
- Next->setPrev(StrippedPrev);
+ Next->Prev = Prev;
}
};
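The waymarking machinery above is replaced by a plain intrusive, doubly linked list in which Prev stores the address of whichever pointer currently points at this Use (the Value's head pointer or the previous node's Next). A self-contained sketch of that pointer-to-pointer technique, with hypothetical names:

  // Minimal sketch of the linking scheme; only the technique matches Use.
  struct Node {
    Node *Next = nullptr;
    Node **Prev = nullptr; // address of the pointer that points at this node

    void addToList(Node **List) {
      Next = *List;
      if (Next)
        Next->Prev = &Next;
      Prev = List;
      *Prev = this;
    }

    void removeFromList() {
      *Prev = Next; // works uniformly for the head and interior nodes
      if (Next)
        Next->Prev = Prev;
    }
  };

Because Prev addresses a pointer rather than a node, removal needs no special case for the list head, at the cost of one extra word per Use plus the Parent pointer that replaces the waymarked User lookup.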
diff --git a/llvm/include/llvm/IR/User.h b/llvm/include/llvm/IR/User.h
index 850ee72a0387..ebfae1db2980 100644
--- a/llvm/include/llvm/IR/User.h
+++ b/llvm/include/llvm/IR/User.h
@@ -218,6 +218,11 @@ public:
NumUserOperands = NumOps;
}
+  /// A droppable user is a user for which uses can be dropped without
+  /// affecting correctness; such uses should be dropped rather than prevent
+  /// a transformation from happening.
+ bool isDroppable() const;
+
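A typical consumer of this predicate skips droppable users when deciding whether a value is genuinely used. A hedged sketch (the helper name is hypothetical):

  #include "llvm/IR/User.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;

  // Return true if every user of V could be dropped without losing
  // correctness, i.e. no user should block a transformation.
  static bool hasOnlyDroppableUsers(const Value &V) {
    for (const User *U : V.users())
      if (!U->isDroppable())
        return false;
    return true;
  }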
// ---------------------------------------------------------------------------
// Operand Iterator interface...
//
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
new file mode 100644
index 000000000000..d3e1fc854373
--- /dev/null
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -0,0 +1,84 @@
+//===-- IR/VPIntrinsics.def - Describes llvm.vp.* Intrinsics -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains descriptions of the various Vector Predication intrinsics.
+// This is used as a central place for enumerating the different instructions
+// and should eventually be the place to put comments about the instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+//
+#ifndef REGISTER_VP_INTRINSIC
+#define REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)
+#endif
+
+// Map this VP intrinsic to its functional Opcode
+#ifndef HANDLE_VP_TO_OC
+#define HANDLE_VP_TO_OC(VPID, OC)
+#endif
+
+///// Integer Arithmetic /////
+
+// llvm.vp.add(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_add, 2, 3)
+HANDLE_VP_TO_OC(vp_add, Add)
+
+// llvm.vp.and(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_and, 2, 3)
+HANDLE_VP_TO_OC(vp_and, And)
+
+// llvm.vp.ashr(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_ashr, 2, 3)
+HANDLE_VP_TO_OC(vp_ashr, AShr)
+
+// llvm.vp.lshr(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_lshr, 2, 3)
+HANDLE_VP_TO_OC(vp_lshr, LShr)
+
+// llvm.vp.mul(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_mul, 2, 3)
+HANDLE_VP_TO_OC(vp_mul, Mul)
+
+// llvm.vp.or(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_or, 2, 3)
+HANDLE_VP_TO_OC(vp_or, Or)
+
+// llvm.vp.sdiv(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_sdiv, 2, 3)
+HANDLE_VP_TO_OC(vp_sdiv, SDiv)
+
+// llvm.vp.shl(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_shl, 2, 3)
+HANDLE_VP_TO_OC(vp_shl, Shl)
+
+// llvm.vp.srem(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_srem, 2, 3)
+HANDLE_VP_TO_OC(vp_srem, SRem)
+
+// llvm.vp.sub(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_sub, 2, 3)
+HANDLE_VP_TO_OC(vp_sub, Sub)
+
+// llvm.vp.udiv(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_udiv, 2, 3)
+HANDLE_VP_TO_OC(vp_udiv, UDiv)
+
+// llvm.vp.urem(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_urem, 2, 3)
+HANDLE_VP_TO_OC(vp_urem, URem)
+
+// llvm.vp.xor(x,y,mask,vlen)
+REGISTER_VP_INTRINSIC(vp_xor, 2, 3)
+HANDLE_VP_TO_OC(vp_xor, Xor)
+
+#undef REGISTER_VP_INTRINSIC
+#undef HANDLE_VP_TO_OC
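A consumer defines one or both macros before including this file; each macro is expanded once per intrinsic and then #undef'ed automatically. As a sketch, a mapping from a VP intrinsic to its functional IR opcode could be generated like this (the real lookup lives in the IntrinsicInst implementation; the shape here is illustrative):

  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/Intrinsics.h"
  using namespace llvm;

  static unsigned getFunctionalOpcodeForVP(Intrinsic::ID ID) {
    switch (ID) {
    default:
      return Instruction::Call; // not a known VP intrinsic
  #define HANDLE_VP_TO_OC(VPID, OC)                                          \
    case Intrinsic::VPID:                                                    \
      return Instruction::OC;
  #include "llvm/IR/VPIntrinsics.def"
    }
  }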
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index f2c4b3b3f203..04ca68274626 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -72,8 +72,6 @@ using ValueName = StringMapEntry<Value *>;
/// objects that watch it and listen to RAUW and Destroy events. See
/// llvm/IR/ValueHandle.h for details.
class Value {
- // The least-significant bit of the first word of Value *must* be zero:
- // http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
Type *VTy;
Use *UseList;
@@ -444,6 +442,34 @@ public:
/// This is logically equivalent to getNumUses() >= N.
bool hasNUsesOrMore(unsigned N) const;
+  /// Return the single use of this value that cannot be dropped, or nullptr
+  /// if the number of such uses is not exactly one.
+ ///
+ /// This is specialized because it is a common request and does not require
+ /// traversing the whole use list.
+ Use *getSingleUndroppableUse();
+
+  /// Return true if this value has exactly N undroppable uses.
+ ///
+ /// This is specialized because it is a common request and does not require
+ /// traversing the whole use list.
+ bool hasNUndroppableUses(unsigned N) const;
+
+  /// Return true if this value has N undroppable uses or more.
+  ///
+  /// This is the droppable-use-aware analogue of hasNUsesOrMore.
+ bool hasNUndroppableUsesOrMore(unsigned N) const;
+
+  /// Remove every use that can safely be removed.
+  ///
+  /// This will remove, for example, uses in llvm.assume.
+  /// This should be used when one wants to perform a transformation but some
+  /// droppable uses prevent it.
+  /// This function optionally takes a filter to remove only some droppable
+  /// uses.
+ void dropDroppableUses(llvm::function_ref<bool(const Use *)> ShouldDrop =
+ [](const Use *) { return true; });
+
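Taken together, these queries let a transformation ignore, and if necessary discard, uses that exist only for bookkeeping. A hedged sketch of the intended pattern (the transform itself is hypothetical):

  // If nothing but droppable uses (e.g. in llvm.assume) remains, drop them
  // and proceed with the rewrite.
  static bool makeUnused(Value &V) {
    if (!V.hasNUndroppableUses(0))
      return false;        // a real use blocks the transformation
    V.dropDroppableUses(); // removes e.g. uses in llvm.assume
    return true;           // V now has no uses at all
  }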
/// Check if this value is used in the specified basic block.
bool isUsedInBasicBlock(const BasicBlock *BB) const;
@@ -567,18 +593,23 @@ public:
}
/// Accumulate the constant offset this value has compared to a base pointer.
- /// Only 'getelementptr' instructions (GEPs) with constant indices are
- /// accumulated but other instructions, e.g., casts, are stripped away as
- /// well. The accumulated constant offset is added to \p Offset and the base
+  /// Only offsets from 'getelementptr' instructions (GEPs) are accumulated;
+  /// other instructions, e.g., casts, are stripped away as well.
+ /// The accumulated constant offset is added to \p Offset and the base
/// pointer is returned.
///
/// The APInt \p Offset has to have a bit-width equal to the IntPtr type for
/// the address space of 'this' pointer value, e.g., use
/// DataLayout::getIndexTypeSizeInBits(Ty).
///
- /// If \p AllowNonInbounds is true, constant offsets in GEPs are stripped and
+ /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
/// accumulated even if the GEP is not "inbounds".
///
+  /// If \p ExternalAnalysis is provided, it will be used to calculate an
+  /// offset when an operand of the GEP is not constant. For example,
+  /// \p ExternalAnalysis might try to calculate a lower bound for the value.
+  /// If \p ExternalAnalysis succeeds, it should return true.
+ ///
/// If this is called on a non-pointer value, it returns 'this' and the
/// \p Offset is not modified.
///
@@ -587,9 +618,10 @@ public:
/// between the underlying value and the returned one. Thus, if no constant
/// offset was found, the returned value is the underlying one and \p Offset
/// is unchanged.
- const Value *stripAndAccumulateConstantOffsets(const DataLayout &DL,
- APInt &Offset,
- bool AllowNonInbounds) const;
+ const Value *stripAndAccumulateConstantOffsets(
+ const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
+ nullptr) const;
Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
bool AllowNonInbounds) {
return const_cast<Value *>(
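The ExternalAnalysis hook lets a caller fold in bounds for non-constant GEP indices. A hedged sketch, where getKnownLowerBound is a hypothetical range analysis that sets Off and returns true on success:

  static const Value *getBaseWithOffset(const Value *Ptr, const DataLayout &DL,
                                        APInt &Offset) {
    Offset = APInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
    return Ptr->stripAndAccumulateConstantOffsets(
        DL, Offset, /*AllowNonInbounds=*/true,
        [](Value &Index, APInt &Off) {
          return getKnownLowerBound(Index, Off); // hypothetical helper
        });
  }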
@@ -614,10 +646,12 @@ public:
///
/// Returns the original pointer value. If this is called on a non-pointer
/// value, it returns 'this'.
- const Value *stripInBoundsOffsets() const;
- Value *stripInBoundsOffsets() {
+ const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
+ [](const Value *) {}) const;
+ inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
+ [](const Value *) {}) {
return const_cast<Value *>(
- static_cast<const Value *>(this)->stripInBoundsOffsets());
+ static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
}
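The optional callback is invoked on the values visited while stripping, which makes it easy to record the chain from the original pointer to its base. A brief sketch:

  // Collect the stripped values on the way to the base pointer.
  static const Value *
  collectStripped(const Value *Ptr, SmallVectorImpl<const Value *> &Chain) {
    return Ptr->stripInBoundsOffsets(
        [&](const Value *V) { Chain.push_back(V); });
  }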
/// Returns the number of bytes known to be dereferenceable for the
@@ -632,7 +666,7 @@ public:
///
/// Returns an alignment which is either specified explicitly, e.g. via
/// align attribute of a function argument, or guaranteed by DataLayout.
- MaybeAlign getPointerAlignment(const DataLayout &DL) const;
+ Align getPointerAlignment(const DataLayout &DL) const;
/// Translate PHI node to its predecessor from the given basic block.
///
@@ -805,7 +839,7 @@ template <class Compare> void Value::sortUseList(Compare Cmp) {
// Fix the Prev pointers.
for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
- I->setPrev(Prev);
+ I->Prev = Prev;
Prev = &I->Next;
}
}
diff --git a/llvm/include/llvm/IR/ValueHandle.h b/llvm/include/llvm/IR/ValueHandle.h
index 50b7701f6716..badc1ca8d1f6 100644
--- a/llvm/include/llvm/IR/ValueHandle.h
+++ b/llvm/include/llvm/IR/ValueHandle.h
@@ -89,7 +89,11 @@ public:
}
Value *operator->() const { return getValPtr(); }
- Value &operator*() const { return *getValPtr(); }
+ Value &operator*() const {
+ Value *V = getValPtr();
+ assert(V && "Dereferencing deleted ValueHandle");
+ return *V;
+ }
protected:
Value *getValPtr() const { return Val; }
@@ -303,30 +307,10 @@ public:
ValueTy &operator*() const { return *getValPtr(); }
};
-// Specialize DenseMapInfo to allow AssertingVH to participate in DenseMap.
+// Treat AssertingVH<T> like T* inside maps. This also allows using find_as()
+// to look up a value without constructing a value handle.
template<typename T>
-struct DenseMapInfo<AssertingVH<T>> {
- static inline AssertingVH<T> getEmptyKey() {
- AssertingVH<T> Res;
- Res.setRawValPtr(DenseMapInfo<Value *>::getEmptyKey());
- return Res;
- }
-
- static inline AssertingVH<T> getTombstoneKey() {
- AssertingVH<T> Res;
- Res.setRawValPtr(DenseMapInfo<Value *>::getTombstoneKey());
- return Res;
- }
-
- static unsigned getHashValue(const AssertingVH<T> &Val) {
- return DenseMapInfo<Value *>::getHashValue(Val.getRawValPtr());
- }
-
- static bool isEqual(const AssertingVH<T> &LHS, const AssertingVH<T> &RHS) {
- return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
- RHS.getRawValPtr());
- }
-};
+struct DenseMapInfo<AssertingVH<T>> : DenseMapInfo<T *> {};
/// Value handle that tracks a Value across RAUW.
///
@@ -410,6 +394,7 @@ protected:
public:
CallbackVH() : ValueHandleBase(Callback) {}
CallbackVH(Value *P) : ValueHandleBase(Callback, P) {}
+ CallbackVH(const Value *P) : CallbackVH(const_cast<Value *>(P)) {}
operator Value*() const {
return getValPtr();
@@ -557,6 +542,17 @@ template <typename T> struct DenseMapInfo<PoisoningVH<T>> {
return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
RHS.getRawValPtr());
}
+
+ // Allow lookup by T* via find_as(), without constructing a temporary
+ // value handle.
+
+ static unsigned getHashValue(const T *Val) {
+ return DenseMapInfo<Value *>::getHashValue(Val);
+ }
+
+ static bool isEqual(const T *LHS, const PoisoningVH<T> &RHS) {
+ return DenseMapInfo<Value *>::isEqual(LHS, RHS.getRawValPtr());
+ }
};
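With these overloads (and the AssertingVH specialization above inheriting from DenseMapInfo<T *>), a DenseMap keyed by value handles can be probed with a raw pointer through find_as, so no temporary handle is constructed during lookup. A sketch:

  static unsigned lookupOrder(DenseMap<PoisoningVH<Instruction>, unsigned> &M,
                              Instruction *I) {
    auto It = M.find_as(I); // raw-pointer lookup, no PoisoningVH temporary
    return It == M.end() ? 0 : It->second;
  }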
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/ValueMap.h b/llvm/include/llvm/IR/ValueMap.h
index fb5440d5efe8..a5a06b76dbf6 100644
--- a/llvm/include/llvm/IR/ValueMap.h
+++ b/llvm/include/llvm/IR/ValueMap.h
@@ -243,7 +243,7 @@ class ValueMapCallbackVH final : public CallbackVH {
friend struct DenseMapInfo<ValueMapCallbackVH>;
using ValueMapT = ValueMap<KeyT, ValueT, Config>;
- using KeySansPointerT = typename std::remove_pointer<KeyT>::type;
+ using KeySansPointerT = std::remove_pointer_t<KeyT>;
ValueMapT *Map;