diff options
433 files changed, 36835 insertions, 11120 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index b51bc421fdbf6..2f5df776e9e0a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -512,6 +512,9 @@ set(LLVM_INSTALL_OCAMLDOC_HTML_DIR "share/doc/llvm/ocaml-html" option (LLVM_BUILD_EXTERNAL_COMPILER_RT "Build compiler-rt as an external project." OFF) +option (LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO + "Show target and host info when tools are invoked with --version." ON) + # You can configure which libraries from LLVM you want to include in the # shared library by setting LLVM_DYLIB_COMPONENTS to a semi-colon delimited # list of LLVM components. All component names handled by llvm-config are valid. diff --git a/bindings/go/llvm/DIBuilderBindings.cpp b/bindings/go/llvm/DIBuilderBindings.cpp index 53e223d67b4e8..a0792e93d4ba5 100644 --- a/bindings/go/llvm/DIBuilderBindings.cpp +++ b/bindings/go/llvm/DIBuilderBindings.cpp @@ -19,8 +19,6 @@ using namespace llvm; -DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DIBuilder, LLVMDIBuilderRef) - LLVMDIBuilderRef LLVMNewDIBuilder(LLVMModuleRef mref) { Module *m = unwrap(mref); return wrap(new DIBuilder(*m)); diff --git a/bindings/go/llvm/IRBindings.h b/bindings/go/llvm/IRBindings.h index f4f490391d4f1..25a00b1380442 100644 --- a/bindings/go/llvm/IRBindings.h +++ b/bindings/go/llvm/IRBindings.h @@ -26,7 +26,6 @@ extern "C" { #endif -typedef struct LLVMOpaqueMetadata *LLVMMetadataRef; struct LLVMDebugLocMetadata{ unsigned Line; unsigned Col; @@ -59,16 +58,6 @@ void LLVMSetSubprogram(LLVMValueRef Fn, LLVMMetadataRef SP); #ifdef __cplusplus } -namespace llvm { - -DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef) - -inline Metadata **unwrap(LLVMMetadataRef *Vals) { - return reinterpret_cast<Metadata**>(Vals); -} - -} - #endif #endif diff --git a/cmake/modules/AddLLVM.cmake b/cmake/modules/AddLLVM.cmake index 7f7608cff33d3..e011bb402757e 100755 --- a/cmake/modules/AddLLVM.cmake +++ b/cmake/modules/AddLLVM.cmake @@ -81,8 +81,9 @@ function(add_llvm_symbol_exports target_name 
export_file) # Gold and BFD ld require a version script rather than a plain list. set(native_export_file "${target_name}.exports") # FIXME: Don't write the "local:" line on OpenBSD. + # in the export file, also add a linker script to version LLVM symbols (form: LLVM_N.M) add_custom_command(OUTPUT ${native_export_file} - COMMAND echo "{" > ${native_export_file} + COMMAND echo "LLVM_${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR} {" > ${native_export_file} COMMAND grep -q "[[:alnum:]]" ${export_file} && echo " global:" >> ${native_export_file} || : COMMAND sed -e "s/$/;/" -e "s/^/ /" < ${export_file} >> ${native_export_file} COMMAND echo " local: *;" >> ${native_export_file} diff --git a/docs/BitCodeFormat.rst b/docs/BitCodeFormat.rst index a9a123595f7f5..6ee3842c8d908 100644 --- a/docs/BitCodeFormat.rst +++ b/docs/BitCodeFormat.rst @@ -550,6 +550,8 @@ LLVM IR is defined with the following blocks: * 17 --- `TYPE_BLOCK`_ --- This describes all of the types in the module. +* 23 --- `STRTAB_BLOCK`_ --- The bitcode file's string table. + .. _MODULE_BLOCK: MODULE_BLOCK Contents @@ -577,7 +579,7 @@ MODULE_CODE_VERSION Record ``[VERSION, version#]`` The ``VERSION`` record (code 1) contains a single value indicating the format -version. Versions 0 and 1 are supported at this time. The difference between +version. Versions 0, 1 and 2 are supported at this time. The difference between version 0 and 1 is in the encoding of instruction operands in each `FUNCTION_BLOCK`_. @@ -620,6 +622,12 @@ as unsigned VBRs. However, forward references are rare, except in the case of phi instructions. For phi instructions, operands are encoded as `Signed VBRs`_ to deal with forward references. 
+In version 2, the meaning of module records ``FUNCTION``, ``GLOBALVAR``, +``ALIAS``, ``IFUNC`` and ``COMDAT`` change such that the first two operands +specify an offset and size of a string in a string table (see `STRTAB_BLOCK +Contents`_), the function name is removed from the ``FNENTRY`` record in the +value symbol table, and the top-level ``VALUE_SYMTAB_BLOCK`` may only contain +``FNENTRY`` records. MODULE_CODE_TRIPLE Record ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -673,11 +681,14 @@ for each library name referenced. MODULE_CODE_GLOBALVAR Record ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -``[GLOBALVAR, pointer type, isconst, initid, linkage, alignment, section, visibility, threadlocal, unnamed_addr, externally_initialized, dllstorageclass, comdat]`` +``[GLOBALVAR, strtab offset, strtab size, pointer type, isconst, initid, linkage, alignment, section, visibility, threadlocal, unnamed_addr, externally_initialized, dllstorageclass, comdat]`` The ``GLOBALVAR`` record (code 7) marks the declaration or definition of a global variable. The operand fields are: +* *strtab offset*, *strtab size*: Specifies the name of the global variable. + See `STRTAB_BLOCK Contents`_. + * *pointer type*: The type index of the pointer type used to point to this global variable @@ -755,11 +766,14 @@ global variable. The operand fields are: MODULE_CODE_FUNCTION Record ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -``[FUNCTION, type, callingconv, isproto, linkage, paramattr, alignment, section, visibility, gc, prologuedata, dllstorageclass, comdat, prefixdata, personalityfn]`` +``[FUNCTION, strtab offset, strtab size, type, callingconv, isproto, linkage, paramattr, alignment, section, visibility, gc, prologuedata, dllstorageclass, comdat, prefixdata, personalityfn]`` The ``FUNCTION`` record (code 8) marks the declaration or definition of a function. The operand fields are: +* *strtab offset*, *strtab size*: Specifies the name of the function. + See `STRTAB_BLOCK Contents`_. 
+ * *type*: The type index of the function type describing this function * *callingconv*: The calling convention number: @@ -817,11 +831,14 @@ function. The operand fields are: MODULE_CODE_ALIAS Record ^^^^^^^^^^^^^^^^^^^^^^^^ -``[ALIAS, alias type, aliasee val#, linkage, visibility, dllstorageclass, threadlocal, unnamed_addr]`` +``[ALIAS, strtab offset, strtab size, alias type, aliasee val#, linkage, visibility, dllstorageclass, threadlocal, unnamed_addr]`` The ``ALIAS`` record (code 9) marks the definition of an alias. The operand fields are +* *strtab offset*, *strtab size*: Specifies the name of the alias. + See `STRTAB_BLOCK Contents`_. + * *alias type*: The type index of the alias * *aliasee val#*: The value index of the aliased value @@ -1300,3 +1317,20 @@ METADATA_ATTACHMENT Contents ---------------------------- The ``METADATA_ATTACHMENT`` block (id 16) ... + +.. _STRTAB_BLOCK: + +STRTAB_BLOCK Contents +--------------------- + +The ``STRTAB`` block (id 23) contains a single record (``STRTAB_BLOB``, id 1) +with a single blob operand containing the bitcode file's string table. + +Strings in the string table are not null terminated. A record's *strtab +offset* and *strtab size* operands specify the byte offset and size of a +string within the string table. + +The string table is used by all preceding blocks in the bitcode file that are +not succeeded by another intervening ``STRTAB`` block. Normally a bitcode +file will have a single string table, but it may have more than one if it +was created by binary concatenation of multiple bitcode files. diff --git a/docs/LangRef.rst b/docs/LangRef.rst index d17bbc18ab7d7..b0a31589cc4f8 100644 --- a/docs/LangRef.rst +++ b/docs/LangRef.rst @@ -4380,7 +4380,7 @@ referenced LLVM variable relates to the source language variable. The current supported vocabulary is limited: -- ``DW_OP_deref`` dereferences the working expression. +- ``DW_OP_deref`` dereferences the top of the expression stack. 
- ``DW_OP_plus, 93`` adds ``93`` to the working expression. - ``DW_OP_LLVM_fragment, 16, 8`` specifies the offset and size (``16`` and ``8`` here, respectively) of the variable fragment from the working expression. Note @@ -4396,12 +4396,17 @@ DIExpression nodes that contain a ``DW_OP_stack_value`` operator are standalone location descriptions that describe constant values. This form is used to describe global constants that have been optimized away. All other expressions are modifiers to another location: A debug intrinsic ties a location and a -DIExpression together. Contrary to DWARF expressions, a DIExpression always -describes the *value* of a source variable and never its *address*. In DWARF -terminology, a DIExpression can always be considered an implicit location -description regardless whether it contains a ``DW_OP_stack_value`` or not. +DIExpression together. -.. code-block:: text +DWARF specifies three kinds of simple location descriptions: Register, memory, +and implicit location descriptions. Register and memory location descriptions +describe the *location* of a source variable (in the sense that a debugger might +modify its value), whereas implicit locations describe merely the *value* of a +source variable. DIExpressions also follow this model: A DIExpression that +doesn't have a trailing ``DW_OP_stack_value`` will describe an *address* when +combined with a concrete location. + +.. code-block:: llvm !0 = !DIExpression(DW_OP_deref) !1 = !DIExpression(DW_OP_plus, 3) @@ -12285,6 +12290,7 @@ The third argument is a metadata argument specifying the rounding mode to be assumed. This argument must be one of the following strings: :: + "round.dynamic" "round.tonearest" "round.downward" @@ -12316,6 +12322,7 @@ required exception behavior. 
This argument must be one of the following strings: :: + "fpexcept.ignore" "fpexcept.maytrap" "fpexcept.strict" diff --git a/docs/SourceLevelDebugging.rst b/docs/SourceLevelDebugging.rst index 41f8dbfab3dce..a9f5c3a081472 100644 --- a/docs/SourceLevelDebugging.rst +++ b/docs/SourceLevelDebugging.rst @@ -180,11 +180,27 @@ provide debug information at various points in generated code. void @llvm.dbg.declare(metadata, metadata, metadata) -This intrinsic provides information about a local element (e.g., variable). -The first argument is metadata holding the alloca for the variable. The second +This intrinsic provides information about a local element (e.g., variable). The +first argument is metadata holding the alloca for the variable. The second argument is a `local variable <LangRef.html#dilocalvariable>`_ containing a description of the variable. The third argument is a `complex expression -<LangRef.html#diexpression>`_. +<LangRef.html#diexpression>`_. An `llvm.dbg.declare` instrinsic describes the +*location* of a source variable. + +.. code-block:: llvm + + %i.addr = alloca i32, align 4 + call void @llvm.dbg.declare(metadata i32* %i.addr, metadata !1, metadata !2), !dbg !3 + !1 = !DILocalVariable(name: "i", ...) ; int i + !2 = !DIExpression() + !3 = !DILocation(...) + ... + %buffer = alloca [256 x i8], align 8 + ; The address of i is buffer+64. + call void @llvm.dbg.declare(metadata [256 x i8]* %buffer, metadata !1, metadata !2) + !1 = !DILocalVariable(name: "i", ...) ; int i + !2 = !DIExpression(DW_OP_plus, 64) + ``llvm.dbg.value`` ^^^^^^^^^^^^^^^^^^ diff --git a/docs/Statepoints.rst b/docs/Statepoints.rst index 7f2b20544812f..73e09ae8b620b 100644 --- a/docs/Statepoints.rst +++ b/docs/Statepoints.rst @@ -9,15 +9,22 @@ Garbage Collection Safepoints in LLVM Status ======= -This document describes a set of experimental extensions to LLVM. Use -with caution. Because the intrinsics have experimental status, -compatibility across LLVM releases is not guaranteed. 
- -LLVM currently supports an alternate mechanism for conservative -garbage collection support using the ``gcroot`` intrinsic. The mechanism -described here shares little in common with the alternate ``gcroot`` -implementation and it is hoped that this mechanism will eventually -replace the gc_root mechanism. +This document describes a set of extensions to LLVM to support garbage +collection. By now, these mechanisms are well proven with commercial java +implementation with a fully relocating collector having shipped using them. +There are a couple places where bugs might still linger; these are called out +below. + +They are still listed as "experimental" to indicate that no forward or backward +compatibility guarantees are offered across versions. If your use case is such +that you need some form of forward compatibility guarantee, please raise the +issue on the llvm-dev mailing list. + +LLVM still supports an alternate mechanism for conservative garbage collection +support using the ``gcroot`` intrinsic. The ``gcroot`` mechanism is mostly of +historical interest at this point with one exception - its implementation of +shadow stacks has been used successfully by a number of language frontends and +is still supported. Overview ======== @@ -86,9 +93,36 @@ the collector must be able to: This document describes the mechanism by which an LLVM based compiler can provide this information to a language runtime/collector, and -ensure that all pointers can be read and updated if desired. The -heart of the approach is to construct (or rewrite) the IR in a manner -where the possible updates performed by the garbage collector are +ensure that all pointers can be read and updated if desired. + +At a high level, LLVM has been extended to support compiling to an abstract +machine which extends the actual target with a non-integral pointer type +suitable for representing a garbage collected reference to an object. 
In +particular, such non-integral pointer type have no defined mapping to an +integer representation. This semantic quirk allows the runtime to pick a +integer mapping for each point in the program allowing relocations of objects +without visible effects. + +Warning: Non-Integral Pointer Types are a newly added concept in LLVM IR. +It's possible that we've missed disabling some of the optimizations which +assume an integral value for pointers. If you find such a case, please +file a bug or share a patch. + +Warning: There is one currently known semantic hole in the definition of +non-integral pointers which has not been addressed upstream. To work around +this, you need to disable speculation of loads unless the memory type +(non-integral pointer vs anything else) is known to unchanged. That is, it is +not safe to speculate a load if doing causes a non-integral pointer value to +be loaded as any other type or vice versa. In practice, this restriction is +well isolated to isSafeToSpeculate in ValueTracking.cpp. + +This high level abstract machine model is used for most of the LLVM optimizer. +Before starting code generation, we switch representations to an explicit form. +In theory, a frontend could directly generate this low level explicit form, but +doing so is likely to inhibit optimization. + +The heart of the explicit approach is to construct (or rewrite) the IR in a +manner where the possible updates performed by the garbage collector are explicitly visible in the IR. Doing so requires that we: #. create a new SSA value for each potentially relocated pointer, and @@ -104,7 +138,7 @@ explicitly visible in the IR. 
Doing so requires that we: At the most abstract level, inserting a safepoint can be thought of as replacing a call instruction with a call to a multiple return value function which both calls the original target of the call, returns -it's result, and returns updated values for any live pointers to +its result, and returns updated values for any live pointers to garbage collected objects. Note that the task of identifying all live pointers to garbage @@ -200,7 +234,9 @@ The relevant parts of the StackMap section for our example are: .short 7 .long 0 -This example was taken from the tests for the :ref:`RewriteStatepointsForGC` utility pass. As such, it's full StackMap can be easily examined with the following command. +This example was taken from the tests for the :ref:`RewriteStatepointsForGC` +utility pass. As such, its full StackMap can be easily examined with the +following command. .. code-block:: bash @@ -536,7 +572,7 @@ Semantics: """""""""" The return value of ``gc.relocate`` is the potentially relocated value -of the pointer specified by it's arguments. It is unspecified how the +of the pointer specified by its arguments. It is unspecified how the value of the returned pointer relates to the argument to the ``gc.statepoint`` other than that a) it points to the same source language object with the same offset, and b) the 'based-on' @@ -654,11 +690,15 @@ Utility Passes for Safepoint Insertion RewriteStatepointsForGC ^^^^^^^^^^^^^^^^^^^^^^^^ -The pass RewriteStatepointsForGC transforms a functions IR by replacing a -``gc.statepoint`` (with an optional ``gc.result``) with a full relocation -sequence, including all required ``gc.relocates``. To function, the pass -requires that the GC strategy specified for the function be able to reliably -distinguish between GC references and non-GC references in IR it is given. 
+The pass RewriteStatepointsForGC transforms a function's IR to lower from the +abstract machine model described above to the explicit statepoint model of +relocations. To do this, it replaces all calls or invokes of functions which +might contain a safepoint poll with a ``gc.statepoint`` and associated full +relocation sequence, including all required ``gc.relocates``. + +Note that by default, this pass only runs for the "statepoint-example" or +"core-clr" gc strategies. You will need to add your custom strategy to this +whitelist or use one of the predefined ones. As an example, given this code: @@ -666,7 +706,7 @@ As an example, given this code: define i8 addrspace(1)* @test1(i8 addrspace(1)* %obj) gc "statepoint-example" { - call token (i64, i32, void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0) + call void @foo() ret i8 addrspace(1)* %obj } @@ -683,7 +723,8 @@ The pass would produce this IR: In the above examples, the addrspace(1) marker on the pointers is the mechanism that the ``statepoint-example`` GC strategy uses to distinguish references from -non references. Address space 1 is not globally reserved for this purpose. +non references. The pass assumes that all addrspace(1) pointers are non-integral +pointer types. Address space 1 is not globally reserved for this purpose. This pass can be used an utility function by a language frontend that doesn't want to manually reason about liveness, base pointers, or relocation when @@ -701,23 +742,34 @@ can be relaxed to producing interior derived pointers provided the target collector can find the associated allocation from an arbitrary interior derived pointer. 
-In practice, RewriteStatepointsForGC can be run much later in the pass +By default RewriteStatepointsForGC passes in ``0xABCDEF00`` as the statepoint +ID and ``0`` as the number of patchable bytes to the newly constructed +``gc.statepoint``. These values can be configured on a per-callsite +basis using the attributes ``"statepoint-id"`` and +``"statepoint-num-patch-bytes"``. If a call site is marked with a +``"statepoint-id"`` function attribute and its value is a positive +integer (represented as a string), then that value is used as the ID +of the newly constructed ``gc.statepoint``. If a call site is marked +with a ``"statepoint-num-patch-bytes"`` function attribute and its +value is a positive integer, then that value is used as the 'num patch +bytes' parameter of the newly constructed ``gc.statepoint``. The +``"statepoint-id"`` and ``"statepoint-num-patch-bytes"`` attributes +are not propagated to the ``gc.statepoint`` call or invoke if they +could be successfully parsed. + +In practice, RewriteStatepointsForGC should be run much later in the pass pipeline, after most optimization is already done. This helps to improve the quality of the generated code when compiled with garbage collection support. -In the long run, this is the intended usage model. At this time, a few details -have yet to be worked out about the semantic model required to guarantee this -is always correct. As such, please use with caution and report bugs. .. _PlaceSafepoints: PlaceSafepoints ^^^^^^^^^^^^^^^^ -The pass PlaceSafepoints transforms a function's IR by replacing any call or -invoke instructions with appropriate ``gc.statepoint`` and ``gc.result`` pairs, -and inserting safepoint polls sufficient to ensure running code checks for a -safepoint request on a timely manner. This pass is expected to be run before -RewriteStatepointsForGC and thus does not produce full relocation sequences. 
+The pass PlaceSafepoints inserts safepoint polls sufficient to ensure running +code checks for a safepoint request on a timely manner. This pass is expected +to be run before RewriteStatepointsForGC and thus does not produce full +relocation sequences. As an example, given input IR of the following: @@ -740,13 +792,16 @@ This pass would produce the following IR: .. code-block:: text define void @test() gc "statepoint-example" { - %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* @do_safepoint, i32 0, i32 0, i32 0, i32 0) - %safepoint_token1 = call token (i64, i32, void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 0) + call void @do_safepoint() + call void @foo() ret void } -In this case, we've added an (unconditional) entry safepoint poll and converted the call into a ``gc.statepoint``. Note that despite appearances, the entry poll is not necessarily redundant. We'd have to know that ``foo`` and ``test`` were not mutually recursive for the poll to be redundant. In practice, you'd probably want to your poll definition to contain a conditional branch of some form. - +In this case, we've added an (unconditional) entry safepoint poll. Note that +despite appearances, the entry poll is not necessarily redundant. We'd have to +know that ``foo`` and ``test`` were not mutually recursive for the poll to be +redundant. In practice, you'd probably want to your poll definition to contain +a conditional branch of some form. At the moment, PlaceSafepoints can insert safepoint polls at method entry and loop backedges locations. Extending this to work with return polls would be @@ -763,26 +818,13 @@ of this function is inserted at each poll site desired. While calls or invokes inside this method are transformed to a ``gc.statepoints``, recursive poll insertion is not performed. 
-By default PlaceSafepoints passes in ``0xABCDEF00`` as the statepoint -ID and ``0`` as the number of patchable bytes to the newly constructed -``gc.statepoint``. These values can be configured on a per-callsite -basis using the attributes ``"statepoint-id"`` and -``"statepoint-num-patch-bytes"``. If a call site is marked with a -``"statepoint-id"`` function attribute and its value is a positive -integer (represented as a string), then that value is used as the ID -of the newly constructed ``gc.statepoint``. If a call site is marked -with a ``"statepoint-num-patch-bytes"`` function attribute and its -value is a positive integer, then that value is used as the 'num patch -bytes' parameter of the newly constructed ``gc.statepoint``. The -``"statepoint-id"`` and ``"statepoint-num-patch-bytes"`` attributes -are not propagated to the ``gc.statepoint`` call or invoke if they -could be successfully parsed. - -If you are scheduling the RewriteStatepointsForGC pass late in the pass order, -you should probably schedule this pass immediately before it. The exception -would be if you need to preserve abstract frame information (e.g. for -deoptimization or introspection) at safepoints. In that case, ask on the -llvm-dev mailing list for suggestions. +This pass is useful for any language frontend which only has to support +garbage collection semantics at safepoints. If you need other abstract +frame information at safepoints (e.g. for deoptimization or introspection), +you can insert safepoint polls in the frontend. If you have the later case, +please ask on llvm-dev for suggestions. There's been a good amount of work +done on making such a scheme work well in practice which is not yet documented +here. Supported Architectures @@ -794,13 +836,6 @@ Today, only X86_64 is supported. Problem Areas and Active Work ============================= -#. 
As the existing users of the late rewriting model have matured, we've found - cases where the optimizer breaks the assumption that an SSA value of - gc-pointer type actually contains a gc-pointer and vice-versa. We need to - clarify our expectations and propose at least one small IR change. (Today, - the gc-pointer distinction is managed via address spaces. This turns out - not to be quite strong enough.) - #. Support for languages which allow unmanaged pointers to garbage collected objects (i.e. pass a pointer to an object to a C routine) via pinning. diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h index 7f5c05d21e650..0a1d8faf99b76 100644 --- a/include/llvm-c/Core.h +++ b/include/llvm-c/Core.h @@ -2131,6 +2131,16 @@ LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals, LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count); /** + * Obtain a Metadata as a Value. + */ +LLVMValueRef LLVMMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD); + +/** + * Obtain a Value as a Metadata. + */ +LLVMMetadataRef LLVMValueAsMetadata(LLVMValueRef Val); + +/** * Obtain the underlying string from a MDString value. * * @param V Instance to obtain string from. diff --git a/include/llvm-c/Types.h b/include/llvm-c/Types.h index 3d472a6bf47d0..d63ea4de933de 100644 --- a/include/llvm-c/Types.h +++ b/include/llvm-c/Types.h @@ -83,6 +83,13 @@ typedef struct LLVMOpaqueValue *LLVMValueRef; typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef; /** + * Represents an LLVM Metadata. + * + * This models llvm::Metadata. + */ +typedef struct LLVMOpaqueMetadata *LLVMMetadataRef; + +/** * Represents an LLVM basic block builder. * * This models llvm::IRBuilder. @@ -90,6 +97,13 @@ typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef; typedef struct LLVMOpaqueBuilder *LLVMBuilderRef; /** + * Represents an LLVM debug info builder. + * + * This models llvm::DIBuilder. 
+ */ +typedef struct LLVMOpaqueDIBuilder *LLVMDIBuilderRef; + +/** * Interface used to provide a module to JIT or interpreter. * This is now just a synonym for llvm::Module, but we have to keep using the * different type to keep binary compatibility. diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h index ab23130b137d8..ceb623d34531c 100644 --- a/include/llvm/ADT/APInt.h +++ b/include/llvm/ADT/APInt.h @@ -189,17 +189,17 @@ private: void initSlowCase(const APInt &that); /// out-of-line slow case for shl - APInt shlSlowCase(unsigned shiftAmt) const; + void shlSlowCase(unsigned ShiftAmt); + + /// out-of-line slow case for lshr. + void lshrSlowCase(unsigned ShiftAmt); /// out-of-line slow case for operator= - APInt &AssignSlowCase(const APInt &RHS); + void AssignSlowCase(const APInt &RHS); /// out-of-line slow case for operator== bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY; - /// out-of-line slow case for operator== - bool EqualSlowCase(uint64_t Val) const LLVM_READONLY; - /// out-of-line slow case for countLeadingZeros unsigned countLeadingZerosSlowCase() const LLVM_READONLY; @@ -209,6 +209,12 @@ private: /// out-of-line slow case for countPopulation unsigned countPopulationSlowCase() const LLVM_READONLY; + /// out-of-line slow case for intersects. + bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY; + + /// out-of-line slow case for isSubsetOf. + bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY; + /// out-of-line slow case for setBits. void setBitsSlowCase(unsigned loBit, unsigned hiBit); @@ -216,13 +222,13 @@ private: void flipAllBitsSlowCase(); /// out-of-line slow case for operator&=. - APInt& AndAssignSlowCase(const APInt& RHS); + void AndAssignSlowCase(const APInt& RHS); /// out-of-line slow case for operator|=. - APInt& OrAssignSlowCase(const APInt& RHS); + void OrAssignSlowCase(const APInt& RHS); /// out-of-line slow case for operator^=. 
- APInt& XorAssignSlowCase(const APInt& RHS); + void XorAssignSlowCase(const APInt& RHS); public: /// \name Constructors @@ -330,6 +336,20 @@ public: /// This tests the high bit of the APInt to determine if it is unset. bool isNonNegative() const { return !isNegative(); } + /// \brief Determine if sign bit of this APInt is set. + /// + /// This tests the high bit of this APInt to determine if it is set. + /// + /// \returns true if this APInt has its sign bit set, false otherwise. + bool isSignBitSet() const { return (*this)[BitWidth-1]; } + + /// \brief Determine if sign bit of this APInt is clear. + /// + /// This tests the high bit of this APInt to determine if it is clear. + /// + /// \returns true if this APInt has its sign bit clear, false otherwise. + bool isSignBitClear() const { return !isSignBitSet(); } + /// \brief Determine if this APInt Value is positive. /// /// This tests if the value of this APInt is positive (> 0). Note @@ -396,10 +416,10 @@ public: return countPopulationSlowCase() == 1; } - /// \brief Check if the APInt's value is returned by getSignBit. + /// \brief Check if the APInt's value is returned by getSignMask. /// - /// \returns true if this is the value returned by getSignBit. - bool isSignBit() const { return isMinSignedValue(); } + /// \returns true if this is the value returned by getSignMask. + bool isSignMask() const { return isMinSignedValue(); } /// \brief Convert APInt to a boolean value. /// @@ -409,8 +429,7 @@ public: /// If this value is smaller than the specified limit, return it, otherwise /// return the limit value. This causes the value to saturate to the limit. uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const { - return (getActiveBits() > 64 || getZExtValue() > Limit) ? Limit - : getZExtValue(); + return ugt(Limit) ? Limit : getZExtValue(); } /// \brief Check if the APInt consists of a repeated bit pattern. 
@@ -427,8 +446,9 @@ public: assert(numBits <= BitWidth && "numBits out of range"); if (isSingleWord()) return VAL == (UINT64_MAX >> (APINT_BITS_PER_WORD - numBits)); - unsigned Ones = countTrailingOnes(); - return (numBits == Ones) && ((Ones + countLeadingZeros()) == BitWidth); + unsigned Ones = countTrailingOnesSlowCase(); + return (numBits == Ones) && + ((Ones + countLeadingZerosSlowCase()) == BitWidth); } /// \returns true if this APInt is a non-empty sequence of ones starting at @@ -437,8 +457,8 @@ public: bool isMask() const { if (isSingleWord()) return isMask_64(VAL); - unsigned Ones = countTrailingOnes(); - return (Ones > 0) && ((Ones + countLeadingZeros()) == BitWidth); + unsigned Ones = countTrailingOnesSlowCase(); + return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth); } /// \brief Return true if this APInt value contains a sequence of ones with @@ -446,8 +466,9 @@ public: bool isShiftedMask() const { if (isSingleWord()) return isShiftedMask_64(VAL); - unsigned Ones = countPopulation(); - return (Ones + countTrailingZeros() + countLeadingZeros()) == BitWidth; + unsigned Ones = countPopulationSlowCase(); + unsigned LeadZ = countLeadingZerosSlowCase(); + return (Ones + LeadZ + countTrailingZeros()) == BitWidth; } /// @} @@ -476,11 +497,11 @@ public: return API; } - /// \brief Get the SignBit for a specific bit width. + /// \brief Get the SignMask for a specific bit width. /// /// This is just a wrapper function of getSignedMinValue(), and it helps code - /// readability when we want to get a SignBit. - static APInt getSignBit(unsigned BitWidth) { + /// readability when we want to get a SignMask. + static APInt getSignMask(unsigned BitWidth) { return getSignedMinValue(BitWidth); } @@ -674,29 +695,22 @@ public: return clearUnusedBits(); } - return AssignSlowCase(RHS); + AssignSlowCase(RHS); + return *this; } /// @brief Move assignment operator. 
APInt &operator=(APInt &&that) { - if (!isSingleWord()) { - // The MSVC STL shipped in 2013 requires that self move assignment be a - // no-op. Otherwise algorithms like stable_sort will produce answers - // where half of the output is left in a moved-from state. - if (this == &that) - return *this; + assert(this != &that && "Self-move not supported"); + if (!isSingleWord()) delete[] pVal; - } // Use memcpy so that type based alias analysis sees both VAL and pVal // as modified. memcpy(&VAL, &that.VAL, sizeof(uint64_t)); - // If 'this == &that', avoid zeroing our own bitwidth by storing to 'that' - // first. - unsigned ThatBitWidth = that.BitWidth; + BitWidth = that.BitWidth; that.BitWidth = 0; - BitWidth = ThatBitWidth; return *this; } @@ -727,11 +741,11 @@ public: /// \returns *this after ANDing with RHS. APInt &operator&=(const APInt &RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); - if (isSingleWord()) { + if (isSingleWord()) VAL &= RHS.VAL; - return *this; - } - return AndAssignSlowCase(RHS); + else + AndAssignSlowCase(RHS); + return *this; } /// \brief Bitwise AND assignment operator. @@ -757,11 +771,11 @@ public: /// \returns *this after ORing with RHS. APInt &operator|=(const APInt &RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); - if (isSingleWord()) { + if (isSingleWord()) VAL |= RHS.VAL; - return *this; - } - return OrAssignSlowCase(RHS); + else + OrAssignSlowCase(RHS); + return *this; } /// \brief Bitwise OR assignment operator. @@ -787,11 +801,11 @@ public: /// \returns *this after XORing with RHS. APInt &operator^=(const APInt &RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); - if (isSingleWord()) { + if (isSingleWord()) VAL ^= RHS.VAL; - return *this; - } - return XorAssignSlowCase(RHS); + else + XorAssignSlowCase(RHS); + return *this; } /// \brief Bitwise XOR assignment operator. 
@@ -836,9 +850,17 @@ public: /// /// Shifts *this left by shiftAmt and assigns the result to *this. /// - /// \returns *this after shifting left by shiftAmt - APInt &operator<<=(unsigned shiftAmt) { - *this = shl(shiftAmt); + /// \returns *this after shifting left by ShiftAmt + APInt &operator<<=(unsigned ShiftAmt) { + assert(ShiftAmt <= BitWidth && "Invalid shift amount"); + if (isSingleWord()) { + if (ShiftAmt == BitWidth) + VAL = 0; + else + VAL <<= ShiftAmt; + return clearUnusedBits(); + } + shlSlowCase(ShiftAmt); return *this; } @@ -875,20 +897,26 @@ public: return R; } - /// Logical right-shift this APInt by shiftAmt in place. - void lshrInPlace(unsigned shiftAmt); + /// Logical right-shift this APInt by ShiftAmt in place. + void lshrInPlace(unsigned ShiftAmt) { + assert(ShiftAmt <= BitWidth && "Invalid shift amount"); + if (isSingleWord()) { + if (ShiftAmt == BitWidth) + VAL = 0; + else + VAL >>= ShiftAmt; + return; + } + lshrSlowCase(ShiftAmt); + } /// \brief Left-shift function. /// /// Left-shift this APInt by shiftAmt. APInt shl(unsigned shiftAmt) const { - assert(shiftAmt <= BitWidth && "Invalid shift amount"); - if (isSingleWord()) { - if (shiftAmt >= BitWidth) - return APInt(BitWidth, 0); // avoid undefined shift results - return APInt(BitWidth, VAL << shiftAmt); - } - return shlSlowCase(shiftAmt); + APInt R(*this); + R <<= shiftAmt; + return R; } /// \brief Rotate left by rotateAmt. @@ -905,7 +933,14 @@ public: /// \brief Logical right-shift function. /// /// Logical right-shift this APInt by shiftAmt. - APInt lshr(const APInt &shiftAmt) const; + APInt lshr(const APInt &ShiftAmt) const { + APInt R(*this); + R.lshrInPlace(ShiftAmt); + return R; + } + + /// Logical right-shift this APInt by ShiftAmt in place. + void lshrInPlace(const APInt &ShiftAmt); /// \brief Left-shift function. 
/// @@ -1003,9 +1038,7 @@ public: /// /// \returns true if *this == Val bool operator==(uint64_t Val) const { - if (isSingleWord()) - return VAL == Val; - return EqualSlowCase(Val); + return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val; } /// \brief Equality comparison. @@ -1055,7 +1088,8 @@ public: /// /// \returns true if *this < RHS when considered unsigned. bool ult(uint64_t RHS) const { - return getActiveBits() > 64 ? false : getZExtValue() < RHS; + // Only need to check active bits if not a single word. + return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS; } /// \brief Signed less than comparison @@ -1073,7 +1107,8 @@ public: /// /// \returns true if *this < RHS when considered signed. bool slt(int64_t RHS) const { - return getMinSignedBits() > 64 ? isNegative() : getSExtValue() < RHS; + return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative() + : getSExtValue() < RHS; } /// \brief Unsigned less or equal comparison @@ -1123,7 +1158,8 @@ public: /// /// \returns true if *this > RHS when considered unsigned. bool ugt(uint64_t RHS) const { - return getActiveBits() > 64 ? true : getZExtValue() > RHS; + // Only need to check active bits if not a single word. + return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS; } /// \brief Signed greather than comparison @@ -1141,7 +1177,8 @@ public: /// /// \returns true if *this > RHS when considered signed. bool sgt(int64_t RHS) const { - return getMinSignedBits() > 64 ? !isNegative() : getSExtValue() > RHS; + return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative() + : getSExtValue() > RHS; } /// \brief Unsigned greater or equal comparison @@ -1179,9 +1216,18 @@ public: /// This operation tests if there are any pairs of corresponding bits /// between this APInt and RHS that are both set. 
bool intersects(const APInt &RHS) const { - APInt temp(*this); - temp &= RHS; - return temp != 0; + assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); + if (isSingleWord()) + return (VAL & RHS.VAL) != 0; + return intersectsSlowCase(RHS); + } + + /// This operation checks that all bits set in this APInt are also set in RHS. + bool isSubsetOf(const APInt &RHS) const { + assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); + if (isSingleWord()) + return (VAL & ~RHS.VAL) == 0; + return isSubsetOfSlowCase(RHS); } /// @} @@ -1404,8 +1450,7 @@ public: /// int64_t. Otherwise an assertion will result. int64_t getSExtValue() const { if (isSingleWord()) - return int64_t(VAL << (APINT_BITS_PER_WORD - BitWidth)) >> - (APINT_BITS_PER_WORD - BitWidth); + return SignExtend64(VAL, BitWidth); assert(getMinSignedBits() <= 64 && "Too many bits for int64_t"); return int64_t(pVal[0]); } @@ -1759,13 +1804,13 @@ public: WordType *remainder, WordType *scratch, unsigned parts); - /// Shift a bignum left COUNT bits. Shifted in bits are zero. There are no - /// restrictions on COUNT. - static void tcShiftLeft(WordType *, unsigned parts, unsigned count); + /// Shift a bignum left Count bits. Shifted in bits are zero. There are no + /// restrictions on Count. + static void tcShiftLeft(WordType *, unsigned Words, unsigned Count); - /// Shift a bignum right COUNT bits. Shifted in bits are zero. There are no - /// restrictions on COUNT. - static void tcShiftRight(WordType *, unsigned parts, unsigned count); + /// Shift a bignum right Count bits. Shifted in bits are zero. There are no + /// restrictions on Count. + static void tcShiftRight(WordType *, unsigned Words, unsigned Count); /// The obvious AND, OR and XOR and complement operations. static void tcAnd(WordType *, const WordType *, unsigned); @@ -1959,7 +2004,7 @@ inline const APInt &umax(const APInt &A, const APInt &B) { /// \brief Compute GCD of two unsigned APInt values. 
/// /// This function returns the greatest common divisor of the two APInt values -/// using Euclid's algorithm. +/// using Stein's algorithm. /// /// \returns the greatest common divisor of A and B. APInt GreatestCommonDivisor(APInt A, APInt B); diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h index 8240d01ae977c..e48c023ae7df9 100644 --- a/include/llvm/ADT/BitVector.h +++ b/include/llvm/ADT/BitVector.h @@ -14,6 +14,8 @@ #ifndef LLVM_ADT_BITVECTOR_H #define LLVM_ADT_BITVECTOR_H +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Support/MathExtras.h" #include <algorithm> #include <cassert> @@ -455,6 +457,105 @@ public: return *this; } + BitVector &operator>>=(unsigned N) { + assert(N <= Size); + if (LLVM_UNLIKELY(empty() || N == 0)) + return *this; + + unsigned NumWords = NumBitWords(Size); + assert(NumWords >= 1); + + wordShr(N / BITWORD_SIZE); + + unsigned BitDistance = N % BITWORD_SIZE; + if (BitDistance == 0) + return *this; + + // When the shift size is not a multiple of the word size, then we have + // a tricky situation where each word in succession needs to extract some + // of the bits from the next word and or them into this word while + // shifting this word to make room for the new bits. This has to be done + // for every word in the array. + + // Since we're shifting each word right, some bits will fall off the end + // of each word to the right, and empty space will be created on the left. + // The final word in the array will lose bits permanently, so starting at + // the beginning, work forwards shifting each word to the right, and + // OR'ing in the bits from the end of the next word to the beginning of + // the current word. + + // Example: + // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right + // by 4 bits. 
+ // Step 1: Word[0] >>= 4 ; 0x0ABBCCDD + // Step 2: Word[0] |= 0x10000000 ; 0x1ABBCCDD + // Step 3: Word[1] >>= 4 ; 0x0EEFF001 + // Step 4: Word[1] |= 0x50000000 ; 0x5EEFF001 + // Step 5: Word[2] >>= 4 ; 0x02334455 + // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 } + const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance); + const unsigned LSH = BITWORD_SIZE - BitDistance; + + for (unsigned I = 0; I < NumWords - 1; ++I) { + Bits[I] >>= BitDistance; + Bits[I] |= (Bits[I + 1] & Mask) << LSH; + } + + Bits[NumWords - 1] >>= BitDistance; + + return *this; + } + + BitVector &operator<<=(unsigned N) { + assert(N <= Size); + if (LLVM_UNLIKELY(empty() || N == 0)) + return *this; + + unsigned NumWords = NumBitWords(Size); + assert(NumWords >= 1); + + wordShl(N / BITWORD_SIZE); + + unsigned BitDistance = N % BITWORD_SIZE; + if (BitDistance == 0) + return *this; + + // When the shift size is not a multiple of the word size, then we have + // a tricky situation where each word in succession needs to extract some + // of the bits from the previous word and or them into this word while + // shifting this word to make room for the new bits. This has to be done + // for every word in the array. This is similar to the algorithm outlined + // in operator>>=, but backwards. + + // Since we're shifting each word left, some bits will fall off the end + // of each word to the left, and empty space will be created on the right. + // The first word in the array will lose bits permanently, so starting at + // the end, work backwards shifting each word to the left, and OR'ing + // in the bits from the end of the next word to the beginning of the + // current word. + + // Example: + // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left + // by 4 bits. 
+ // Step 1: Word[2] <<= 4 ; 0x23344550 + // Step 2: Word[2] |= 0x0000000E ; 0x2334455E + // Step 3: Word[1] <<= 4 ; 0xEFF00110 + // Step 4: Word[1] |= 0x0000000A ; 0xEFF0011A + // Step 5: Word[0] <<= 4 ; 0xABBCCDD0 + // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E } + const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance); + const unsigned RSH = BITWORD_SIZE - BitDistance; + + for (int I = NumWords - 1; I > 0; --I) { + Bits[I] <<= BitDistance; + Bits[I] |= (Bits[I - 1] & Mask) >> RSH; + } + Bits[0] <<= BitDistance; + clear_unused_bits(); + + return *this; + } + // Assignment operator. const BitVector &operator=(const BitVector &RHS) { if (this == &RHS) return *this; @@ -538,6 +639,54 @@ public: } private: + /// \brief Perform a logical left shift of \p Count words by moving everything + /// \p Count words to the right in memory. + /// + /// While confusing, words are stored from least significant at Bits[0] to + /// most significant at Bits[NumWords-1]. A logical shift left, however, + /// moves the current least significant bit to a higher logical index, and + /// fills the previous least significant bits with 0. Thus, we actually + /// need to move the bytes of the memory to the right, not to the left. + /// Example: + /// Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000] + /// represents a BitVector where 0xBBBBAAAA contain the least significant + /// bits. So if we want to shift the BitVector left by 2 words, we need to + /// turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a + /// memmove which moves right, not left. + void wordShl(uint32_t Count) { + if (Count == 0) + return; + + uint32_t NumWords = NumBitWords(Size); + + auto Src = ArrayRef<BitWord>(Bits, NumWords).drop_back(Count); + auto Dest = MutableArrayRef<BitWord>(Bits, NumWords).drop_front(Count); + + // Since we always move Word-sized chunks of data with src and dest both + // aligned to a word-boundary, we don't need to worry about endianness + // here. 
+ std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord)); + std::memset(Bits, 0, Count * sizeof(BitWord)); + clear_unused_bits(); + } + + /// \brief Perform a logical right shift of \p Count words by moving those + /// words to the left in memory. See wordShl for more information. + /// + void wordShr(uint32_t Count) { + if (Count == 0) + return; + + uint32_t NumWords = NumBitWords(Size); + + auto Src = ArrayRef<BitWord>(Bits, NumWords).drop_front(Count); + auto Dest = MutableArrayRef<BitWord>(Bits, NumWords).drop_back(Count); + assert(Dest.size() == Src.size()); + + std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord)); + std::memset(Dest.end(), 0, Count * sizeof(BitWord)); + } + int next_unset_in_word(int WordIndex, BitWord Word) const { unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word); return Result < size() ? Result : -1; diff --git a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h index edb37da38da1b..607e040a606cb 100644 --- a/include/llvm/ADT/SmallBitVector.h +++ b/include/llvm/ADT/SmallBitVector.h @@ -508,6 +508,22 @@ public: return *this; } + SmallBitVector &operator<<=(unsigned N) { + if (isSmall()) + setSmallBits(getSmallBits() << N); + else + getPointer()->operator<<=(N); + return *this; + } + + SmallBitVector &operator>>=(unsigned N) { + if (isSmall()) + setSmallBits(getSmallBits() >> N); + else + getPointer()->operator>>=(N); + return *this; + } + // Assignment operator. 
const SmallBitVector &operator=(const SmallBitVector &RHS) { if (isSmall()) { diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h index e3d81fea49ea1..3e05e09900a5f 100644 --- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h +++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h @@ -1164,9 +1164,8 @@ template <class BT> struct BlockEdgesAdder { void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr, const LoopData *OuterLoop) { const BlockT *BB = BFI.RPOT[Irr.Node.Index]; - for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB); - I != E; ++I) - G.addEdge(Irr, BFI.getNode(*I), OuterLoop); + for (const auto Succ : children<const BlockT *>(BB)) + G.addEdge(Irr, BFI.getNode(Succ), OuterLoop); } }; } @@ -1210,10 +1209,9 @@ BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop, return false; } else { const BlockT *BB = getBlock(Node); - for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB); - SI != SE; ++SI) - if (!addToDist(Dist, OuterLoop, Node, getNode(*SI), - getWeightFromBranchProb(BPI->getEdgeProbability(BB, SI)))) + for (const auto Succ : children<const BlockT *>(BB)) + if (!addToDist(Dist, OuterLoop, Node, getNode(Succ), + getWeightFromBranchProb(BPI->getEdgeProbability(BB, Succ)))) // Irreducible backedge. return false; } diff --git a/include/llvm/Analysis/DominanceFrontierImpl.h b/include/llvm/Analysis/DominanceFrontierImpl.h index 629ae38090457..9f8cacc24f2ce 100644 --- a/include/llvm/Analysis/DominanceFrontierImpl.h +++ b/include/llvm/Analysis/DominanceFrontierImpl.h @@ -174,12 +174,10 @@ ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT, // Visit each block only once. 
if (visited.insert(currentBB).second) { // Loop over CFG successors to calculate DFlocal[currentNode] - for (auto SI = BlockTraits::child_begin(currentBB), - SE = BlockTraits::child_end(currentBB); - SI != SE; ++SI) { + for (const auto Succ : children<BlockT *>(currentBB)) { // Does Node immediately dominate this successor? - if (DT[*SI]->getIDom() != currentNode) - S.insert(*SI); + if (DT[Succ]->getIDom() != currentNode) + S.insert(Succ); } } diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h index 996794b660a9e..2fad1737d1c03 100644 --- a/include/llvm/Analysis/LoopInfo.h +++ b/include/llvm/Analysis/LoopInfo.h @@ -158,11 +158,8 @@ public: /// True if terminator in the block can branch to another block that is /// outside of the current loop. bool isLoopExiting(const BlockT *BB) const { - typedef GraphTraits<const BlockT*> BlockTraits; - for (typename BlockTraits::ChildIteratorType SI = - BlockTraits::child_begin(BB), - SE = BlockTraits::child_end(BB); SI != SE; ++SI) { - if (!contains(*SI)) + for (const auto Succ : children<const BlockT*>(BB)) { + if (!contains(Succ)) return true; } return false; @@ -186,11 +183,8 @@ public: unsigned NumBackEdges = 0; BlockT *H = getHeader(); - typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; - for (typename InvBlockTraits::ChildIteratorType I = - InvBlockTraits::child_begin(H), - E = InvBlockTraits::child_end(H); I != E; ++I) - if (contains(*I)) + for (const auto Pred : children<Inverse<BlockT*> >(H)) + if (contains(Pred)) ++NumBackEdges; return NumBackEdges; @@ -249,12 +243,9 @@ public: /// contains a branch back to the header. 
void getLoopLatches(SmallVectorImpl<BlockT *> &LoopLatches) const { BlockT *H = getHeader(); - typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; - for (typename InvBlockTraits::ChildIteratorType I = - InvBlockTraits::child_begin(H), - E = InvBlockTraits::child_end(H); I != E; ++I) - if (contains(*I)) - LoopLatches.push_back(*I); + for (const auto Pred : children<Inverse<BlockT*>>(H)) + if (contains(Pred)) + LoopLatches.push_back(Pred); } //===--------------------------------------------------------------------===// diff --git a/include/llvm/Analysis/LoopInfoImpl.h b/include/llvm/Analysis/LoopInfoImpl.h index 761f8721b54fd..6dc0422ce0e94 100644 --- a/include/llvm/Analysis/LoopInfoImpl.h +++ b/include/llvm/Analysis/LoopInfoImpl.h @@ -34,14 +34,11 @@ namespace llvm { template<class BlockT, class LoopT> void LoopBase<BlockT, LoopT>:: getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const { - typedef GraphTraits<BlockT*> BlockTraits; - for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) - for (typename BlockTraits::ChildIteratorType I = - BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI); - I != E; ++I) - if (!contains(*I)) { + for (const auto BB : blocks()) + for (const auto Succ : children<BlockT*>(BB)) + if (!contains(Succ)) { // Not in current loop? It must be an exit block. 
- ExitingBlocks.push_back(*BI); + ExitingBlocks.push_back(BB); break; } } @@ -63,14 +60,11 @@ BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const { template<class BlockT, class LoopT> void LoopBase<BlockT, LoopT>:: getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const { - typedef GraphTraits<BlockT*> BlockTraits; - for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) - for (typename BlockTraits::ChildIteratorType I = - BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI); - I != E; ++I) - if (!contains(*I)) + for (const auto BB : blocks()) + for (const auto Succ : children<BlockT*>(BB)) + if (!contains(Succ)) // Not in current loop? It must be an exit block. - ExitBlocks.push_back(*I); + ExitBlocks.push_back(Succ); } /// getExitBlock - If getExitBlocks would return exactly one block, @@ -88,14 +82,11 @@ BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const { template<class BlockT, class LoopT> void LoopBase<BlockT, LoopT>:: getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const { - typedef GraphTraits<BlockT*> BlockTraits; - for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) - for (typename BlockTraits::ChildIteratorType I = - BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI); - I != E; ++I) - if (!contains(*I)) + for (const auto BB : blocks()) + for (const auto Succ : children<BlockT*>(BB)) + if (!contains(Succ)) // Not in current loop? It must be an exit block. - ExitEdges.push_back(Edge(*BI, *I)); + ExitEdges.emplace_back(BB, Succ); } /// getLoopPreheader - If there is a preheader for this loop, return it. A @@ -134,15 +125,11 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const { // Loop over the predecessors of the header node... 
BlockT *Header = getHeader(); - typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; - for (typename InvBlockTraits::ChildIteratorType PI = - InvBlockTraits::child_begin(Header), - PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) { - typename InvBlockTraits::NodeRef N = *PI; - if (!contains(N)) { // If the block is not in the loop... - if (Out && Out != N) + for (const auto Pred : children<Inverse<BlockT*>>(Header)) { + if (!contains(Pred)) { // If the block is not in the loop... + if (Out && Out != Pred) return nullptr; // Multiple predecessors outside the loop - Out = N; + Out = Pred; } } @@ -156,17 +143,11 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const { template<class BlockT, class LoopT> BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const { BlockT *Header = getHeader(); - typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; - typename InvBlockTraits::ChildIteratorType PI = - InvBlockTraits::child_begin(Header); - typename InvBlockTraits::ChildIteratorType PE = - InvBlockTraits::child_end(Header); BlockT *Latch = nullptr; - for (; PI != PE; ++PI) { - typename InvBlockTraits::NodeRef N = *PI; - if (contains(N)) { + for (const auto Pred : children<Inverse<BlockT*>>(Header)) { + if (contains(Pred)) { if (Latch) return nullptr; - Latch = N; + Latch = Pred; } } @@ -394,11 +375,9 @@ static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges, // within this subloop tree itself. Note that a predecessor may directly // reach another subloop that is not yet discovered to be a subloop of // this loop, which we must traverse. 
- for (typename InvBlockTraits::ChildIteratorType PI = - InvBlockTraits::child_begin(PredBB), - PE = InvBlockTraits::child_end(PredBB); PI != PE; ++PI) { - if (LI->getLoopFor(*PI) != Subloop) - ReverseCFGWorklist.push_back(*PI); + for (const auto Pred : children<Inverse<BlockT*>>(PredBB)) { + if (LI->getLoopFor(Pred) != Subloop) + ReverseCFGWorklist.push_back(Pred); } } } @@ -482,13 +461,7 @@ analyze(const DominatorTreeBase<BlockT> &DomTree) { SmallVector<BlockT *, 4> Backedges; // Check each predecessor of the potential loop header. - typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; - for (typename InvBlockTraits::ChildIteratorType PI = - InvBlockTraits::child_begin(Header), - PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) { - - BlockT *Backedge = *PI; - + for (const auto Backedge : children<Inverse<BlockT*>>(Header)) { // If Header dominates predBB, this is a new loop. Collect the backedges. if (DomTree.dominates(Header, Backedge) && DomTree.isReachableFromEntry(Backedge)) { diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h index c5514316f75f0..743faf2b67db1 100644 --- a/include/llvm/Analysis/MemoryBuiltins.h +++ b/include/llvm/Analysis/MemoryBuiltins.h @@ -54,6 +54,11 @@ bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast = false); /// \brief Tests if a value is a call or invoke to a library function that +/// allocates memory similar to malloc or calloc. +bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); + +/// \brief Tests if a value is a call or invoke to a library function that /// allocates memory (either malloc, calloc, or strdup like). 
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast = false); diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h index 9a50de540f2b1..91aeae0f728f9 100644 --- a/include/llvm/Analysis/ScalarEvolution.h +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -1159,8 +1159,20 @@ public: const SCEV *getConstant(const APInt &Val); const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false); const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty); + + typedef SmallDenseMap<std::pair<const SCEV *, Type *>, const SCEV *, 8> + ExtendCacheTy; const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty); + const SCEV *getZeroExtendExprCached(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache); + const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache); + const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty); + const SCEV *getSignExtendExprCached(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache); + const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache); const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty); const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops, SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap, diff --git a/include/llvm/Bitcode/BitcodeReader.h b/include/llvm/Bitcode/BitcodeReader.h index 9e042b17241f7..0701ddbb7f1c0 100644 --- a/include/llvm/Bitcode/BitcodeReader.h +++ b/include/llvm/Bitcode/BitcodeReader.h @@ -46,6 +46,9 @@ namespace llvm { ArrayRef<uint8_t> Buffer; StringRef ModuleIdentifier; + // The string table used to interpret this module. + StringRef Strtab; + // The bitstream location of the IDENTIFICATION_BLOCK. 
uint64_t IdentificationBit; @@ -70,6 +73,7 @@ namespace llvm { StringRef getBuffer() const { return StringRef((const char *)Buffer.begin(), Buffer.size()); } + StringRef getStrtab() const { return Strtab; } StringRef getModuleIdentifier() const { return ModuleIdentifier; } diff --git a/include/llvm/Bitcode/BitcodeWriter.h b/include/llvm/Bitcode/BitcodeWriter.h index 271cb2d81bbb2..23b5ae87b2787 100644 --- a/include/llvm/Bitcode/BitcodeWriter.h +++ b/include/llvm/Bitcode/BitcodeWriter.h @@ -15,6 +15,7 @@ #define LLVM_BITCODE_BITCODEWRITER_H #include "llvm/IR/ModuleSummaryIndex.h" +#include "llvm/MC/StringTableBuilder.h" #include <string> namespace llvm { @@ -26,12 +27,25 @@ namespace llvm { SmallVectorImpl<char> &Buffer; std::unique_ptr<BitstreamWriter> Stream; + StringTableBuilder StrtabBuilder{StringTableBuilder::RAW}; + bool WroteStrtab = false; + + void writeBlob(unsigned Block, unsigned Record, StringRef Blob); + public: /// Create a BitcodeWriter that writes to Buffer. BitcodeWriter(SmallVectorImpl<char> &Buffer); ~BitcodeWriter(); + /// Write the bitcode file's string table. This must be called exactly once + /// after all modules have been written. + void writeStrtab(); + + /// Copy the string table for another module into this bitcode file. This + /// should be called after copying the module itself into the bitcode file. + void copyStrtab(StringRef Strtab); + /// Write the specified module to the buffer specified at construction time. /// /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h index e2d2fbb0f449a..03eac80bc1e89 100644 --- a/include/llvm/Bitcode/LLVMBitCodes.h +++ b/include/llvm/Bitcode/LLVMBitCodes.h @@ -22,7 +22,7 @@ namespace llvm { namespace bitc { -// The only top-level block type defined is for a module. +// The only top-level block types are MODULE, IDENTIFICATION and STRTAB. 
enum BlockIDs { // Blocks MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID, @@ -52,7 +52,9 @@ enum BlockIDs { OPERAND_BUNDLE_TAGS_BLOCK_ID, - METADATA_KIND_BLOCK_ID + METADATA_KIND_BLOCK_ID, + + STRTAB_BLOCK_ID, }; /// Identification block contains a string that describes the producer details, @@ -232,6 +234,10 @@ enum GlobalValueSummarySymtabCodes { // llvm.type.checked.load intrinsic with all constant integer arguments. // [typeid, offset, n x arg] FS_TYPE_CHECKED_LOAD_CONST_VCALL = 15, + // Assigns a GUID to a value ID. This normally appears only in combined + // summaries, but it can also appear in per-module summaries for PGO data. + // [valueid, guid] + FS_VALUE_GUID = 16, }; enum MetadataCodes { @@ -550,6 +556,10 @@ enum ComdatSelectionKindCodes { COMDAT_SELECTION_KIND_SAME_SIZE = 5, }; +enum StrtabCodes { + STRTAB_BLOB = 1, +}; + } // End bitc namespace } // End llvm namespace diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h index d8096aeb215ad..911e8756070b2 100644 --- a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h +++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h @@ -62,9 +62,6 @@ protected: const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const; - Optional<int64_t> getConstantVRegVal(unsigned VReg, - const MachineRegisterInfo &MRI) const; - bool isOperandImmEqual(const MachineOperand &MO, int64_t Value, const MachineRegisterInfo &MRI) const; diff --git a/include/llvm/CodeGen/GlobalISel/Utils.h b/include/llvm/CodeGen/GlobalISel/Utils.h index 52bf965a3cb3f..92bc9736141a1 100644 --- a/include/llvm/CodeGen/GlobalISel/Utils.h +++ b/include/llvm/CodeGen/GlobalISel/Utils.h @@ -60,5 +60,8 @@ void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, const char *PassName, StringRef Msg, const MachineInstr &MI); +Optional<int64_t> getConstantVRegVal(unsigned VReg, + const MachineRegisterInfo &MRI); + } // End namespace llvm. 
#endif diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h index ef4226d30fe36..412c55d542ea6 100644 --- a/include/llvm/CodeGen/MachineInstrBuilder.h +++ b/include/llvm/CodeGen/MachineInstrBuilder.h @@ -413,6 +413,11 @@ MachineInstrBuilder BuildMI(MachineBasicBlock &BB, unsigned Reg, unsigned Offset, const MDNode *Variable, const MDNode *Expr); +/// Clone a DBG_VALUE whose value has been spilled to FrameIndex. +MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB, + MachineBasicBlock::iterator I, + const MachineInstr &Orig, int FrameIndex); + inline unsigned getDefRegState(bool B) { return B ? RegState::Define : 0; } diff --git a/include/llvm/CodeGen/MachineValueType.h b/include/llvm/CodeGen/MachineValueType.h index e4744fd5e260b..a90fe96227b99 100644 --- a/include/llvm/CodeGen/MachineValueType.h +++ b/include/llvm/CodeGen/MachineValueType.h @@ -28,155 +28,246 @@ namespace llvm { /// type can be represented by an MVT. class MVT { public: - enum SimpleValueType : int8_t { - // Simple value types less than zero are considered extended value types. - INVALID_SIMPLE_VALUE_TYPE = -1, + enum SimpleValueType : uint8_t { + // Simple value types that aren't explicitly part of this enumeration + // are considered extended value types. + INVALID_SIMPLE_VALUE_TYPE = 0, // If you change this numbering, you must change the values in // ValueTypes.td as well! 
- Other = 0, // This is a non-standard value - i1 = 1, // This is a 1 bit integer value - i8 = 2, // This is an 8 bit integer value - i16 = 3, // This is a 16 bit integer value - i32 = 4, // This is a 32 bit integer value - i64 = 5, // This is a 64 bit integer value - i128 = 6, // This is a 128 bit integer value + Other = 1, // This is a non-standard value + i1 = 2, // This is a 1 bit integer value + i8 = 3, // This is an 8 bit integer value + i16 = 4, // This is a 16 bit integer value + i32 = 5, // This is a 32 bit integer value + i64 = 6, // This is a 64 bit integer value + i128 = 7, // This is a 128 bit integer value FIRST_INTEGER_VALUETYPE = i1, LAST_INTEGER_VALUETYPE = i128, - f16 = 7, // This is a 16 bit floating point value - f32 = 8, // This is a 32 bit floating point value - f64 = 9, // This is a 64 bit floating point value - f80 = 10, // This is a 80 bit floating point value - f128 = 11, // This is a 128 bit floating point value - ppcf128 = 12, // This is a PPC 128-bit floating point value + f16 = 8, // This is a 16 bit floating point value + f32 = 9, // This is a 32 bit floating point value + f64 = 10, // This is a 64 bit floating point value + f80 = 11, // This is a 80 bit floating point value + f128 = 12, // This is a 128 bit floating point value + ppcf128 = 13, // This is a PPC 128-bit floating point value FIRST_FP_VALUETYPE = f16, LAST_FP_VALUETYPE = ppcf128, - v2i1 = 13, // 2 x i1 - v4i1 = 14, // 4 x i1 - v8i1 = 15, // 8 x i1 - v16i1 = 16, // 16 x i1 - v32i1 = 17, // 32 x i1 - v64i1 = 18, // 64 x i1 - v512i1 = 19, // 512 x i1 - v1024i1 = 20, // 1024 x i1 - - v1i8 = 21, // 1 x i8 - v2i8 = 22, // 2 x i8 - v4i8 = 23, // 4 x i8 - v8i8 = 24, // 8 x i8 - v16i8 = 25, // 16 x i8 - v32i8 = 26, // 32 x i8 - v64i8 = 27, // 64 x i8 - v128i8 = 28, //128 x i8 - v256i8 = 29, //256 x i8 - - v1i16 = 30, // 1 x i16 - v2i16 = 31, // 2 x i16 - v4i16 = 32, // 4 x i16 - v8i16 = 33, // 8 x i16 - v16i16 = 34, // 16 x i16 - v32i16 = 35, // 32 x i16 - v64i16 = 36, // 64 x 
i16 - v128i16 = 37, //128 x i16 - - v1i32 = 38, // 1 x i32 - v2i32 = 39, // 2 x i32 - v4i32 = 40, // 4 x i32 - v8i32 = 41, // 8 x i32 - v16i32 = 42, // 16 x i32 - v32i32 = 43, // 32 x i32 - v64i32 = 44, // 64 x i32 - - v1i64 = 45, // 1 x i64 - v2i64 = 46, // 2 x i64 - v4i64 = 47, // 4 x i64 - v8i64 = 48, // 8 x i64 - v16i64 = 49, // 16 x i64 - v32i64 = 50, // 32 x i64 - - v1i128 = 51, // 1 x i128 + v2i1 = 14, // 2 x i1 + v4i1 = 15, // 4 x i1 + v8i1 = 16, // 8 x i1 + v16i1 = 17, // 16 x i1 + v32i1 = 18, // 32 x i1 + v64i1 = 19, // 64 x i1 + v512i1 = 20, // 512 x i1 + v1024i1 = 21, // 1024 x i1 + + v1i8 = 22, // 1 x i8 + v2i8 = 23, // 2 x i8 + v4i8 = 24, // 4 x i8 + v8i8 = 25, // 8 x i8 + v16i8 = 26, // 16 x i8 + v32i8 = 27, // 32 x i8 + v64i8 = 28, // 64 x i8 + v128i8 = 29, //128 x i8 + v256i8 = 30, //256 x i8 + + v1i16 = 31, // 1 x i16 + v2i16 = 32, // 2 x i16 + v4i16 = 33, // 4 x i16 + v8i16 = 34, // 8 x i16 + v16i16 = 35, // 16 x i16 + v32i16 = 36, // 32 x i16 + v64i16 = 37, // 64 x i16 + v128i16 = 38, //128 x i16 + + v1i32 = 39, // 1 x i32 + v2i32 = 40, // 2 x i32 + v4i32 = 41, // 4 x i32 + v8i32 = 42, // 8 x i32 + v16i32 = 43, // 16 x i32 + v32i32 = 44, // 32 x i32 + v64i32 = 45, // 64 x i32 + + v1i64 = 46, // 1 x i64 + v2i64 = 47, // 2 x i64 + v4i64 = 48, // 4 x i64 + v8i64 = 49, // 8 x i64 + v16i64 = 50, // 16 x i64 + v32i64 = 51, // 32 x i64 + + v1i128 = 52, // 1 x i128 + + // Scalable integer types + nxv2i1 = 53, // n x 2 x i1 + nxv4i1 = 54, // n x 4 x i1 + nxv8i1 = 55, // n x 8 x i1 + nxv16i1 = 56, // n x 16 x i1 + nxv32i1 = 57, // n x 32 x i1 + + nxv1i8 = 58, // n x 1 x i8 + nxv2i8 = 59, // n x 2 x i8 + nxv4i8 = 60, // n x 4 x i8 + nxv8i8 = 61, // n x 8 x i8 + nxv16i8 = 62, // n x 16 x i8 + nxv32i8 = 63, // n x 32 x i8 + + nxv1i16 = 64, // n x 1 x i16 + nxv2i16 = 65, // n x 2 x i16 + nxv4i16 = 66, // n x 4 x i16 + nxv8i16 = 67, // n x 8 x i16 + nxv16i16 = 68, // n x 16 x i16 + nxv32i16 = 69, // n x 32 x i16 + + nxv1i32 = 70, // n x 1 x i32 + nxv2i32 = 71, 
// n x 2 x i32 + nxv4i32 = 72, // n x 4 x i32 + nxv8i32 = 73, // n x 8 x i32 + nxv16i32 = 74, // n x 16 x i32 + nxv32i32 = 75, // n x 32 x i32 + + nxv1i64 = 76, // n x 1 x i64 + nxv2i64 = 77, // n x 2 x i64 + nxv4i64 = 78, // n x 4 x i64 + nxv8i64 = 79, // n x 8 x i64 + nxv16i64 = 80, // n x 16 x i64 + nxv32i64 = 81, // n x 32 x i64 FIRST_INTEGER_VECTOR_VALUETYPE = v2i1, - LAST_INTEGER_VECTOR_VALUETYPE = v1i128, - - v2f16 = 52, // 2 x f16 - v4f16 = 53, // 4 x f16 - v8f16 = 54, // 8 x f16 - v1f32 = 55, // 1 x f32 - v2f32 = 56, // 2 x f32 - v4f32 = 57, // 4 x f32 - v8f32 = 58, // 8 x f32 - v16f32 = 59, // 16 x f32 - v1f64 = 60, // 1 x f64 - v2f64 = 61, // 2 x f64 - v4f64 = 62, // 4 x f64 - v8f64 = 63, // 8 x f64 + LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64, + + FIRST_INTEGER_SCALABLE_VALUETYPE = nxv2i1, + LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64, + + v2f16 = 82, // 2 x f16 + v4f16 = 83, // 4 x f16 + v8f16 = 84, // 8 x f16 + v1f32 = 85, // 1 x f32 + v2f32 = 86, // 2 x f32 + v4f32 = 87, // 4 x f32 + v8f32 = 88, // 8 x f32 + v16f32 = 89, // 16 x f32 + v1f64 = 90, // 1 x f64 + v2f64 = 91, // 2 x f64 + v4f64 = 92, // 4 x f64 + v8f64 = 93, // 8 x f64 + + nxv2f16 = 94, // n x 2 x f16 + nxv4f16 = 95, // n x 4 x f16 + nxv8f16 = 96, // n x 8 x f16 + nxv1f32 = 97, // n x 1 x f32 + nxv2f32 = 98, // n x 2 x f32 + nxv4f32 = 99, // n x 4 x f32 + nxv8f32 = 100, // n x 8 x f32 + nxv16f32 = 101, // n x 16 x f32 + nxv1f64 = 102, // n x 1 x f64 + nxv2f64 = 103, // n x 2 x f64 + nxv4f64 = 104, // n x 4 x f64 + nxv8f64 = 105, // n x 8 x f64 FIRST_FP_VECTOR_VALUETYPE = v2f16, - LAST_FP_VECTOR_VALUETYPE = v8f64, + LAST_FP_VECTOR_VALUETYPE = nxv8f64, + + FIRST_FP_SCALABLE_VALUETYPE = nxv2f16, + LAST_FP_SCALABLE_VALUETYPE = nxv8f64, FIRST_VECTOR_VALUETYPE = v2i1, - LAST_VECTOR_VALUETYPE = v8f64, + LAST_VECTOR_VALUETYPE = nxv8f64, - x86mmx = 64, // This is an X86 MMX value + x86mmx = 106, // This is an X86 MMX value - Glue = 65, // This glues nodes together during pre-RA sched + Glue = 107, // 
This glues nodes together during pre-RA sched - isVoid = 66, // This has no value + isVoid = 108, // This has no value - Untyped = 67, // This value takes a register, but has - // unspecified type. The register class - // will be determined by the opcode. + Untyped = 109, // This value takes a register, but has + // unspecified type. The register class + // will be determined by the opcode. - FIRST_VALUETYPE = 0, // This is always the beginning of the list. - LAST_VALUETYPE = 68, // This always remains at the end of the list. + FIRST_VALUETYPE = 1, // This is always the beginning of the list. + LAST_VALUETYPE = 110, // This always remains at the end of the list. // This is the current maximum for LAST_VALUETYPE. // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors // This value must be a multiple of 32. - MAX_ALLOWED_VALUETYPE = 96, + MAX_ALLOWED_VALUETYPE = 128, // A value of type llvm::TokenTy - token = 120, + token = 248, // This is MDNode or MDString. - Metadata = 121, + Metadata = 249, // An int value the size of the pointer of the current // target to any address space. This must only be used internal to // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR. - iPTRAny = 122, + iPTRAny = 250, // A vector with any length and element size. This is used // for intrinsics that have overloadings based on vector types. // This is only for tblgen's consumption! - vAny = 123, + vAny = 251, // Any floating-point or vector floating-point value. This is used // for intrinsics that have overloadings based on floating-point types. // This is only for tblgen's consumption! - fAny = 124, + fAny = 252, // An integer or vector integer value of any bit width. This is // used for intrinsics that have overloadings based on integer bit widths. // This is only for tblgen's consumption! - iAny = 125, + iAny = 253, // An int value the size of the pointer of the current // target. This should only be used internal to tblgen! 
- iPTR = 126, + iPTR = 254, // Any type. This is used for intrinsics that have overloadings. // This is only for tblgen's consumption! - Any = 127 + Any = 255 }; SimpleValueType SimpleTy; + + // A class to represent the number of elements in a vector + // + // For fixed-length vectors, the total number of elements is equal to 'Min' + // For scalable vectors, the total number of elements is a multiple of 'Min' + class ElementCount { + public: + unsigned Min; + bool Scalable; + + ElementCount(unsigned Min, bool Scalable) + : Min(Min), Scalable(Scalable) {} + + ElementCount operator*(unsigned RHS) { + return { Min * RHS, Scalable }; + } + + ElementCount& operator*=(unsigned RHS) { + Min *= RHS; + return *this; + } + + ElementCount operator/(unsigned RHS) { + return { Min / RHS, Scalable }; + } + + ElementCount& operator/=(unsigned RHS) { + Min /= RHS; + return *this; + } + + bool operator==(const ElementCount& RHS) { + return Min == RHS.Min && Scalable == RHS.Scalable; + } + }; + constexpr MVT() : SimpleTy(INVALID_SIMPLE_VALUE_TYPE) {} constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {} @@ -221,6 +312,15 @@ class MVT { SimpleTy <= MVT::LAST_VECTOR_VALUETYPE); } + /// Return true if this is a vector value type where the + /// runtime length is machine dependent + bool isScalableVector() const { + return ((SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VALUETYPE && + SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VALUETYPE) || + (SimpleTy >= MVT::FIRST_FP_SCALABLE_VALUETYPE && + SimpleTy <= MVT::LAST_FP_SCALABLE_VALUETYPE)); + } + /// Return true if this is a 16-bit vector type. 
bool is16BitVector() const { return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 || @@ -318,7 +418,12 @@ class MVT { case v32i1: case v64i1: case v512i1: - case v1024i1: return i1; + case v1024i1: + case nxv2i1: + case nxv4i1: + case nxv8i1: + case nxv16i1: + case nxv32i1: return i1; case v1i8: case v2i8: case v4i8: @@ -327,7 +432,13 @@ class MVT { case v32i8: case v64i8: case v128i8: - case v256i8: return i8; + case v256i8: + case nxv1i8: + case nxv2i8: + case nxv4i8: + case nxv8i8: + case nxv16i8: + case nxv32i8: return i8; case v1i16: case v2i16: case v4i16: @@ -335,33 +446,63 @@ class MVT { case v16i16: case v32i16: case v64i16: - case v128i16: return i16; + case v128i16: + case nxv1i16: + case nxv2i16: + case nxv4i16: + case nxv8i16: + case nxv16i16: + case nxv32i16: return i16; case v1i32: case v2i32: case v4i32: case v8i32: case v16i32: case v32i32: - case v64i32: return i32; + case v64i32: + case nxv1i32: + case nxv2i32: + case nxv4i32: + case nxv8i32: + case nxv16i32: + case nxv32i32: return i32; case v1i64: case v2i64: case v4i64: case v8i64: case v16i64: - case v32i64: return i64; + case v32i64: + case nxv1i64: + case nxv2i64: + case nxv4i64: + case nxv8i64: + case nxv16i64: + case nxv32i64: return i64; case v1i128: return i128; case v2f16: case v4f16: - case v8f16: return f16; + case v8f16: + case nxv2f16: + case nxv4f16: + case nxv8f16: return f16; case v1f32: case v2f32: case v4f32: case v8f32: - case v16f32: return f32; + case v16f32: + case nxv1f32: + case nxv2f32: + case nxv4f32: + case nxv8f32: + case nxv16f32: return f32; case v1f64: case v2f64: case v4f64: - case v8f64: return f64; + case v8f64: + case nxv1f64: + case nxv2f64: + case nxv4f64: + case nxv8f64: return f64; } } @@ -382,13 +523,24 @@ class MVT { case v32i8: case v32i16: case v32i32: - case v32i64: return 32; + case v32i64: + case nxv32i1: + case nxv32i8: + case nxv32i16: + case nxv32i32: + case nxv32i64: return 32; case v16i1: case v16i8: case v16i16: case v16i32: case v16i64: - 
case v16f32: return 16; + case v16f32: + case nxv16i1: + case nxv16i8: + case nxv16i16: + case nxv16i32: + case nxv16i64: + case nxv16f32: return 16; case v8i1: case v8i8: case v8i16: @@ -396,7 +548,15 @@ class MVT { case v8i64: case v8f16: case v8f32: - case v8f64: return 8; + case v8f64: + case nxv8i1: + case nxv8i8: + case nxv8i16: + case nxv8i32: + case nxv8i64: + case nxv8f16: + case nxv8f32: + case nxv8f64: return 8; case v4i1: case v4i8: case v4i16: @@ -404,7 +564,15 @@ class MVT { case v4i64: case v4f16: case v4f32: - case v4f64: return 4; + case v4f64: + case nxv4i1: + case nxv4i8: + case nxv4i16: + case nxv4i32: + case nxv4i64: + case nxv4f16: + case nxv4f32: + case nxv4f64: return 4; case v2i1: case v2i8: case v2i16: @@ -412,17 +580,35 @@ class MVT { case v2i64: case v2f16: case v2f32: - case v2f64: return 2; + case v2f64: + case nxv2i1: + case nxv2i8: + case nxv2i16: + case nxv2i32: + case nxv2i64: + case nxv2f16: + case nxv2f32: + case nxv2f64: return 2; case v1i8: case v1i16: case v1i32: case v1i64: case v1i128: case v1f32: - case v1f64: return 1; + case v1f64: + case nxv1i8: + case nxv1i16: + case nxv1i32: + case nxv1i64: + case nxv1f32: + case nxv1f64: return 1; } } + MVT::ElementCount getVectorElementCount() const { + return { getVectorNumElements(), isScalableVector() }; + } + unsigned getSizeInBits() const { switch (SimpleTy) { default: @@ -443,16 +629,23 @@ class MVT { case Metadata: llvm_unreachable("Value type is metadata."); case i1 : return 1; - case v2i1: return 2; - case v4i1: return 4; + case v2i1: + case nxv2i1: return 2; + case v4i1: + case nxv4i1: return 4; case i8 : case v1i8: - case v8i1: return 8; + case v8i1: + case nxv1i8: + case nxv8i1: return 8; case i16 : case f16: case v16i1: case v2i8: - case v1i16: return 16; + case v1i16: + case nxv16i1: + case nxv2i8: + case nxv1i16: return 16; case f32 : case i32 : case v32i1: @@ -460,7 +653,13 @@ class MVT { case v2i16: case v2f16: case v1f32: - case v1i32: return 32; + case v1i32: + 
case nxv32i1: + case nxv4i8: + case nxv2i16: + case nxv1i32: + case nxv2f16: + case nxv1f32: return 32; case x86mmx: case f64 : case i64 : @@ -471,7 +670,14 @@ class MVT { case v1i64: case v4f16: case v2f32: - case v1f64: return 64; + case v1f64: + case nxv8i8: + case nxv4i16: + case nxv2i32: + case nxv1i64: + case nxv4f16: + case nxv2f32: + case nxv1f64: return 64; case f80 : return 80; case f128: case ppcf128: @@ -483,29 +689,50 @@ class MVT { case v1i128: case v8f16: case v4f32: - case v2f64: return 128; + case v2f64: + case nxv16i8: + case nxv8i16: + case nxv4i32: + case nxv2i64: + case nxv8f16: + case nxv4f32: + case nxv2f64: return 128; case v32i8: case v16i16: case v8i32: case v4i64: case v8f32: - case v4f64: return 256; + case v4f64: + case nxv32i8: + case nxv16i16: + case nxv8i32: + case nxv4i64: + case nxv8f32: + case nxv4f64: return 256; case v512i1: case v64i8: case v32i16: case v16i32: case v8i64: case v16f32: - case v8f64: return 512; + case v8f64: + case nxv32i16: + case nxv16i32: + case nxv8i64: + case nxv16f32: + case nxv8f64: return 512; case v1024i1: case v128i8: case v64i16: case v32i32: - case v16i64: return 1024; + case v16i64: + case nxv32i32: + case nxv16i64: return 1024; case v256i8: case v128i16: case v64i32: - case v32i64: return 2048; + case v32i64: + case nxv32i64: return 2048; } } @@ -659,6 +886,83 @@ class MVT { return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE); } + static MVT getScalableVectorVT(MVT VT, unsigned NumElements) { + switch(VT.SimpleTy) { + default: + break; + case MVT::i1: + if (NumElements == 2) return MVT::nxv2i1; + if (NumElements == 4) return MVT::nxv4i1; + if (NumElements == 8) return MVT::nxv8i1; + if (NumElements == 16) return MVT::nxv16i1; + if (NumElements == 32) return MVT::nxv32i1; + break; + case MVT::i8: + if (NumElements == 1) return MVT::nxv1i8; + if (NumElements == 2) return MVT::nxv2i8; + if (NumElements == 4) return MVT::nxv4i8; + if (NumElements == 8) return MVT::nxv8i8; + if (NumElements 
== 16) return MVT::nxv16i8; + if (NumElements == 32) return MVT::nxv32i8; + break; + case MVT::i16: + if (NumElements == 1) return MVT::nxv1i16; + if (NumElements == 2) return MVT::nxv2i16; + if (NumElements == 4) return MVT::nxv4i16; + if (NumElements == 8) return MVT::nxv8i16; + if (NumElements == 16) return MVT::nxv16i16; + if (NumElements == 32) return MVT::nxv32i16; + break; + case MVT::i32: + if (NumElements == 1) return MVT::nxv1i32; + if (NumElements == 2) return MVT::nxv2i32; + if (NumElements == 4) return MVT::nxv4i32; + if (NumElements == 8) return MVT::nxv8i32; + if (NumElements == 16) return MVT::nxv16i32; + if (NumElements == 32) return MVT::nxv32i32; + break; + case MVT::i64: + if (NumElements == 1) return MVT::nxv1i64; + if (NumElements == 2) return MVT::nxv2i64; + if (NumElements == 4) return MVT::nxv4i64; + if (NumElements == 8) return MVT::nxv8i64; + if (NumElements == 16) return MVT::nxv16i64; + if (NumElements == 32) return MVT::nxv32i64; + break; + case MVT::f16: + if (NumElements == 2) return MVT::nxv2f16; + if (NumElements == 4) return MVT::nxv4f16; + if (NumElements == 8) return MVT::nxv8f16; + break; + case MVT::f32: + if (NumElements == 1) return MVT::nxv1f32; + if (NumElements == 2) return MVT::nxv2f32; + if (NumElements == 4) return MVT::nxv4f32; + if (NumElements == 8) return MVT::nxv8f32; + if (NumElements == 16) return MVT::nxv16f32; + break; + case MVT::f64: + if (NumElements == 1) return MVT::nxv1f64; + if (NumElements == 2) return MVT::nxv2f64; + if (NumElements == 4) return MVT::nxv4f64; + if (NumElements == 8) return MVT::nxv8f64; + break; + } + return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE); + } + + static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) { + if (IsScalable) + return getScalableVectorVT(VT, NumElements); + return getVectorVT(VT, NumElements); + } + + static MVT getVectorVT(MVT VT, MVT::ElementCount EC) { + if (EC.Scalable) + return getScalableVectorVT(VT, EC.Min); + return 
getVectorVT(VT, EC.Min); + } + /// Return the value type corresponding to the specified type. This returns /// all pointers as iPTR. If HandleUnknown is true, unknown types are /// returned as Other, otherwise they are invalid. @@ -709,6 +1013,14 @@ class MVT { MVT::FIRST_FP_VECTOR_VALUETYPE, (MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1)); } + static mvt_range integer_scalable_vector_valuetypes() { + return mvt_range(MVT::FIRST_INTEGER_SCALABLE_VALUETYPE, + (MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VALUETYPE + 1)); + } + static mvt_range fp_scalable_vector_valuetypes() { + return mvt_range(MVT::FIRST_FP_SCALABLE_VALUETYPE, + (MVT::SimpleValueType)(MVT::LAST_FP_SCALABLE_VALUETYPE + 1)); + } /// @} }; diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h index 0a3063663cef8..b404b4ca701f9 100644 --- a/include/llvm/CodeGen/ValueTypes.h +++ b/include/llvm/CodeGen/ValueTypes.h @@ -44,7 +44,7 @@ namespace llvm { bool operator!=(EVT VT) const { if (V.SimpleTy != VT.V.SimpleTy) return true; - if (V.SimpleTy < 0) + if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE) return LLVMTy != VT.LLVMTy; return false; } @@ -60,31 +60,48 @@ namespace llvm { /// bits. static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) { MVT M = MVT::getIntegerVT(BitWidth); - if (M.SimpleTy >= 0) + if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE) return M; return getExtendedIntegerVT(Context, BitWidth); } /// Returns the EVT that represents a vector NumElements in length, where /// each element is of type VT. 
- static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements) { - MVT M = MVT::getVectorVT(VT.V, NumElements); - if (M.SimpleTy >= 0) + static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, + bool IsScalable = false) { + MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable); + if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE) return M; + + assert(!IsScalable && "We don't support extended scalable types yet"); return getExtendedVectorVT(Context, VT, NumElements); } + /// Returns the EVT that represents a vector EC.Min elements in length, + /// where each element is of type VT. + static EVT getVectorVT(LLVMContext &Context, EVT VT, MVT::ElementCount EC) { + MVT M = MVT::getVectorVT(VT.V, EC); + if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE) + return M; + assert (!EC.Scalable && "We don't support extended scalable types yet"); + return getExtendedVectorVT(Context, VT, EC.Min); + } + /// Return a vector with the same number of elements as this vector, but /// with the element type converted to an integer type with the same /// bitwidth. EVT changeVectorElementTypeToInteger() const { - if (!isSimple()) + if (!isSimple()) { + assert (!isScalableVector() && + "We don't support extended scalable types yet"); return changeExtendedVectorElementTypeToInteger(); + } MVT EltTy = getSimpleVT().getVectorElementType(); unsigned BitWidth = EltTy.getSizeInBits(); MVT IntTy = MVT::getIntegerVT(BitWidth); - MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements()); - assert(VecTy.SimpleTy >= 0 && + MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements(), + isScalableVector()); + assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE && "Simple vector VT not representable by simple integer vector VT!"); return VecTy; } @@ -104,7 +121,7 @@ namespace llvm { /// Test if the given EVT is simple (as opposed to being extended). 
bool isSimple() const { - return V.SimpleTy >= 0; + return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE; } /// Test if the given EVT is extended (as opposed to being simple). @@ -132,6 +149,17 @@ namespace llvm { return isSimple() ? V.isVector() : isExtendedVector(); } + /// Return true if this is a vector type where the runtime + /// length is machine dependent + bool isScalableVector() const { + // FIXME: We don't support extended scalable types yet, because the + // matching IR type doesn't exist. Once it has been added, this can + // be changed to call isExtendedScalableVector. + if (!isSimple()) + return false; + return V.isScalableVector(); + } + /// Return true if this is a 16-bit vector type. bool is16BitVector() const { return isSimple() ? V.is16BitVector() : isExtended16BitVector(); @@ -247,6 +275,17 @@ namespace llvm { return getExtendedVectorNumElements(); } + // Given a (possibly scalable) vector type, return the ElementCount + MVT::ElementCount getVectorElementCount() const { + assert((isVector()) && "Invalid vector type!"); + if (isSimple()) + return V.getVectorElementCount(); + + assert(!isScalableVector() && + "We don't support extended scalable types yet"); + return {getExtendedVectorNumElements(), false}; + } + /// Return the size of the specified value type in bits. unsigned getSizeInBits() const { if (isSimple()) @@ -301,7 +340,17 @@ namespace llvm { EVT widenIntegerVectorElementType(LLVMContext &Context) const { EVT EltVT = getVectorElementType(); EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits()); - return EVT::getVectorVT(Context, EltVT, getVectorNumElements()); + return EVT::getVectorVT(Context, EltVT, getVectorElementCount()); + } + + // Return a VT for a vector type with the same element type but + // half the number of elements. The type returned may be an + // extended type. 
+ EVT getHalfNumVectorElementsVT(LLVMContext &Context) const { + EVT EltVT = getVectorElementType(); + auto EltCnt = getVectorElementCount(); + assert(!(EltCnt.Min & 1) && "Splitting vector, but not in half!"); + return EVT::getVectorVT(Context, EltVT, EltCnt / 2); } /// Returns true if the given vector is a power of 2. @@ -316,7 +365,8 @@ namespace llvm { if (!isPow2VectorType()) { unsigned NElts = getVectorNumElements(); unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts); - return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts); + return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts, + isScalableVector()); } else { return *this; diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td index f7b1661d7451b..cd84344754512 100644 --- a/include/llvm/CodeGen/ValueTypes.td +++ b/include/llvm/CodeGen/ValueTypes.td @@ -19,101 +19,147 @@ class ValueType<int size, int value> { int Value = value; } -def OtherVT: ValueType<0 , 0>; // "Other" value -def i1 : ValueType<1 , 1>; // One bit boolean value -def i8 : ValueType<8 , 2>; // 8-bit integer value -def i16 : ValueType<16 , 3>; // 16-bit integer value -def i32 : ValueType<32 , 4>; // 32-bit integer value -def i64 : ValueType<64 , 5>; // 64-bit integer value -def i128 : ValueType<128, 6>; // 128-bit integer value -def f16 : ValueType<16 , 7>; // 16-bit floating point value -def f32 : ValueType<32 , 8>; // 32-bit floating point value -def f64 : ValueType<64 , 9>; // 64-bit floating point value -def f80 : ValueType<80 , 10>; // 80-bit floating point value -def f128 : ValueType<128, 11>; // 128-bit floating point value -def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value - -def v2i1 : ValueType<2 , 13>; // 2 x i1 vector value -def v4i1 : ValueType<4 , 14>; // 4 x i1 vector value -def v8i1 : ValueType<8 , 15>; // 8 x i1 vector value -def v16i1 : ValueType<16, 16>; // 16 x i1 vector value -def v32i1 : ValueType<32 , 17>; // 32 x i1 vector value -def v64i1 : 
ValueType<64 , 18>; // 64 x i1 vector value -def v512i1 : ValueType<512, 19>; // 512 x i1 vector value -def v1024i1: ValueType<1024,20>; //1024 x i1 vector value - -def v1i8 : ValueType<16, 21>; // 1 x i8 vector value -def v2i8 : ValueType<16 , 22>; // 2 x i8 vector value -def v4i8 : ValueType<32 , 23>; // 4 x i8 vector value -def v8i8 : ValueType<64 , 24>; // 8 x i8 vector value -def v16i8 : ValueType<128, 25>; // 16 x i8 vector value -def v32i8 : ValueType<256, 26>; // 32 x i8 vector value -def v64i8 : ValueType<512, 27>; // 64 x i8 vector value -def v128i8 : ValueType<1024,28>; //128 x i8 vector value -def v256i8 : ValueType<2048,29>; //256 x i8 vector value - -def v1i16 : ValueType<16 , 30>; // 1 x i16 vector value -def v2i16 : ValueType<32 , 31>; // 2 x i16 vector value -def v4i16 : ValueType<64 , 32>; // 4 x i16 vector value -def v8i16 : ValueType<128, 33>; // 8 x i16 vector value -def v16i16 : ValueType<256, 34>; // 16 x i16 vector value -def v32i16 : ValueType<512, 35>; // 32 x i16 vector value -def v64i16 : ValueType<1024,36>; // 64 x i16 vector value -def v128i16: ValueType<2048,37>; //128 x i16 vector value - -def v1i32 : ValueType<32 , 38>; // 1 x i32 vector value -def v2i32 : ValueType<64 , 39>; // 2 x i32 vector value -def v4i32 : ValueType<128, 40>; // 4 x i32 vector value -def v8i32 : ValueType<256, 41>; // 8 x i32 vector value -def v16i32 : ValueType<512, 42>; // 16 x i32 vector value -def v32i32 : ValueType<1024,43>; // 32 x i32 vector value -def v64i32 : ValueType<2048,44>; // 32 x i32 vector value - -def v1i64 : ValueType<64 , 45>; // 1 x i64 vector value -def v2i64 : ValueType<128, 46>; // 2 x i64 vector value -def v4i64 : ValueType<256, 47>; // 4 x i64 vector value -def v8i64 : ValueType<512, 48>; // 8 x i64 vector value -def v16i64 : ValueType<1024,49>; // 16 x i64 vector value -def v32i64 : ValueType<2048,50>; // 32 x i64 vector value - -def v1i128 : ValueType<128, 51>; // 1 x i128 vector value - -def v2f16 : ValueType<32 , 52>; // 2 x f16 
vector value -def v4f16 : ValueType<64 , 53>; // 4 x f16 vector value -def v8f16 : ValueType<128, 54>; // 8 x f16 vector value -def v1f32 : ValueType<32 , 55>; // 1 x f32 vector value -def v2f32 : ValueType<64 , 56>; // 2 x f32 vector value -def v4f32 : ValueType<128, 57>; // 4 x f32 vector value -def v8f32 : ValueType<256, 58>; // 8 x f32 vector value -def v16f32 : ValueType<512, 59>; // 16 x f32 vector value -def v1f64 : ValueType<64, 60>; // 1 x f64 vector value -def v2f64 : ValueType<128, 61>; // 2 x f64 vector value -def v4f64 : ValueType<256, 62>; // 4 x f64 vector value -def v8f64 : ValueType<512, 63>; // 8 x f64 vector value - - -def x86mmx : ValueType<64 , 64>; // X86 MMX value -def FlagVT : ValueType<0 , 65>; // Pre-RA sched glue -def isVoid : ValueType<0 , 66>; // Produces no value -def untyped: ValueType<8 , 67>; // Produces an untyped value -def token : ValueType<0 , 120>; // TokenTy -def MetadataVT: ValueType<0, 121>; // Metadata +def OtherVT: ValueType<0 , 1>; // "Other" value +def i1 : ValueType<1 , 2>; // One bit boolean value +def i8 : ValueType<8 , 3>; // 8-bit integer value +def i16 : ValueType<16 , 4>; // 16-bit integer value +def i32 : ValueType<32 , 5>; // 32-bit integer value +def i64 : ValueType<64 , 6>; // 64-bit integer value +def i128 : ValueType<128, 7>; // 128-bit integer value +def f16 : ValueType<16 , 8>; // 16-bit floating point value +def f32 : ValueType<32 , 9>; // 32-bit floating point value +def f64 : ValueType<64 , 10>; // 64-bit floating point value +def f80 : ValueType<80 , 11>; // 80-bit floating point value +def f128 : ValueType<128, 12>; // 128-bit floating point value +def ppcf128: ValueType<128, 13>; // PPC 128-bit floating point value + +def v2i1 : ValueType<2 , 14>; // 2 x i1 vector value +def v4i1 : ValueType<4 , 15>; // 4 x i1 vector value +def v8i1 : ValueType<8 , 16>; // 8 x i1 vector value +def v16i1 : ValueType<16, 17>; // 16 x i1 vector value +def v32i1 : ValueType<32 , 18>; // 32 x i1 vector value +def v64i1 : 
ValueType<64 , 19>; // 64 x i1 vector value +def v512i1 : ValueType<512, 20>; // 512 x i1 vector value +def v1024i1: ValueType<1024,21>; //1024 x i1 vector value + +def v1i8 : ValueType<16, 22>; // 1 x i8 vector value +def v2i8 : ValueType<16 , 23>; // 2 x i8 vector value +def v4i8 : ValueType<32 , 24>; // 4 x i8 vector value +def v8i8 : ValueType<64 , 25>; // 8 x i8 vector value +def v16i8 : ValueType<128, 26>; // 16 x i8 vector value +def v32i8 : ValueType<256, 27>; // 32 x i8 vector value +def v64i8 : ValueType<512, 28>; // 64 x i8 vector value +def v128i8 : ValueType<1024,29>; //128 x i8 vector value +def v256i8 : ValueType<2048,30>; //256 x i8 vector value + +def v1i16 : ValueType<16 , 31>; // 1 x i16 vector value +def v2i16 : ValueType<32 , 32>; // 2 x i16 vector value +def v4i16 : ValueType<64 , 33>; // 4 x i16 vector value +def v8i16 : ValueType<128, 34>; // 8 x i16 vector value +def v16i16 : ValueType<256, 35>; // 16 x i16 vector value +def v32i16 : ValueType<512, 36>; // 32 x i16 vector value +def v64i16 : ValueType<1024,37>; // 64 x i16 vector value +def v128i16: ValueType<2048,38>; //128 x i16 vector value + +def v1i32 : ValueType<32 , 39>; // 1 x i32 vector value +def v2i32 : ValueType<64 , 40>; // 2 x i32 vector value +def v4i32 : ValueType<128, 41>; // 4 x i32 vector value +def v8i32 : ValueType<256, 42>; // 8 x i32 vector value +def v16i32 : ValueType<512, 43>; // 16 x i32 vector value +def v32i32 : ValueType<1024,44>; // 32 x i32 vector value +def v64i32 : ValueType<2048,45>; // 32 x i32 vector value + +def v1i64 : ValueType<64 , 46>; // 1 x i64 vector value +def v2i64 : ValueType<128, 47>; // 2 x i64 vector value +def v4i64 : ValueType<256, 48>; // 4 x i64 vector value +def v8i64 : ValueType<512, 49>; // 8 x i64 vector value +def v16i64 : ValueType<1024,50>; // 16 x i64 vector value +def v32i64 : ValueType<2048,51>; // 32 x i64 vector value + +def v1i128 : ValueType<128, 52>; // 1 x i128 vector value + +def nxv2i1 : ValueType<2, 53>; // n x 2 x i1 
vector value +def nxv4i1 : ValueType<4, 54>; // n x 4 x i1 vector value +def nxv8i1 : ValueType<8, 55>; // n x 8 x i1 vector value +def nxv16i1 : ValueType<16, 56>; // n x 16 x i1 vector value +def nxv32i1 : ValueType<32, 57>; // n x 32 x i1 vector value + +def nxv1i8 : ValueType<8, 58>; // n x 1 x i8 vector value +def nxv2i8 : ValueType<16, 59>; // n x 2 x i8 vector value +def nxv4i8 : ValueType<32, 60>; // n x 4 x i8 vector value +def nxv8i8 : ValueType<64, 61>; // n x 8 x i8 vector value +def nxv16i8 : ValueType<128, 62>; // n x 16 x i8 vector value +def nxv32i8 : ValueType<256, 63>; // n x 32 x i8 vector value + +def nxv1i16 : ValueType<16, 64>; // n x 1 x i16 vector value +def nxv2i16 : ValueType<32, 65>; // n x 2 x i16 vector value +def nxv4i16 : ValueType<64, 66>; // n x 4 x i16 vector value +def nxv8i16 : ValueType<128, 67>; // n x 8 x i16 vector value +def nxv16i16: ValueType<256, 68>; // n x 16 x i16 vector value +def nxv32i16: ValueType<512, 69>; // n x 32 x i16 vector value + +def nxv1i32 : ValueType<32, 70>; // n x 1 x i32 vector value +def nxv2i32 : ValueType<64, 71>; // n x 2 x i32 vector value +def nxv4i32 : ValueType<128, 72>; // n x 4 x i32 vector value +def nxv8i32 : ValueType<256, 73>; // n x 8 x i32 vector value +def nxv16i32: ValueType<512, 74>; // n x 16 x i32 vector value +def nxv32i32: ValueType<1024,75>; // n x 32 x i32 vector value + +def nxv1i64 : ValueType<64, 76>; // n x 1 x i64 vector value +def nxv2i64 : ValueType<128, 77>; // n x 2 x i64 vector value +def nxv4i64 : ValueType<256, 78>; // n x 4 x i64 vector value +def nxv8i64 : ValueType<512, 79>; // n x 8 x i64 vector value +def nxv16i64: ValueType<1024,80>; // n x 16 x i64 vector value +def nxv32i64: ValueType<2048,81>; // n x 32 x i64 vector value + +def v2f16 : ValueType<32 , 82>; // 2 x f16 vector value +def v4f16 : ValueType<64 , 83>; // 4 x f16 vector value +def v8f16 : ValueType<128, 84>; // 8 x f16 vector value +def v1f32 : ValueType<32 , 85>; // 1 x f32 vector value +def 
v2f32 : ValueType<64 , 86>; // 2 x f32 vector value +def v4f32 : ValueType<128, 87>; // 4 x f32 vector value +def v8f32 : ValueType<256, 88>; // 8 x f32 vector value +def v16f32 : ValueType<512, 89>; // 16 x f32 vector value +def v1f64 : ValueType<64, 90>; // 1 x f64 vector value +def v2f64 : ValueType<128, 91>; // 2 x f64 vector value +def v4f64 : ValueType<256, 92>; // 4 x f64 vector value +def v8f64 : ValueType<512, 93>; // 8 x f64 vector value + +def nxv2f16 : ValueType<32 , 94>; // n x 2 x f16 vector value +def nxv4f16 : ValueType<64 , 95>; // n x 4 x f16 vector value +def nxv8f16 : ValueType<128, 96>; // n x 8 x f16 vector value +def nxv1f32 : ValueType<32 , 97>; // n x 1 x f32 vector value +def nxv2f32 : ValueType<64 , 98>; // n x 2 x f32 vector value +def nxv4f32 : ValueType<128, 99>; // n x 4 x f32 vector value +def nxv8f32 : ValueType<256, 100>; // n x 8 x f32 vector value +def nxv16f32 : ValueType<512, 101>; // n x 16 x f32 vector value +def nxv1f64 : ValueType<64, 102>; // n x 1 x f64 vector value +def nxv2f64 : ValueType<128, 103>; // n x 2 x f64 vector value +def nxv4f64 : ValueType<256, 104>; // n x 4 x f64 vector value +def nxv8f64 : ValueType<512, 105>; // n x 8 x f64 vector value + +def x86mmx : ValueType<64 , 106>; // X86 MMX value +def FlagVT : ValueType<0 , 107>; // Pre-RA sched glue +def isVoid : ValueType<0 , 108>; // Produces no value +def untyped: ValueType<8 , 109>; // Produces an untyped value +def token : ValueType<0 , 248>; // TokenTy +def MetadataVT: ValueType<0, 249>; // Metadata // Pseudo valuetype mapped to the current pointer size to any address space. // Should only be used in TableGen. 
-def iPTRAny : ValueType<0, 122>; +def iPTRAny : ValueType<0, 250>; // Pseudo valuetype to represent "vector of any size" -def vAny : ValueType<0 , 123>; +def vAny : ValueType<0 , 251>; // Pseudo valuetype to represent "float of any format" -def fAny : ValueType<0 , 124>; +def fAny : ValueType<0 , 252>; // Pseudo valuetype to represent "integer of any bit width" -def iAny : ValueType<0 , 125>; +def iAny : ValueType<0 , 253>; // Pseudo valuetype mapped to the current pointer size. -def iPTR : ValueType<0 , 126>; +def iPTR : ValueType<0 , 254>; // Pseudo valuetype to represent "any type of any size". -def Any : ValueType<0 , 127>; +def Any : ValueType<0 , 255>; diff --git a/include/llvm/Config/config.h.cmake b/include/llvm/Config/config.h.cmake index a3c919d39804f..a64e208fa7846 100644 --- a/include/llvm/Config/config.h.cmake +++ b/include/llvm/Config/config.h.cmake @@ -59,9 +59,6 @@ /* Define to 1 if you have the <errno.h> header file. */ #cmakedefine HAVE_ERRNO_H ${HAVE_ERRNO_H} -/* Define to 1 if you have the <execinfo.h> header file. */ -#cmakedefine HAVE_EXECINFO_H ${HAVE_EXECINFO_H} - /* Define to 1 if you have the <fcntl.h> header file. */ #cmakedefine HAVE_FCNTL_H ${HAVE_FCNTL_H} @@ -389,6 +386,9 @@ /* LLVM version information */ #cmakedefine LLVM_VERSION_INFO "${LLVM_VERSION_INFO}" +/* Whether tools show host and target info when invoked with --version */ +#cmakedefine01 LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO + /* Major version of the LLVM API */ #define LLVM_VERSION_MAJOR ${LLVM_VERSION_MAJOR} diff --git a/include/llvm/DebugInfo/DWARF/DWARFDie.h b/include/llvm/DebugInfo/DWARF/DWARFDie.h index 33e24fe3adc90..ee06125ea2786 100644 --- a/include/llvm/DebugInfo/DWARF/DWARFDie.h +++ b/include/llvm/DebugInfo/DWARF/DWARFDie.h @@ -247,16 +247,11 @@ public: /// DW_AT_call_line attribute in this DIE. /// \param CallColumn filled in with non-zero if successful, zero if there is /// no DW_AT_call_column attribute in this DIE. 
+ /// \param CallDiscriminator filled in with non-zero if successful, zero if + /// there is no DW_AT_GNU_discriminator attribute in this DIE. void getCallerFrame(uint32_t &CallFile, uint32_t &CallLine, - uint32_t &CallColumn) const; + uint32_t &CallColumn, uint32_t &CallDiscriminator) const; - /// Get inlined chain for a given address, rooted at the current DIE. - /// Returns empty chain if address is not contained in address range - /// of current DIE. - void - getInlinedChainForAddress(const uint64_t Address, - SmallVectorImpl<DWARFDie> &InlinedChain) const; - class attribute_iterator; /// Get an iterator range to all attributes in the current DIE only. diff --git a/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/include/llvm/DebugInfo/DWARF/DWARFUnit.h index 40eb4434bd61e..023a0f7b9fb24 100644 --- a/include/llvm/DebugInfo/DWARF/DWARFUnit.h +++ b/include/llvm/DebugInfo/DWARF/DWARFUnit.h @@ -31,6 +31,7 @@ #include <cstdint> #include <memory> #include <vector> +#include <map> namespace llvm { @@ -134,6 +135,11 @@ class DWARFUnit { uint64_t BaseAddr; // The compile unit debug information entry items. std::vector<DWARFDebugInfoEntry> DieArray; + + // Map from range's start address to end address and corresponding DIE. + // IntervalMap does not support range removal, as a result, we use the + // std::map::upper_bound for address range lookup. + std::map<uint64_t, std::pair<uint64_t, DWARFDie>> AddrDieMap; typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator> die_iterator_range; @@ -183,6 +189,9 @@ public: AddrOffsetSectionBase = Base; } + // Recursively update address to Die map. + void updateAddressDieMap(DWARFDie Die); + void setRangesSection(StringRef RS, uint32_t Base) { RangeSection = RS; RangeSectionBase = Base; @@ -339,10 +348,10 @@ private: /// it was actually constructed. 
bool parseDWO(); - /// getSubprogramForAddress - Returns subprogram DIE with address range + /// getSubroutineForAddress - Returns subprogram DIE with address range /// encompassing the provided address. The pointer is alive as long as parsed /// compile unit DIEs are not cleared. - DWARFDie getSubprogramForAddress(uint64_t Address); + DWARFDie getSubroutineForAddress(uint64_t Address); }; } // end namespace llvm diff --git a/include/llvm/IR/Argument.h b/include/llvm/IR/Argument.h index 6fc1dd2f285a1..5c05f19abc1fd 100644 --- a/include/llvm/IR/Argument.h +++ b/include/llvm/IR/Argument.h @@ -108,18 +108,16 @@ public: bool hasSExtAttr() const; /// Add attributes to an argument. - void addAttr(AttributeList AS); + void addAttrs(AttrBuilder &B); - void addAttr(Attribute::AttrKind Kind) { - addAttr(AttributeList::get(getContext(), getArgNo() + 1, Kind)); - } + void addAttr(Attribute::AttrKind Kind); + + void addAttr(Attribute Attr); /// Remove attributes from an argument. void removeAttr(AttributeList AS); - void removeAttr(Attribute::AttrKind Kind) { - removeAttr(AttributeList::get(getContext(), getArgNo() + 1, Kind)); - } + void removeAttr(Attribute::AttrKind Kind); /// Check if an argument has a given attribute. 
bool hasAttribute(Attribute::AttrKind Kind) const; diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h index 121f57a433acb..b13f197d25fdc 100644 --- a/include/llvm/IR/Attributes.h +++ b/include/llvm/IR/Attributes.h @@ -357,9 +357,6 @@ public: AttributeList Attrs) const; AttributeList addAttributes(LLVMContext &C, unsigned Index, - AttributeSet AS) const; - - AttributeList addAttributes(LLVMContext &C, unsigned Index, const AttrBuilder &B) const; /// \brief Remove the specified attribute at the specified index from this diff --git a/include/llvm/IR/ConstantRange.h b/include/llvm/IR/ConstantRange.h index 6d704666933fc..47004e82cc19a 100644 --- a/include/llvm/IR/ConstantRange.h +++ b/include/llvm/IR/ConstantRange.h @@ -41,17 +41,14 @@ namespace llvm { class MDNode; /// This class represents a range of values. -/// class ConstantRange { APInt Lower, Upper; public: /// Initialize a full (the default) or empty set for the specified bit width. - /// explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true); /// Initialize a range to hold the single specified value. - /// ConstantRange(APInt Value); /// @brief Initialize a range of values explicitly. This will assert out if @@ -119,46 +116,36 @@ public: bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const; /// Return the lower value for this range. - /// const APInt &getLower() const { return Lower; } /// Return the upper value for this range. - /// const APInt &getUpper() const { return Upper; } /// Get the bit width of this ConstantRange. - /// uint32_t getBitWidth() const { return Lower.getBitWidth(); } /// Return true if this set contains all of the elements possible /// for this data-type. - /// bool isFullSet() const; /// Return true if this set contains no members. - /// bool isEmptySet() const; /// Return true if this set wraps around the top of the range. /// For example: [100, 8). 
- /// bool isWrappedSet() const; /// Return true if this set wraps around the INT_MIN of /// its bitwidth. For example: i8 [120, 140). - /// bool isSignWrappedSet() const; /// Return true if the specified value is in the set. - /// bool contains(const APInt &Val) const; /// Return true if the other range is a subset of this one. - /// bool contains(const ConstantRange &CR) const; /// If this set contains a single element, return it, otherwise return null. - /// const APInt *getSingleElement() const { if (Upper == Lower + 1) return &Lower; @@ -174,35 +161,27 @@ public: } /// Return true if this set contains exactly one member. - /// bool isSingleElement() const { return getSingleElement() != nullptr; } /// Return the number of elements in this set. - /// APInt getSetSize() const; /// Compare set size of this range with the range CR. - /// bool isSizeStrictlySmallerThanOf(const ConstantRange &CR) const; /// Return the largest unsigned value contained in the ConstantRange. - /// APInt getUnsignedMax() const; /// Return the smallest unsigned value contained in the ConstantRange. - /// APInt getUnsignedMin() const; /// Return the largest signed value contained in the ConstantRange. - /// APInt getSignedMax() const; /// Return the smallest signed value contained in the ConstantRange. - /// APInt getSignedMin() const; /// Return true if this range is equal to another range. - /// bool operator==(const ConstantRange &CR) const { return Lower == CR.Lower && Upper == CR.Upper; } @@ -213,8 +192,8 @@ public: /// Subtract the specified constant from the endpoints of this constant range. ConstantRange subtract(const APInt &CI) const; - /// \brief Subtract the specified range from this range (aka relative - /// complement of the sets). + /// Subtract the specified range from this range (aka relative complement of + /// the sets). 
ConstantRange difference(const ConstantRange &CR) const; /// Return the range that results from the intersection of @@ -223,7 +202,6 @@ public: /// smallest possible set size that does so. Because there may be two /// intersections with the same set size, A.intersectWith(B) might not /// be equal to B.intersectWith(A). - /// ConstantRange intersectWith(const ConstantRange &CR) const; /// Return the range that results from the union of this range @@ -231,7 +209,6 @@ public: /// elements of both sets, but may contain more. For example, [3, 9) union /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included /// in either set before. - /// ConstantRange unionWith(const ConstantRange &CR) const; /// Return a new range representing the possible values resulting @@ -331,15 +308,12 @@ public: ConstantRange lshr(const ConstantRange &Other) const; /// Return a new range that is the logical not of the current set. - /// ConstantRange inverse() const; /// Print out the bounds to a stream. - /// void print(raw_ostream &OS) const; /// Allow printing from a debugger easily. - /// void dump() const; }; diff --git a/include/llvm/IR/DIBuilder.h b/include/llvm/IR/DIBuilder.h index 69bd5c847a8d0..a4b2a02d50503 100644 --- a/include/llvm/IR/DIBuilder.h +++ b/include/llvm/IR/DIBuilder.h @@ -778,6 +778,9 @@ namespace llvm { } }; + // Create wrappers for C Binding types (see CBindingWrapping.h). 
+ DEFINE_ISA_CONVERSION_FUNCTIONS(DIBuilder, LLVMDIBuilderRef) + } // end namespace llvm #endif // LLVM_IR_DIBUILDER_H diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h index 8a924b40143aa..8041e35e0e0a6 100644 --- a/include/llvm/IR/DebugInfoMetadata.h +++ b/include/llvm/IR/DebugInfoMetadata.h @@ -2232,6 +2232,9 @@ public: expr_op_iterator expr_op_end() const { return expr_op_iterator(elements_end()); } + iterator_range<expr_op_iterator> expr_ops() const { + return {expr_op_begin(), expr_op_end()}; + } /// @} bool isValid() const; @@ -2240,7 +2243,7 @@ public: return MD->getMetadataID() == DIExpressionKind; } - /// Is the first element a DW_OP_deref?. + /// Return whether the first element a DW_OP_deref. bool startsWithDeref() const { return getNumElements() > 0 && getElement(0) == dwarf::DW_OP_deref; } diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h index 34dafebe0fc5d..d23c1ddf9257b 100644 --- a/include/llvm/IR/Instructions.h +++ b/include/llvm/IR/Instructions.h @@ -273,10 +273,11 @@ public: Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; } + Type *getPointerOperandType() const { return getPointerOperand()->getType(); } /// Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { - return getPointerOperand()->getType()->getPointerAddressSpace(); + return getPointerOperandType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -397,10 +398,11 @@ public: Value *getPointerOperand() { return getOperand(1); } const Value *getPointerOperand() const { return getOperand(1); } static unsigned getPointerOperandIndex() { return 1U; } + Type *getPointerOperandType() const { return getPointerOperand()->getType(); } /// Returns the address space of the pointer operand. 
unsigned getPointerAddressSpace() const { - return getPointerOperand()->getType()->getPointerAddressSpace(); + return getPointerOperandType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: diff --git a/include/llvm/IR/Metadata.h b/include/llvm/IR/Metadata.h index fd79355bff1ad..8f24a6a1d69d8 100644 --- a/include/llvm/IR/Metadata.h +++ b/include/llvm/IR/Metadata.h @@ -30,6 +30,7 @@ #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Value.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/CBindingWrapping.h" #include "llvm/Support/ErrorHandling.h" #include <cassert> #include <cstddef> @@ -133,6 +134,14 @@ public: /// @} }; +// Create wrappers for C Binding types (see CBindingWrapping.h). +DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef) + +// Specialized opaque metadata conversions. +inline Metadata **unwrap(LLVMMetadataRef *MDs) { + return reinterpret_cast<Metadata**>(MDs); +} + #define HANDLE_METADATA(CLASS) class CLASS; #include "llvm/IR/Metadata.def" diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h index 09f6c18970095..9c0a4159cad2f 100644 --- a/include/llvm/IR/ModuleSummaryIndex.h +++ b/include/llvm/IR/ModuleSummaryIndex.h @@ -160,7 +160,6 @@ private: std::vector<ValueInfo> RefEdgeList; protected: - /// GlobalValueSummary constructor. GlobalValueSummary(SummaryKind K, GVFlags Flags, std::vector<ValueInfo> Refs) : Kind(K), Flags(Flags), OriginalName(0), RefEdgeList(std::move(Refs)) {} @@ -221,7 +220,6 @@ class AliasSummary : public GlobalValueSummary { GlobalValueSummary *AliaseeSummary; public: - /// Summary constructors. AliasSummary(GVFlags Flags, std::vector<ValueInfo> Refs) : GlobalValueSummary(AliasKind, Flags, std::move(Refs)) {} @@ -297,7 +295,6 @@ private: std::unique_ptr<TypeIdInfo> TIdInfo; public: - /// Summary constructors. 
FunctionSummary(GVFlags Flags, unsigned NumInsts, std::vector<ValueInfo> Refs, std::vector<EdgeTy> CGEdges, std::vector<GlobalValue::GUID> TypeTests, @@ -418,7 +415,6 @@ template <> struct DenseMapInfo<FunctionSummary::ConstVCall> { class GlobalVarSummary : public GlobalValueSummary { public: - /// Summary constructors. GlobalVarSummary(GVFlags Flags, std::vector<ValueInfo> Refs) : GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)) {} diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h index 40f9c21f646bc..31a76b4ed6c32 100644 --- a/include/llvm/IR/PatternMatch.h +++ b/include/llvm/IR/PatternMatch.h @@ -267,15 +267,15 @@ inline cst_pred_ty<is_all_ones> m_AllOnes() { } inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; } -struct is_sign_bit { - bool isValue(const APInt &C) { return C.isSignBit(); } +struct is_sign_mask { + bool isValue(const APInt &C) { return C.isSignMask(); } }; /// \brief Match an integer or vector with only the sign bit(s) set. -inline cst_pred_ty<is_sign_bit> m_SignBit() { - return cst_pred_ty<is_sign_bit>(); +inline cst_pred_ty<is_sign_mask> m_SignMask() { + return cst_pred_ty<is_sign_mask>(); } -inline api_pred_ty<is_sign_bit> m_SignBit(const APInt *&V) { return V; } +inline api_pred_ty<is_sign_mask> m_SignMask(const APInt *&V) { return V; } struct is_power2 { bool isValue(const APInt &C) { return C.isPowerOf2(); } diff --git a/include/llvm/IR/Use.h b/include/llvm/IR/Use.h index 05b68ccbb38e8..6b56546f44219 100644 --- a/include/llvm/IR/Use.h +++ b/include/llvm/IR/Use.h @@ -61,9 +61,29 @@ public: /// that also works with less standard-compliant compilers void swap(Use &RHS); + /// Pointer traits for the UserRef PointerIntPair. This ensures we always + /// use the LSB regardless of pointer alignment on different targets. 
+ struct UserRefPointerTraits { + static inline void *getAsVoidPointer(User *P) { return P; } + static inline User *getFromVoidPointer(void *P) { + return (User *)P; + } + enum { NumLowBitsAvailable = 1 }; + }; + // A type for the word following an array of hung-off Uses in memory, which is // a pointer back to their User with the bottom bit set. - typedef PointerIntPair<User *, 1, unsigned> UserRef; + typedef PointerIntPair<User *, 1, unsigned, UserRefPointerTraits> UserRef; + + /// Pointer traits for the Prev PointerIntPair. This ensures we always use + /// the two LSBs regardless of pointer alignment on different targets. + struct PrevPointerTraits { + static inline void *getAsVoidPointer(Use **P) { return P; } + static inline Use **getFromVoidPointer(void *P) { + return (Use **)P; + } + enum { NumLowBitsAvailable = 2 }; + }; private: /// Destructor - Only for zap() @@ -115,7 +135,7 @@ private: Value *Val; Use *Next; - PointerIntPair<Use **, 2, PrevPtrTag> Prev; + PointerIntPair<Use **, 2, PrevPtrTag, PrevPointerTraits> Prev; void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); } diff --git a/include/llvm/MC/MCAsmInfo.h b/include/llvm/MC/MCAsmInfo.h index bd2717de9960b..869706c454834 100644 --- a/include/llvm/MC/MCAsmInfo.h +++ b/include/llvm/MC/MCAsmInfo.h @@ -65,8 +65,8 @@ protected: // Properties to be set by the target writer, used to configure asm printer. // - /// Pointer size in bytes. Default is 4. - unsigned PointerSize = 4; + /// Code pointer size in bytes. Default is 4. + unsigned CodePointerSize = 4; /// Size of the stack slot reserved for callee-saved registers, in bytes. /// Default is same as pointer size. @@ -384,8 +384,8 @@ public: explicit MCAsmInfo(); virtual ~MCAsmInfo(); - /// Get the pointer size in bytes. - unsigned getPointerSize() const { return PointerSize; } + /// Get the code pointer size in bytes. + unsigned getCodePointerSize() const { return CodePointerSize; } /// Get the callee-saved register stack slot /// size in bytes. 
diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h index e466b368ed34b..eb301031ba3fe 100644 --- a/include/llvm/MC/MCStreamer.h +++ b/include/llvm/MC/MCStreamer.h @@ -128,6 +128,7 @@ public: virtual void emitArch(unsigned Arch); virtual void emitArchExtension(unsigned ArchExt); virtual void emitObjectArch(unsigned Arch); + void emitTargetAttributes(const MCSubtargetInfo &STI); virtual void finishAttributeSection(); virtual void emitInst(uint32_t Inst, char Suffix = '\0'); diff --git a/include/llvm/MC/MCSubtargetInfo.h b/include/llvm/MC/MCSubtargetInfo.h index 6229db3bbcb28..bb16463588c3c 100644 --- a/include/llvm/MC/MCSubtargetInfo.h +++ b/include/llvm/MC/MCSubtargetInfo.h @@ -86,6 +86,10 @@ public: FeatureBits = FeatureBits_; } + bool hasFeature(unsigned Feature) const { + return FeatureBits[Feature]; + } + protected: /// Initialize the scheduling model and feature bits. /// diff --git a/include/llvm/Object/Archive.h b/include/llvm/Object/Archive.h index d423957d9b79d..807508107c56d 100644 --- a/include/llvm/Object/Archive.h +++ b/include/llvm/Object/Archive.h @@ -14,15 +14,20 @@ #ifndef LLVM_OBJECT_ARCHIVE_H #define LLVM_OBJECT_ARCHIVE_H +#include "llvm/ADT/iterator_range.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/StringRef.h" -#include "llvm/ADT/iterator_range.h" #include "llvm/Object/Binary.h" #include "llvm/Support/Chrono.h" #include "llvm/Support/Error.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" +#include <algorithm> +#include <cassert> +#include <cstdint> +#include <memory> +#include <string> +#include <vector> namespace llvm { namespace object { @@ -32,25 +37,28 @@ class Archive; class ArchiveMemberHeader { public: friend class Archive; + ArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr, uint64_t Size, Error *Err); // ArchiveMemberHeader() = default; /// Get the name without looking up long names. 
- Expected<llvm::StringRef> getRawName() const; + Expected<StringRef> getRawName() const; /// Get the name looking up long names. - Expected<llvm::StringRef> getName(uint64_t Size) const; + Expected<StringRef> getName(uint64_t Size) const; /// Members are not larger than 4GB. Expected<uint32_t> getSize() const; Expected<sys::fs::perms> getAccessMode() const; Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const; - llvm::StringRef getRawLastModified() const { + + StringRef getRawLastModified() const { return StringRef(ArMemHdr->LastModified, sizeof(ArMemHdr->LastModified)).rtrim(' '); } + Expected<unsigned> getUID() const; Expected<unsigned> getGID() const; @@ -75,11 +83,13 @@ private: class Archive : public Binary { virtual void anchor(); + public: class Child { friend Archive; - const Archive *Parent; friend ArchiveMemberHeader; + + const Archive *Parent; ArchiveMemberHeader Header; /// \brief Includes header but not padding byte. StringRef Data; @@ -103,17 +113,22 @@ public: Expected<StringRef> getName() const; Expected<std::string> getFullName() const; Expected<StringRef> getRawName() const { return Header.getRawName(); } + Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const { return Header.getLastModified(); } + StringRef getRawLastModified() const { return Header.getRawLastModified(); } + Expected<unsigned> getUID() const { return Header.getUID(); } Expected<unsigned> getGID() const { return Header.getGID(); } + Expected<sys::fs::perms> getAccessMode() const { return Header.getAccessMode(); } + /// \return the size of the archive member without the header or padding. Expected<uint64_t> getSize() const; /// \return the size in the archive header for this member. 
@@ -130,11 +145,12 @@ public: class child_iterator { Child C; - Error *E; + Error *E = nullptr; public: - child_iterator() : C(Child(nullptr, nullptr, nullptr)), E(nullptr) {} + child_iterator() : C(Child(nullptr, nullptr, nullptr)) {} child_iterator(const Child &C, Error *E) : C(C), E(E) {} + const Child *operator->() const { return &C; } const Child &operator*() const { return C; } @@ -171,14 +187,15 @@ public: uint32_t StringIndex; // Extra index to the string. public: - bool operator ==(const Symbol &other) const { - return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex); - } - Symbol(const Archive *p, uint32_t symi, uint32_t stri) : Parent(p) , SymbolIndex(symi) , StringIndex(stri) {} + + bool operator ==(const Symbol &other) const { + return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex); + } + StringRef getName() const; Expected<Child> getMember() const; Symbol getNext() const; @@ -186,8 +203,10 @@ public: class symbol_iterator { Symbol symbol; + public: symbol_iterator(const Symbol &s) : symbol(s) {} + const Symbol *operator->() const { return &symbol; } const Symbol &operator*() const { return symbol; } @@ -264,7 +283,7 @@ private: mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers; }; -} -} +} // end namespace object +} // end namespace llvm -#endif +#endif // LLVM_OBJECT_ARCHIVE_H diff --git a/include/llvm/Object/Binary.h b/include/llvm/Object/Binary.h index bdbe94301dc76..06788326ff578 100644 --- a/include/llvm/Object/Binary.h +++ b/include/llvm/Object/Binary.h @@ -15,10 +15,11 @@ #define LLVM_OBJECT_BINARY_H #include "llvm/ADT/Triple.h" -#include "llvm/Object/Error.h" -#include "llvm/Support/ErrorOr.h" -#include "llvm/Support/FileSystem.h" +#include "llvm/Support/Error.h" #include "llvm/Support/MemoryBuffer.h" +#include <algorithm> +#include <memory> +#include <utility> namespace llvm { @@ -29,9 +30,6 @@ namespace object { class Binary { private: - Binary() = delete; - Binary(const Binary &other) = delete; - 
unsigned int TypeID; protected: @@ -80,6 +78,8 @@ protected: } public: + Binary() = delete; + Binary(const Binary &other) = delete; virtual ~Binary(); StringRef getData() const; @@ -173,7 +173,7 @@ OwningBinary<T>::OwningBinary(std::unique_ptr<T> Bin, std::unique_ptr<MemoryBuffer> Buf) : Bin(std::move(Bin)), Buf(std::move(Buf)) {} -template <typename T> OwningBinary<T>::OwningBinary() {} +template <typename T> OwningBinary<T>::OwningBinary() = default; template <typename T> OwningBinary<T>::OwningBinary(OwningBinary &&Other) @@ -201,7 +201,9 @@ template <typename T> const T* OwningBinary<T>::getBinary() const { } Expected<OwningBinary<Binary>> createBinary(StringRef Path); -} -} -#endif +} // end namespace object + +} // end namespace llvm + +#endif // LLVM_OBJECT_BINARY_H diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h index 696042d29dabd..e0bb8f1cf3dd6 100644 --- a/include/llvm/Object/COFF.h +++ b/include/llvm/Object/COFF.h @@ -14,28 +14,39 @@ #ifndef LLVM_OBJECT_COFF_H #define LLVM_OBJECT_COFF_H -#include "llvm/ADT/PointerUnion.h" +#include "llvm/ADT/iterator_range.h" #include "llvm/DebugInfo/CodeView/CVDebugRecord.h" +#include "llvm/MC/SubtargetFeature.h" +#include "llvm/Object/Binary.h" +#include "llvm/Object/Error.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/COFF.h" #include "llvm/Support/Endian.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorOr.h" +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <system_error> namespace llvm { + template <typename T> class ArrayRef; namespace object { -class ImportDirectoryEntryRef; + +class BaseRelocRef; class DelayImportDirectoryEntryRef; class ExportDirectoryEntryRef; +class ImportDirectoryEntryRef; class ImportedSymbolRef; -class BaseRelocRef; -typedef content_iterator<ImportDirectoryEntryRef> import_directory_iterator; -typedef content_iterator<DelayImportDirectoryEntryRef> - delay_import_directory_iterator; -typedef 
content_iterator<ExportDirectoryEntryRef> export_directory_iterator; -typedef content_iterator<ImportedSymbolRef> imported_symbol_iterator; -typedef content_iterator<BaseRelocRef> base_reloc_iterator; + +using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>; +using delay_import_directory_iterator = + content_iterator<DelayImportDirectoryEntryRef>; +using export_directory_iterator = content_iterator<ExportDirectoryEntryRef>; +using imported_symbol_iterator = content_iterator<ImportedSymbolRef>; +using base_reloc_iterator = content_iterator<BaseRelocRef>; /// The DOS compatible header at the front of all PE/COFF executables. struct dos_header { @@ -190,10 +201,10 @@ struct import_lookup_table_entry { } }; -typedef import_lookup_table_entry<support::little32_t> - import_lookup_table_entry32; -typedef import_lookup_table_entry<support::little64_t> - import_lookup_table_entry64; +using import_lookup_table_entry32 = + import_lookup_table_entry<support::little32_t>; +using import_lookup_table_entry64 = + import_lookup_table_entry<support::little64_t>; struct delay_import_directory_table_entry { // dumpbin reports this field as "Characteristics" instead of "Attributes". @@ -226,8 +237,8 @@ union export_address_table_entry { support::ulittle32_t ForwarderRVA; }; -typedef support::ulittle32_t export_name_pointer_table_entry; -typedef support::ulittle16_t export_ordinal_table_entry; +using export_name_pointer_table_entry = support::ulittle32_t; +using export_ordinal_table_entry = support::ulittle16_t; struct StringTableOffset { support::ulittle32_t Zeroes; @@ -250,8 +261,8 @@ struct coff_symbol { uint8_t NumberOfAuxSymbols; }; -typedef coff_symbol<support::ulittle16_t> coff_symbol16; -typedef coff_symbol<support::ulittle32_t> coff_symbol32; +using coff_symbol16 = coff_symbol<support::ulittle16_t>; +using coff_symbol32 = coff_symbol<support::ulittle32_t>; // Contains only common parts of coff_symbol16 and coff_symbol32. 
struct coff_symbol_generic { @@ -264,9 +275,9 @@ struct coff_symbol_generic { class COFFSymbolRef { public: - COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS), CS32(nullptr) {} - COFFSymbolRef(const coff_symbol32 *CS) : CS16(nullptr), CS32(CS) {} - COFFSymbolRef() : CS16(nullptr), CS32(nullptr) {} + COFFSymbolRef() = default; + COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS) {} + COFFSymbolRef(const coff_symbol32 *CS) : CS32(CS) {} const void *getRawPtr() const { return CS16 ? static_cast<const void *>(CS16) : CS32; @@ -396,8 +407,8 @@ public: private: bool isSet() const { return CS16 || CS32; } - const coff_symbol16 *CS16; - const coff_symbol32 *CS32; + const coff_symbol16 *CS16 = nullptr; + const coff_symbol32 *CS32 = nullptr; }; struct coff_section { @@ -418,6 +429,7 @@ struct coff_section { return (Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) && NumberOfRelocations == UINT16_MAX; } + uint32_t getAlignment() const { // The IMAGE_SCN_TYPE_NO_PAD bit is a legacy way of getting to // IMAGE_SCN_ALIGN_1BYTES. @@ -508,6 +520,7 @@ struct coff_import_header { support::ulittle32_t SizeOfData; support::ulittle16_t OrdinalHint; support::ulittle16_t TypeInfo; + int getType() const { return TypeInfo & 0x3; } int getNameType() const { return (TypeInfo >> 2) & 0x7; } }; @@ -518,6 +531,7 @@ struct coff_import_directory_table_entry { support::ulittle32_t ForwarderChain; support::ulittle32_t NameRVA; support::ulittle32_t ImportAddressTableRVA; + bool isNull() const { return ImportLookupTableRVA == 0 && TimeDateStamp == 0 && ForwarderChain == 0 && NameRVA == 0 && ImportAddressTableRVA == 0; @@ -532,6 +546,7 @@ struct coff_tls_directory { IntTy AddressOfCallBacks; support::ulittle32_t SizeOfZeroFill; support::ulittle32_t Characteristics; + uint32_t getAlignment() const { // Bit [20:24] contains section alignment. 
uint32_t Shift = (Characteristics & 0x00F00000) >> 20; @@ -541,8 +556,8 @@ struct coff_tls_directory { } }; -typedef coff_tls_directory<support::little32_t> coff_tls_directory32; -typedef coff_tls_directory<support::little64_t> coff_tls_directory64; +using coff_tls_directory32 = coff_tls_directory<support::little32_t>; +using coff_tls_directory64 = coff_tls_directory<support::little64_t>; struct coff_load_configuration32 { support::ulittle32_t Characteristics; @@ -603,6 +618,7 @@ struct coff_base_reloc_block_header { struct coff_base_reloc_block_entry { support::ulittle16_t Data; + int getType() const { return Data >> 12; } int getOffset() const { return Data & ((1 << 12) - 1); } }; @@ -652,6 +668,7 @@ public: return reinterpret_cast<uintptr_t>(SymbolTable32); return uintptr_t(0); } + uint16_t getMachine() const { if (COFFHeader) return COFFHeader->Machine; @@ -659,6 +676,7 @@ public: return COFFBigObjHeader->Machine; llvm_unreachable("no COFF header!"); } + uint16_t getSizeOfOptionalHeader() const { if (COFFHeader) return COFFHeader->isImportLibrary() ? 0 @@ -668,6 +686,7 @@ public: return 0; llvm_unreachable("no COFF header!"); } + uint16_t getCharacteristics() const { if (COFFHeader) return COFFHeader->isImportLibrary() ? 0 : COFFHeader->Characteristics; @@ -677,6 +696,7 @@ public: return 0; llvm_unreachable("no COFF header!"); } + uint32_t getTimeDateStamp() const { if (COFFHeader) return COFFHeader->TimeDateStamp; @@ -684,6 +704,7 @@ public: return COFFBigObjHeader->TimeDateStamp; llvm_unreachable("no COFF header!"); } + uint32_t getNumberOfSections() const { if (COFFHeader) return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSections; @@ -691,6 +712,7 @@ public: return COFFBigObjHeader->NumberOfSections; llvm_unreachable("no COFF header!"); } + uint32_t getPointerToSymbolTable() const { if (COFFHeader) return COFFHeader->isImportLibrary() ? 
0 @@ -699,6 +721,7 @@ public: return COFFBigObjHeader->PointerToSymbolTable; llvm_unreachable("no COFF header!"); } + uint32_t getRawNumberOfSymbols() const { if (COFFHeader) return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols; @@ -706,11 +729,13 @@ public: return COFFBigObjHeader->NumberOfSymbols; llvm_unreachable("no COFF header!"); } + uint32_t getNumberOfSymbols() const { if (!SymbolTable16 && !SymbolTable32) return 0; return getRawNumberOfSymbols(); } + protected: void moveSymbolNext(DataRefImpl &Symb) const override; Expected<StringRef> getSymbolName(DataRefImpl Symb) const override; @@ -746,6 +771,7 @@ protected: public: COFFObjectFile(MemoryBufferRef Object, std::error_code &EC); + basic_symbol_iterator symbol_begin() const override; basic_symbol_iterator symbol_end() const override; section_iterator section_begin() const override; @@ -797,6 +823,7 @@ public: std::error_code getDataDirectory(uint32_t index, const data_directory *&Res) const; std::error_code getSection(int32_t index, const coff_section *&Res) const; + template <typename coff_symbol_type> std::error_code getSymbol(uint32_t Index, const coff_symbol_type *&Res) const { @@ -821,6 +848,7 @@ public: } return object_error::parse_failed; } + template <typename T> std::error_code getAuxSymbol(uint32_t index, const T *&Res) const { ErrorOr<COFFSymbolRef> s = getSymbol(index); @@ -829,6 +857,7 @@ public: Res = reinterpret_cast<const T *>(s->getRawPtr()); return std::error_code(); } + std::error_code getSymbolName(COFFSymbolRef Symbol, StringRef &Res) const; std::error_code getSymbolName(const coff_symbol_generic *Symbol, StringRef &Res) const; @@ -885,7 +914,7 @@ public: // The iterator for the import directory table. 
class ImportDirectoryEntryRef { public: - ImportDirectoryEntryRef() : OwningObject(nullptr) {} + ImportDirectoryEntryRef() = default; ImportDirectoryEntryRef(const coff_import_directory_table_entry *Table, uint32_t I, const COFFObjectFile *Owner) : ImportTable(Table), Index(I), OwningObject(Owner) {} @@ -911,12 +940,12 @@ public: private: const coff_import_directory_table_entry *ImportTable; uint32_t Index; - const COFFObjectFile *OwningObject; + const COFFObjectFile *OwningObject = nullptr; }; class DelayImportDirectoryEntryRef { public: - DelayImportDirectoryEntryRef() : OwningObject(nullptr) {} + DelayImportDirectoryEntryRef() = default; DelayImportDirectoryEntryRef(const delay_import_directory_table_entry *T, uint32_t I, const COFFObjectFile *Owner) : Table(T), Index(I), OwningObject(Owner) {} @@ -936,13 +965,13 @@ public: private: const delay_import_directory_table_entry *Table; uint32_t Index; - const COFFObjectFile *OwningObject; + const COFFObjectFile *OwningObject = nullptr; }; // The iterator for the export directory table entry. 
class ExportDirectoryEntryRef { public: - ExportDirectoryEntryRef() : OwningObject(nullptr) {} + ExportDirectoryEntryRef() = default; ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I, const COFFObjectFile *Owner) : ExportTable(Table), Index(I), OwningObject(Owner) {} @@ -962,12 +991,12 @@ public: private: const export_directory_table_entry *ExportTable; uint32_t Index; - const COFFObjectFile *OwningObject; + const COFFObjectFile *OwningObject = nullptr; }; class ImportedSymbolRef { public: - ImportedSymbolRef() : OwningObject(nullptr) {} + ImportedSymbolRef() = default; ImportedSymbolRef(const import_lookup_table_entry32 *Entry, uint32_t I, const COFFObjectFile *Owner) : Entry32(Entry), Entry64(nullptr), Index(I), OwningObject(Owner) {} @@ -987,12 +1016,12 @@ private: const import_lookup_table_entry32 *Entry32; const import_lookup_table_entry64 *Entry64; uint32_t Index; - const COFFObjectFile *OwningObject; + const COFFObjectFile *OwningObject = nullptr; }; class BaseRelocRef { public: - BaseRelocRef() : OwningObject(nullptr) {} + BaseRelocRef() = default; BaseRelocRef(const coff_base_reloc_block_header *Header, const COFFObjectFile *Owner) : Header(Header), Index(0), OwningObject(Owner) {} @@ -1006,7 +1035,7 @@ public: private: const coff_base_reloc_block_header *Header; uint32_t Index; - const COFFObjectFile *OwningObject; + const COFFObjectFile *OwningObject = nullptr; }; // Corresponds to `_FPO_DATA` structure in the PE/COFF spec. @@ -1034,6 +1063,7 @@ struct FpoData { }; } // end namespace object + } // end namespace llvm -#endif +#endif // LLVM_OBJECT_COFF_H diff --git a/include/llvm/Object/IRSymtab.h b/include/llvm/Object/IRSymtab.h index cde6f3b0f6517..be0f02aa7f171 100644 --- a/include/llvm/Object/IRSymtab.h +++ b/include/llvm/Object/IRSymtab.h @@ -41,9 +41,9 @@ typedef support::ulittle32_t Word; /// A reference to a string in the string table. 
struct Str { - Word Offset; + Word Offset, Size; StringRef get(StringRef Strtab) const { - return Strtab.data() + Offset; + return {Strtab.data() + Offset, Size}; } }; @@ -59,6 +59,9 @@ template <typename T> struct Range { /// table. struct Module { Word Begin, End; + + /// The index of the first Uncommon for this Module. + Word UncBegin; }; /// This is equivalent to an IR comdat. @@ -82,7 +85,8 @@ struct Symbol { Word Flags; enum FlagBits { FB_visibility, // 2 bits - FB_undefined = FB_visibility + 2, + FB_has_uncommon = FB_visibility + 2, + FB_undefined, FB_weak, FB_common, FB_indirect, @@ -94,10 +98,6 @@ struct Symbol { FB_unnamed_addr, FB_executable, }; - - /// The index into the Uncommon table, or -1 if this symbol does not have an - /// Uncommon. - Word UncommonIndex; }; /// This data structure contains rarely used symbol fields and is optionally @@ -249,15 +249,9 @@ public: /// Reader::module_symbols(). class Reader::SymbolRef : public Symbol { const storage::Symbol *SymI, *SymE; + const storage::Uncommon *UncI; const Reader *R; -public: - SymbolRef(const storage::Symbol *SymI, const storage::Symbol *SymE, - const Reader *R) - : SymI(SymI), SymE(SymE), R(R) { - read(); - } - void read() { if (SymI == SymE) return; @@ -267,16 +261,24 @@ public: ComdatIndex = SymI->ComdatIndex; Flags = SymI->Flags; - uint32_t UncI = SymI->UncommonIndex; - if (UncI != -1u) { - const storage::Uncommon &Unc = R->Uncommons[UncI]; - CommonSize = Unc.CommonSize; - CommonAlign = Unc.CommonAlign; - COFFWeakExternFallbackName = R->str(Unc.COFFWeakExternFallbackName); + if (Flags & (1 << storage::Symbol::FB_has_uncommon)) { + CommonSize = UncI->CommonSize; + CommonAlign = UncI->CommonAlign; + COFFWeakExternFallbackName = R->str(UncI->COFFWeakExternFallbackName); } } + +public: + SymbolRef(const storage::Symbol *SymI, const storage::Symbol *SymE, + const storage::Uncommon *UncI, const Reader *R) + : SymI(SymI), SymE(SymE), UncI(UncI), R(R) { + read(); + } + void moveNext() { ++SymI; + if 
(Flags & (1 << storage::Symbol::FB_has_uncommon)) + ++UncI; read(); } @@ -284,15 +286,16 @@ public: }; inline Reader::symbol_range Reader::symbols() const { - return {SymbolRef(Symbols.begin(), Symbols.end(), this), - SymbolRef(Symbols.end(), Symbols.end(), this)}; + return {SymbolRef(Symbols.begin(), Symbols.end(), Uncommons.begin(), this), + SymbolRef(Symbols.end(), Symbols.end(), nullptr, this)}; } inline Reader::symbol_range Reader::module_symbols(unsigned I) const { const storage::Module &M = Modules[I]; const storage::Symbol *MBegin = Symbols.begin() + M.Begin, *MEnd = Symbols.begin() + M.End; - return {SymbolRef(MBegin, MEnd, this), SymbolRef(MEnd, MEnd, this)}; + return {SymbolRef(MBegin, MEnd, Uncommons.begin() + M.UncBegin, this), + SymbolRef(MEnd, MEnd, nullptr, this)}; } } diff --git a/include/llvm/Object/ObjectFile.h b/include/llvm/Object/ObjectFile.h index b689dc2ac03ac..9a7bc618ffd0a 100644 --- a/include/llvm/Object/ObjectFile.h +++ b/include/llvm/Object/ObjectFile.h @@ -14,39 +14,46 @@ #ifndef LLVM_OBJECT_OBJECTFILE_H #define LLVM_OBJECT_OBJECTFILE_H +#include "llvm/ADT/iterator_range.h" #include "llvm/ADT/StringRef.h" #include "llvm/MC/SubtargetFeature.h" +#include "llvm/Object/Binary.h" +#include "llvm/Object/Error.h" #include "llvm/Object/SymbolicFile.h" -#include "llvm/Support/DataTypes.h" -#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorOr.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" -#include <cstring> +#include <cassert> +#include <cstdint> +#include <memory> +#include <system_error> namespace llvm { + class ARMAttributeParser; namespace object { -class ObjectFile; class COFFObjectFile; class MachOObjectFile; -class WasmObjectFile; - +class ObjectFile; +class SectionRef; class SymbolRef; class symbol_iterator; -class SectionRef; -typedef content_iterator<SectionRef> section_iterator; +class WasmObjectFile; + +using 
section_iterator = content_iterator<SectionRef>; /// This is a value type class that represents a single relocation in the list /// of relocations in the object file. class RelocationRef { DataRefImpl RelocationPimpl; - const ObjectFile *OwningObject; + const ObjectFile *OwningObject = nullptr; public: - RelocationRef() : OwningObject(nullptr) { } - + RelocationRef() = default; RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner); bool operator==(const RelocationRef &Other) const; @@ -65,18 +72,19 @@ public: DataRefImpl getRawDataRefImpl() const; const ObjectFile *getObject() const; }; -typedef content_iterator<RelocationRef> relocation_iterator; + +using relocation_iterator = content_iterator<RelocationRef>; /// This is a value type class that represents a single section in the list of /// sections in the object file. class SectionRef { friend class SymbolRef; + DataRefImpl SectionPimpl; - const ObjectFile *OwningObject; + const ObjectFile *OwningObject = nullptr; public: - SectionRef() : OwningObject(nullptr) { } - + SectionRef() = default; SectionRef(DataRefImpl SectionP, const ObjectFile *Owner); bool operator==(const SectionRef &Other) const; @@ -119,8 +127,6 @@ class SymbolRef : public BasicSymbolRef { friend class SectionRef; public: - SymbolRef() : BasicSymbolRef() {} - enum Type { ST_Unknown, // Type not specified ST_Data, @@ -130,6 +136,7 @@ public: ST_Other }; + SymbolRef() = default; SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner); SymbolRef(const BasicSymbolRef &B) : BasicSymbolRef(B) { assert(isa<ObjectFile>(BasicSymbolRef::getObject())); @@ -179,8 +186,6 @@ public: /// to create. class ObjectFile : public SymbolicFile { virtual void anchor(); - ObjectFile() = delete; - ObjectFile(const ObjectFile &other) = delete; protected: ObjectFile(unsigned int Type, MemoryBufferRef Source); @@ -198,6 +203,7 @@ protected: // Implementations assume that the DataRefImpl is valid and has not been // modified externally. It's UB otherwise. 
friend class SymbolRef; + virtual Expected<StringRef> getSymbolName(DataRefImpl Symb) const = 0; std::error_code printSymbolName(raw_ostream &OS, DataRefImpl Symb) const override; @@ -211,6 +217,7 @@ protected: // Same as above for SectionRef. friend class SectionRef; + virtual void moveSectionNext(DataRefImpl &Sec) const = 0; virtual std::error_code getSectionName(DataRefImpl Sec, StringRef &Res) const = 0; @@ -242,12 +249,15 @@ protected: uint64_t getSymbolValue(DataRefImpl Symb) const; public: + ObjectFile() = delete; + ObjectFile(const ObjectFile &other) = delete; + uint64_t getCommonSymbolSize(DataRefImpl Symb) const { assert(getSymbolFlags(Symb) & SymbolRef::SF_Common); return getCommonSymbolSizeImpl(Symb); } - typedef iterator_range<symbol_iterator> symbol_iterator_range; + using symbol_iterator_range = iterator_range<symbol_iterator>; symbol_iterator_range symbols() const { return symbol_iterator_range(symbol_begin(), symbol_end()); } @@ -255,7 +265,7 @@ public: virtual section_iterator section_begin() const = 0; virtual section_iterator section_end() const = 0; - typedef iterator_range<section_iterator> section_iterator_range; + using section_iterator_range = iterator_range<section_iterator>; section_iterator_range sections() const { return section_iterator_range(section_begin(), section_end()); } @@ -297,7 +307,6 @@ public: return createObjectFile(Object, sys::fs::file_magic::unknown); } - static inline bool classof(const Binary *v) { return v->isObject(); } @@ -354,7 +363,6 @@ inline const ObjectFile *SymbolRef::getObject() const { return cast<ObjectFile>(O); } - /// SectionRef inline SectionRef::SectionRef(DataRefImpl SectionP, const ObjectFile *Owner) @@ -479,8 +487,8 @@ inline const ObjectFile *RelocationRef::getObject() const { return OwningObject; } - } // end namespace object + } // end namespace llvm -#endif +#endif // LLVM_OBJECT_OBJECTFILE_H diff --git a/include/llvm/Object/SymbolicFile.h b/include/llvm/Object/SymbolicFile.h index 
ef0f96f7834ab..f4be4bfdb1a38 100644 --- a/include/llvm/Object/SymbolicFile.h +++ b/include/llvm/Object/SymbolicFile.h @@ -14,10 +14,19 @@ #ifndef LLVM_OBJECT_SYMBOLICFILE_H #define LLVM_OBJECT_SYMBOLICFILE_H +#include "llvm/ADT/iterator_range.h" +#include "llvm/ADT/StringRef.h" #include "llvm/Object/Binary.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/FileSystem.h" #include "llvm/Support/Format.h" +#include "llvm/Support/MemoryBuffer.h" #include <cinttypes> -#include <utility> +#include <cstdint> +#include <cstring> +#include <iterator> +#include <memory> +#include <system_error> namespace llvm { namespace object { @@ -29,6 +38,7 @@ union DataRefImpl { uint32_t a, b; } d; uintptr_t p; + DataRefImpl() { std::memset(this, 0, sizeof(DataRefImpl)); } }; @@ -87,7 +97,7 @@ class SymbolicFile; /// symbols in the object file. class BasicSymbolRef { DataRefImpl SymbolPimpl; - const SymbolicFile *OwningObject; + const SymbolicFile *OwningObject = nullptr; public: enum Flags : unsigned { @@ -108,7 +118,7 @@ public: // (IR only) }; - BasicSymbolRef() : OwningObject(nullptr) { } + BasicSymbolRef() = default; BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner); bool operator==(const BasicSymbolRef &Other) const; @@ -125,12 +135,12 @@ public: const SymbolicFile *getObject() const; }; -typedef content_iterator<BasicSymbolRef> basic_symbol_iterator; +using basic_symbol_iterator = content_iterator<BasicSymbolRef>; class SymbolicFile : public Binary { public: - ~SymbolicFile() override; SymbolicFile(unsigned int Type, MemoryBufferRef Source); + ~SymbolicFile() override; // virtual interface. virtual void moveSymbolNext(DataRefImpl &Symb) const = 0; @@ -145,7 +155,7 @@ public: virtual basic_symbol_iterator symbol_end() const = 0; // convenience wrappers. 
- typedef iterator_range<basic_symbol_iterator> basic_symbol_iterator_range; + using basic_symbol_iterator_range = iterator_range<basic_symbol_iterator>; basic_symbol_iterator_range symbols() const { return basic_symbol_iterator_range(symbol_begin(), symbol_end()); } @@ -199,7 +209,7 @@ inline const SymbolicFile *BasicSymbolRef::getObject() const { return OwningObject; } -} -} +} // end namespace object +} // end namespace llvm -#endif +#endif // LLVM_OBJECT_SYMBOLICFILE_H diff --git a/include/llvm/ObjectYAML/DWARFYAML.h b/include/llvm/ObjectYAML/DWARFYAML.h index ec34de1f08814..3f39cfc7bb3d7 100644 --- a/include/llvm/ObjectYAML/DWARFYAML.h +++ b/include/llvm/ObjectYAML/DWARFYAML.h @@ -236,7 +236,7 @@ template <> struct MappingTraits<DWARFYAML::InitialLength> { static void mapping(IO &IO, DWARFYAML::InitialLength &DWARF); }; -#define HANDLE_DW_TAG(unused, name) \ +#define HANDLE_DW_TAG(unused, name, unused2, unused3) \ io.enumCase(value, "DW_TAG_" #name, dwarf::DW_TAG_##name); template <> struct ScalarEnumerationTraits<dwarf::Tag> { @@ -266,7 +266,7 @@ template <> struct ScalarEnumerationTraits<dwarf::LineNumberExtendedOps> { } }; -#define HANDLE_DW_AT(unused, name) \ +#define HANDLE_DW_AT(unused, name, unused2, unused3) \ io.enumCase(value, "DW_AT_" #name, dwarf::DW_AT_##name); template <> struct ScalarEnumerationTraits<dwarf::Attribute> { @@ -276,7 +276,7 @@ template <> struct ScalarEnumerationTraits<dwarf::Attribute> { } }; -#define HANDLE_DW_FORM(unused, name) \ +#define HANDLE_DW_FORM(unused, name, unused2, unused3) \ io.enumCase(value, "DW_FORM_" #name, dwarf::DW_FORM_##name); template <> struct ScalarEnumerationTraits<dwarf::Form> { diff --git a/include/llvm/PassSupport.h b/include/llvm/PassSupport.h index 852d79fbd4435..50e6b498fb462 100644 --- a/include/llvm/PassSupport.h +++ b/include/llvm/PassSupport.h @@ -93,11 +93,7 @@ template <typename PassName> Pass *callTargetMachineCtor(TargetMachine *TM) { /// static RegisterPass<YourPassClassName> tmp("passopt", 
"My Pass Name"); /// /// This statement will cause your pass to be created by calling the default -/// constructor exposed by the pass. If you have a different constructor that -/// must be called, create a global constructor function (which takes the -/// arguments you need and returns a Pass*) and register your pass like this: -/// -/// static RegisterPass<PassClassName> tmp("passopt", "My Name"); +/// constructor exposed by the pass. /// template <typename passName> struct RegisterPass : public PassInfo { // Register Pass using default constructor... diff --git a/include/llvm/Support/ARMTargetParser.def b/include/llvm/Support/ARMTargetParser.def index 18bf9af432262..32dc57a0fedf5 100644 --- a/include/llvm/Support/ARMTargetParser.def +++ b/include/llvm/Support/ARMTargetParser.def @@ -78,33 +78,33 @@ ARM_ARCH("armv7-a", AK_ARMV7A, "7-A", "v7", ARMBuildAttrs::CPUArch::v7, FK_NEON, ARM::AEK_DSP) ARM_ARCH("armv7ve", AK_ARMV7VE, "7VE", "v7ve", ARMBuildAttrs::CPUArch::v7, FK_NEON, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP)) + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP)) ARM_ARCH("armv7-r", AK_ARMV7R, "7-R", "v7r", ARMBuildAttrs::CPUArch::v7, - FK_NONE, (ARM::AEK_HWDIV | ARM::AEK_DSP)) + FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP)) ARM_ARCH("armv7-m", AK_ARMV7M, "7-M", "v7m", ARMBuildAttrs::CPUArch::v7, - FK_NONE, ARM::AEK_HWDIV) + FK_NONE, ARM::AEK_HWDIVTHUMB) ARM_ARCH("armv7e-m", AK_ARMV7EM, "7E-M", "v7em", ARMBuildAttrs::CPUArch::v7E_M, - FK_NONE, (ARM::AEK_HWDIV | ARM::AEK_DSP)) + FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP)) ARM_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC)) + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC)) ARM_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a", ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8, 
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC)) + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC)) ARM_ARCH("armv8.2-a", AK_ARMV8_2A, "8.2-A", "v8.2a", ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS)) + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS)) ARM_ARCH("armv8-r", AK_ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R, FK_NEON_FP_ARMV8, - (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | + (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC)) ARM_ARCH("armv8-m.base", AK_ARMV8MBaseline, "8-M.Baseline", "v8m.base", - ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIV) + ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIVTHUMB) ARM_ARCH("armv8-m.main", AK_ARMV8MMainline, "8-M.Mainline", "v8m.main", - ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIV) + ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIVTHUMB) // Non-standard Arch names. 
ARM_ARCH("iwmmxt", AK_IWMMXT, "iwmmxt", "", ARMBuildAttrs::CPUArch::v5TE, FK_NONE, ARM::AEK_NONE) @@ -128,7 +128,7 @@ ARM_ARCH_EXT_NAME("crc", ARM::AEK_CRC, "+crc", "-crc") ARM_ARCH_EXT_NAME("crypto", ARM::AEK_CRYPTO, "+crypto","-crypto") ARM_ARCH_EXT_NAME("dsp", ARM::AEK_DSP, "+dsp", "-dsp") ARM_ARCH_EXT_NAME("fp", ARM::AEK_FP, nullptr, nullptr) -ARM_ARCH_EXT_NAME("idiv", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV), nullptr, nullptr) +ARM_ARCH_EXT_NAME("idiv", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB), nullptr, nullptr) ARM_ARCH_EXT_NAME("mp", ARM::AEK_MP, nullptr, nullptr) ARM_ARCH_EXT_NAME("simd", ARM::AEK_SIMD, nullptr, nullptr) ARM_ARCH_EXT_NAME("sec", ARM::AEK_SEC, nullptr, nullptr) @@ -147,9 +147,9 @@ ARM_ARCH_EXT_NAME("xscale", ARM::AEK_XSCALE, nullptr, nullptr) #endif ARM_HW_DIV_NAME("invalid", ARM::AEK_INVALID) ARM_HW_DIV_NAME("none", ARM::AEK_NONE) -ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIV) +ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIVTHUMB) ARM_HW_DIV_NAME("arm", ARM::AEK_HWDIVARM) -ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV)) +ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB)) #undef ARM_HW_DIV_NAME #ifndef ARM_CPU_NAME @@ -205,20 +205,20 @@ ARM_CPU_NAME("cortex-a5", AK_ARMV7A, FK_NEON_VFPV4, false, (ARM::AEK_SEC | ARM::AEK_MP)) ARM_CPU_NAME("cortex-a7", AK_ARMV7A, FK_NEON_VFPV4, false, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV)) + ARM::AEK_HWDIVTHUMB)) ARM_CPU_NAME("cortex-a8", AK_ARMV7A, FK_NEON, true, ARM::AEK_SEC) ARM_CPU_NAME("cortex-a9", AK_ARMV7A, FK_NEON_FP16, false, (ARM::AEK_SEC | ARM::AEK_MP)) ARM_CPU_NAME("cortex-a12", AK_ARMV7A, FK_NEON_VFPV4, false, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV)) + ARM::AEK_HWDIVTHUMB)) ARM_CPU_NAME("cortex-a15", AK_ARMV7A, FK_NEON_VFPV4, false, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV)) + ARM::AEK_HWDIVTHUMB)) ARM_CPU_NAME("cortex-a17", AK_ARMV7A, 
FK_NEON_VFPV4, false, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV)) + ARM::AEK_HWDIVTHUMB)) ARM_CPU_NAME("krait", AK_ARMV7A, FK_NEON_VFPV4, false, - (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV)) + (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB)) ARM_CPU_NAME("cortex-r4", AK_ARMV7R, FK_NONE, true, ARM::AEK_NONE) ARM_CPU_NAME("cortex-r4f", AK_ARMV7R, FK_VFPV3_D16, false, ARM::AEK_NONE) ARM_CPU_NAME("cortex-r5", AK_ARMV7R, FK_VFPV3_D16, false, @@ -249,7 +249,7 @@ ARM_CPU_NAME("kryo", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC) ARM_CPU_NAME("iwmmxt", AK_IWMMXT, FK_NONE, true, ARM::AEK_NONE) ARM_CPU_NAME("xscale", AK_XSCALE, FK_NONE, true, ARM::AEK_NONE) ARM_CPU_NAME("swift", AK_ARMV7S, FK_NEON_VFPV4, true, - (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV)) + (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB)) // Invalid CPU ARM_CPU_NAME("invalid", AK_INVALID, FK_INVALID, true, ARM::AEK_INVALID) #undef ARM_CPU_NAME diff --git a/include/llvm/Support/ArrayRecycler.h b/include/llvm/Support/ArrayRecycler.h index 4698f12b3bbc8..68696be6bf3d1 100644 --- a/include/llvm/Support/ArrayRecycler.h +++ b/include/llvm/Support/ArrayRecycler.h @@ -47,7 +47,9 @@ template <class T, size_t Align = alignof(T)> class ArrayRecycler { FreeList *Entry = Bucket[Idx]; if (!Entry) return nullptr; + __asan_unpoison_memory_region(Entry, Capacity::get(Idx).getSize()); Bucket[Idx] = Entry->Next; + __msan_allocated_memory(Entry, Capacity::get(Idx).getSize()); return reinterpret_cast<T*>(Entry); } @@ -59,6 +61,7 @@ template <class T, size_t Align = alignof(T)> class ArrayRecycler { Bucket.resize(size_t(Idx) + 1); Entry->Next = Bucket[Idx]; Bucket[Idx] = Entry; + __asan_poison_memory_region(Ptr, Capacity::get(Idx).getSize()); } public: diff --git a/include/llvm/Support/BinaryStreamArray.h b/include/llvm/Support/BinaryStreamArray.h index 3b1301d3cc0bd..21b2474660f2c 100644 --- a/include/llvm/Support/BinaryStreamArray.h +++ b/include/llvm/Support/BinaryStreamArray.h @@ -162,6 +162,11 @@ 
public: return ThisValue; } + ValueType &operator*() { + assert(Array && !HasError); + return ThisValue; + } + IterType &operator+=(unsigned N) { for (unsigned I = 0; I < N; ++I) { // We are done with the current record, discard it so that we are diff --git a/include/llvm/Support/Dwarf.def b/include/llvm/Support/Dwarf.def index fdbd8ea701166..3df3300de4668 100644 --- a/include/llvm/Support/Dwarf.def +++ b/include/llvm/Support/Dwarf.def @@ -25,27 +25,27 @@ #endif #ifndef HANDLE_DW_TAG -#define HANDLE_DW_TAG(ID, NAME) +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) #endif #ifndef HANDLE_DW_AT -#define HANDLE_DW_AT(ID, NAME) +#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) #endif #ifndef HANDLE_DW_FORM -#define HANDLE_DW_FORM(ID, NAME) +#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) #endif #ifndef HANDLE_DW_OP -#define HANDLE_DW_OP(ID, NAME) +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) #endif #ifndef HANDLE_DW_LANG -#define HANDLE_DW_LANG(ID, NAME) +#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) #endif #ifndef HANDLE_DW_ATE -#define HANDLE_DW_ATE(ID, NAME) +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) #endif #ifndef HANDLE_DW_VIRTUALITY @@ -92,591 +92,591 @@ #define HANDLE_DW_UT(ID, NAME) #endif -HANDLE_DW_TAG(0x0000, null) -HANDLE_DW_TAG(0x0001, array_type) -HANDLE_DW_TAG(0x0002, class_type) -HANDLE_DW_TAG(0x0003, entry_point) -HANDLE_DW_TAG(0x0004, enumeration_type) -HANDLE_DW_TAG(0x0005, formal_parameter) -HANDLE_DW_TAG(0x0008, imported_declaration) -HANDLE_DW_TAG(0x000a, label) -HANDLE_DW_TAG(0x000b, lexical_block) -HANDLE_DW_TAG(0x000d, member) -HANDLE_DW_TAG(0x000f, pointer_type) -HANDLE_DW_TAG(0x0010, reference_type) -HANDLE_DW_TAG(0x0011, compile_unit) -HANDLE_DW_TAG(0x0012, string_type) -HANDLE_DW_TAG(0x0013, structure_type) -HANDLE_DW_TAG(0x0015, subroutine_type) -HANDLE_DW_TAG(0x0016, typedef) -HANDLE_DW_TAG(0x0017, union_type) -HANDLE_DW_TAG(0x0018, unspecified_parameters) -HANDLE_DW_TAG(0x0019, variant) -HANDLE_DW_TAG(0x001a, common_block) 
-HANDLE_DW_TAG(0x001b, common_inclusion) -HANDLE_DW_TAG(0x001c, inheritance) -HANDLE_DW_TAG(0x001d, inlined_subroutine) -HANDLE_DW_TAG(0x001e, module) -HANDLE_DW_TAG(0x001f, ptr_to_member_type) -HANDLE_DW_TAG(0x0020, set_type) -HANDLE_DW_TAG(0x0021, subrange_type) -HANDLE_DW_TAG(0x0022, with_stmt) -HANDLE_DW_TAG(0x0023, access_declaration) -HANDLE_DW_TAG(0x0024, base_type) -HANDLE_DW_TAG(0x0025, catch_block) -HANDLE_DW_TAG(0x0026, const_type) -HANDLE_DW_TAG(0x0027, constant) -HANDLE_DW_TAG(0x0028, enumerator) -HANDLE_DW_TAG(0x0029, file_type) -HANDLE_DW_TAG(0x002a, friend) -HANDLE_DW_TAG(0x002b, namelist) -HANDLE_DW_TAG(0x002c, namelist_item) -HANDLE_DW_TAG(0x002d, packed_type) -HANDLE_DW_TAG(0x002e, subprogram) -HANDLE_DW_TAG(0x002f, template_type_parameter) -HANDLE_DW_TAG(0x0030, template_value_parameter) -HANDLE_DW_TAG(0x0031, thrown_type) -HANDLE_DW_TAG(0x0032, try_block) -HANDLE_DW_TAG(0x0033, variant_part) -HANDLE_DW_TAG(0x0034, variable) -HANDLE_DW_TAG(0x0035, volatile_type) +HANDLE_DW_TAG(0x0000, null, 2, DWARF) +HANDLE_DW_TAG(0x0001, array_type, 2, DWARF) +HANDLE_DW_TAG(0x0002, class_type, 2, DWARF) +HANDLE_DW_TAG(0x0003, entry_point, 2, DWARF) +HANDLE_DW_TAG(0x0004, enumeration_type, 2, DWARF) +HANDLE_DW_TAG(0x0005, formal_parameter, 2, DWARF) +HANDLE_DW_TAG(0x0008, imported_declaration, 2, DWARF) +HANDLE_DW_TAG(0x000a, label, 2, DWARF) +HANDLE_DW_TAG(0x000b, lexical_block, 2, DWARF) +HANDLE_DW_TAG(0x000d, member, 2, DWARF) +HANDLE_DW_TAG(0x000f, pointer_type, 2, DWARF) +HANDLE_DW_TAG(0x0010, reference_type, 2, DWARF) +HANDLE_DW_TAG(0x0011, compile_unit, 2, DWARF) +HANDLE_DW_TAG(0x0012, string_type, 2, DWARF) +HANDLE_DW_TAG(0x0013, structure_type, 2, DWARF) +HANDLE_DW_TAG(0x0015, subroutine_type, 2, DWARF) +HANDLE_DW_TAG(0x0016, typedef, 2, DWARF) +HANDLE_DW_TAG(0x0017, union_type, 2, DWARF) +HANDLE_DW_TAG(0x0018, unspecified_parameters, 2, DWARF) +HANDLE_DW_TAG(0x0019, variant, 2, DWARF) +HANDLE_DW_TAG(0x001a, common_block, 2, DWARF) 
+HANDLE_DW_TAG(0x001b, common_inclusion, 2, DWARF) +HANDLE_DW_TAG(0x001c, inheritance, 2, DWARF) +HANDLE_DW_TAG(0x001d, inlined_subroutine, 2, DWARF) +HANDLE_DW_TAG(0x001e, module, 2, DWARF) +HANDLE_DW_TAG(0x001f, ptr_to_member_type, 2, DWARF) +HANDLE_DW_TAG(0x0020, set_type, 2, DWARF) +HANDLE_DW_TAG(0x0021, subrange_type, 2, DWARF) +HANDLE_DW_TAG(0x0022, with_stmt, 2, DWARF) +HANDLE_DW_TAG(0x0023, access_declaration, 2, DWARF) +HANDLE_DW_TAG(0x0024, base_type, 2, DWARF) +HANDLE_DW_TAG(0x0025, catch_block, 2, DWARF) +HANDLE_DW_TAG(0x0026, const_type, 2, DWARF) +HANDLE_DW_TAG(0x0027, constant, 2, DWARF) +HANDLE_DW_TAG(0x0028, enumerator, 2, DWARF) +HANDLE_DW_TAG(0x0029, file_type, 2, DWARF) +HANDLE_DW_TAG(0x002a, friend, 2, DWARF) +HANDLE_DW_TAG(0x002b, namelist, 2, DWARF) +HANDLE_DW_TAG(0x002c, namelist_item, 2, DWARF) +HANDLE_DW_TAG(0x002d, packed_type, 2, DWARF) +HANDLE_DW_TAG(0x002e, subprogram, 2, DWARF) +HANDLE_DW_TAG(0x002f, template_type_parameter, 2, DWARF) +HANDLE_DW_TAG(0x0030, template_value_parameter, 2, DWARF) +HANDLE_DW_TAG(0x0031, thrown_type, 2, DWARF) +HANDLE_DW_TAG(0x0032, try_block, 2, DWARF) +HANDLE_DW_TAG(0x0033, variant_part, 2, DWARF) +HANDLE_DW_TAG(0x0034, variable, 2, DWARF) +HANDLE_DW_TAG(0x0035, volatile_type, 2, DWARF) // New in DWARF v3: -HANDLE_DW_TAG(0x0036, dwarf_procedure) -HANDLE_DW_TAG(0x0037, restrict_type) -HANDLE_DW_TAG(0x0038, interface_type) -HANDLE_DW_TAG(0x0039, namespace) -HANDLE_DW_TAG(0x003a, imported_module) -HANDLE_DW_TAG(0x003b, unspecified_type) -HANDLE_DW_TAG(0x003c, partial_unit) -HANDLE_DW_TAG(0x003d, imported_unit) -HANDLE_DW_TAG(0x003f, condition) -HANDLE_DW_TAG(0x0040, shared_type) +HANDLE_DW_TAG(0x0036, dwarf_procedure, 3, DWARF) +HANDLE_DW_TAG(0x0037, restrict_type, 3, DWARF) +HANDLE_DW_TAG(0x0038, interface_type, 3, DWARF) +HANDLE_DW_TAG(0x0039, namespace, 3, DWARF) +HANDLE_DW_TAG(0x003a, imported_module, 3, DWARF) +HANDLE_DW_TAG(0x003b, unspecified_type, 3, DWARF) +HANDLE_DW_TAG(0x003c, partial_unit, 3, 
DWARF) +HANDLE_DW_TAG(0x003d, imported_unit, 3, DWARF) +HANDLE_DW_TAG(0x003f, condition, 3, DWARF) +HANDLE_DW_TAG(0x0040, shared_type, 3, DWARF) // New in DWARF v4: -HANDLE_DW_TAG(0x0041, type_unit) -HANDLE_DW_TAG(0x0042, rvalue_reference_type) -HANDLE_DW_TAG(0x0043, template_alias) +HANDLE_DW_TAG(0x0041, type_unit, 4, DWARF) +HANDLE_DW_TAG(0x0042, rvalue_reference_type, 4, DWARF) +HANDLE_DW_TAG(0x0043, template_alias, 4, DWARF) // New in DWARF v5: -HANDLE_DW_TAG(0x0044, coarray_type) -HANDLE_DW_TAG(0x0045, generic_subrange) -HANDLE_DW_TAG(0x0046, dynamic_type) -HANDLE_DW_TAG(0x0047, atomic_type) -HANDLE_DW_TAG(0x0048, call_site) -HANDLE_DW_TAG(0x0049, call_site_parameter) -HANDLE_DW_TAG(0x004a, skeleton_unit) -HANDLE_DW_TAG(0x004b, immutable_type) +HANDLE_DW_TAG(0x0044, coarray_type, 5, DWARF) +HANDLE_DW_TAG(0x0045, generic_subrange, 5, DWARF) +HANDLE_DW_TAG(0x0046, dynamic_type, 5, DWARF) +HANDLE_DW_TAG(0x0047, atomic_type, 5, DWARF) +HANDLE_DW_TAG(0x0048, call_site, 5, DWARF) +HANDLE_DW_TAG(0x0049, call_site_parameter, 5, DWARF) +HANDLE_DW_TAG(0x004a, skeleton_unit, 5, DWARF) +HANDLE_DW_TAG(0x004b, immutable_type, 5, DWARF) // Vendor extensions: -HANDLE_DW_TAG(0x4081, MIPS_loop) -HANDLE_DW_TAG(0x4101, format_label) -HANDLE_DW_TAG(0x4102, function_template) -HANDLE_DW_TAG(0x4103, class_template) -HANDLE_DW_TAG(0x4106, GNU_template_template_param) -HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack) -HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack) -HANDLE_DW_TAG(0x4200, APPLE_property) -HANDLE_DW_TAG(0xb000, BORLAND_property) -HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string) -HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array) -HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set) -HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant) +HANDLE_DW_TAG(0x4081, MIPS_loop, 0, MIPS) +HANDLE_DW_TAG(0x4101, format_label, 0, GNU) +HANDLE_DW_TAG(0x4102, function_template, 0, GNU) +HANDLE_DW_TAG(0x4103, class_template, 0, GNU) +HANDLE_DW_TAG(0x4106, GNU_template_template_param, 0, GNU) 
+HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack, 0, GNU) +HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack, 0, GNU) +HANDLE_DW_TAG(0x4200, APPLE_property, 0, APPLE) +HANDLE_DW_TAG(0xb000, BORLAND_property, 0, BORLAND) +HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string, 0, BORLAND) +HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array, 0, BORLAND) +HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set, 0, BORLAND) +HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant, 0, BORLAND) // Attributes. -HANDLE_DW_AT(0x01, sibling) -HANDLE_DW_AT(0x02, location) -HANDLE_DW_AT(0x03, name) -HANDLE_DW_AT(0x09, ordering) -HANDLE_DW_AT(0x0b, byte_size) -HANDLE_DW_AT(0x0c, bit_offset) -HANDLE_DW_AT(0x0d, bit_size) -HANDLE_DW_AT(0x10, stmt_list) -HANDLE_DW_AT(0x11, low_pc) -HANDLE_DW_AT(0x12, high_pc) -HANDLE_DW_AT(0x13, language) -HANDLE_DW_AT(0x15, discr) -HANDLE_DW_AT(0x16, discr_value) -HANDLE_DW_AT(0x17, visibility) -HANDLE_DW_AT(0x18, import) -HANDLE_DW_AT(0x19, string_length) -HANDLE_DW_AT(0x1a, common_reference) -HANDLE_DW_AT(0x1b, comp_dir) -HANDLE_DW_AT(0x1c, const_value) -HANDLE_DW_AT(0x1d, containing_type) -HANDLE_DW_AT(0x1e, default_value) -HANDLE_DW_AT(0x20, inline) -HANDLE_DW_AT(0x21, is_optional) -HANDLE_DW_AT(0x22, lower_bound) -HANDLE_DW_AT(0x25, producer) -HANDLE_DW_AT(0x27, prototyped) -HANDLE_DW_AT(0x2a, return_addr) -HANDLE_DW_AT(0x2c, start_scope) -HANDLE_DW_AT(0x2e, bit_stride) -HANDLE_DW_AT(0x2f, upper_bound) -HANDLE_DW_AT(0x31, abstract_origin) -HANDLE_DW_AT(0x32, accessibility) -HANDLE_DW_AT(0x33, address_class) -HANDLE_DW_AT(0x34, artificial) -HANDLE_DW_AT(0x35, base_types) -HANDLE_DW_AT(0x36, calling_convention) -HANDLE_DW_AT(0x37, count) -HANDLE_DW_AT(0x38, data_member_location) -HANDLE_DW_AT(0x39, decl_column) -HANDLE_DW_AT(0x3a, decl_file) -HANDLE_DW_AT(0x3b, decl_line) -HANDLE_DW_AT(0x3c, declaration) -HANDLE_DW_AT(0x3d, discr_list) -HANDLE_DW_AT(0x3e, encoding) -HANDLE_DW_AT(0x3f, external) -HANDLE_DW_AT(0x40, frame_base) -HANDLE_DW_AT(0x41, friend) -HANDLE_DW_AT(0x42, 
identifier_case) -HANDLE_DW_AT(0x43, macro_info) -HANDLE_DW_AT(0x44, namelist_item) -HANDLE_DW_AT(0x45, priority) -HANDLE_DW_AT(0x46, segment) -HANDLE_DW_AT(0x47, specification) -HANDLE_DW_AT(0x48, static_link) -HANDLE_DW_AT(0x49, type) -HANDLE_DW_AT(0x4a, use_location) -HANDLE_DW_AT(0x4b, variable_parameter) -HANDLE_DW_AT(0x4c, virtuality) -HANDLE_DW_AT(0x4d, vtable_elem_location) +HANDLE_DW_AT(0x01, sibling, 2, DWARF) +HANDLE_DW_AT(0x02, location, 2, DWARF) +HANDLE_DW_AT(0x03, name, 2, DWARF) +HANDLE_DW_AT(0x09, ordering, 2, DWARF) +HANDLE_DW_AT(0x0b, byte_size, 2, DWARF) +HANDLE_DW_AT(0x0c, bit_offset, 2, DWARF) +HANDLE_DW_AT(0x0d, bit_size, 2, DWARF) +HANDLE_DW_AT(0x10, stmt_list, 2, DWARF) +HANDLE_DW_AT(0x11, low_pc, 2, DWARF) +HANDLE_DW_AT(0x12, high_pc, 2, DWARF) +HANDLE_DW_AT(0x13, language, 2, DWARF) +HANDLE_DW_AT(0x15, discr, 2, DWARF) +HANDLE_DW_AT(0x16, discr_value, 2, DWARF) +HANDLE_DW_AT(0x17, visibility, 2, DWARF) +HANDLE_DW_AT(0x18, import, 2, DWARF) +HANDLE_DW_AT(0x19, string_length, 2, DWARF) +HANDLE_DW_AT(0x1a, common_reference, 2, DWARF) +HANDLE_DW_AT(0x1b, comp_dir, 2, DWARF) +HANDLE_DW_AT(0x1c, const_value, 2, DWARF) +HANDLE_DW_AT(0x1d, containing_type, 2, DWARF) +HANDLE_DW_AT(0x1e, default_value, 2, DWARF) +HANDLE_DW_AT(0x20, inline, 2, DWARF) +HANDLE_DW_AT(0x21, is_optional, 2, DWARF) +HANDLE_DW_AT(0x22, lower_bound, 2, DWARF) +HANDLE_DW_AT(0x25, producer, 2, DWARF) +HANDLE_DW_AT(0x27, prototyped, 2, DWARF) +HANDLE_DW_AT(0x2a, return_addr, 2, DWARF) +HANDLE_DW_AT(0x2c, start_scope, 2, DWARF) +HANDLE_DW_AT(0x2e, bit_stride, 2, DWARF) +HANDLE_DW_AT(0x2f, upper_bound, 2, DWARF) +HANDLE_DW_AT(0x31, abstract_origin, 2, DWARF) +HANDLE_DW_AT(0x32, accessibility, 2, DWARF) +HANDLE_DW_AT(0x33, address_class, 2, DWARF) +HANDLE_DW_AT(0x34, artificial, 2, DWARF) +HANDLE_DW_AT(0x35, base_types, 2, DWARF) +HANDLE_DW_AT(0x36, calling_convention, 2, DWARF) +HANDLE_DW_AT(0x37, count, 2, DWARF) +HANDLE_DW_AT(0x38, data_member_location, 2, DWARF) 
+HANDLE_DW_AT(0x39, decl_column, 2, DWARF) +HANDLE_DW_AT(0x3a, decl_file, 2, DWARF) +HANDLE_DW_AT(0x3b, decl_line, 2, DWARF) +HANDLE_DW_AT(0x3c, declaration, 2, DWARF) +HANDLE_DW_AT(0x3d, discr_list, 2, DWARF) +HANDLE_DW_AT(0x3e, encoding, 2, DWARF) +HANDLE_DW_AT(0x3f, external, 2, DWARF) +HANDLE_DW_AT(0x40, frame_base, 2, DWARF) +HANDLE_DW_AT(0x41, friend, 2, DWARF) +HANDLE_DW_AT(0x42, identifier_case, 2, DWARF) +HANDLE_DW_AT(0x43, macro_info, 2, DWARF) +HANDLE_DW_AT(0x44, namelist_item, 2, DWARF) +HANDLE_DW_AT(0x45, priority, 2, DWARF) +HANDLE_DW_AT(0x46, segment, 2, DWARF) +HANDLE_DW_AT(0x47, specification, 2, DWARF) +HANDLE_DW_AT(0x48, static_link, 2, DWARF) +HANDLE_DW_AT(0x49, type, 2, DWARF) +HANDLE_DW_AT(0x4a, use_location, 2, DWARF) +HANDLE_DW_AT(0x4b, variable_parameter, 2, DWARF) +HANDLE_DW_AT(0x4c, virtuality, 2, DWARF) +HANDLE_DW_AT(0x4d, vtable_elem_location, 2, DWARF) // New in DWARF v3: -HANDLE_DW_AT(0x4e, allocated) -HANDLE_DW_AT(0x4f, associated) -HANDLE_DW_AT(0x50, data_location) -HANDLE_DW_AT(0x51, byte_stride) -HANDLE_DW_AT(0x52, entry_pc) -HANDLE_DW_AT(0x53, use_UTF8) -HANDLE_DW_AT(0x54, extension) -HANDLE_DW_AT(0x55, ranges) -HANDLE_DW_AT(0x56, trampoline) -HANDLE_DW_AT(0x57, call_column) -HANDLE_DW_AT(0x58, call_file) -HANDLE_DW_AT(0x59, call_line) -HANDLE_DW_AT(0x5a, description) -HANDLE_DW_AT(0x5b, binary_scale) -HANDLE_DW_AT(0x5c, decimal_scale) -HANDLE_DW_AT(0x5d, small) -HANDLE_DW_AT(0x5e, decimal_sign) -HANDLE_DW_AT(0x5f, digit_count) -HANDLE_DW_AT(0x60, picture_string) -HANDLE_DW_AT(0x61, mutable) -HANDLE_DW_AT(0x62, threads_scaled) -HANDLE_DW_AT(0x63, explicit) -HANDLE_DW_AT(0x64, object_pointer) -HANDLE_DW_AT(0x65, endianity) -HANDLE_DW_AT(0x66, elemental) -HANDLE_DW_AT(0x67, pure) -HANDLE_DW_AT(0x68, recursive) +HANDLE_DW_AT(0x4e, allocated, 3, DWARF) +HANDLE_DW_AT(0x4f, associated, 3, DWARF) +HANDLE_DW_AT(0x50, data_location, 3, DWARF) +HANDLE_DW_AT(0x51, byte_stride, 3, DWARF) +HANDLE_DW_AT(0x52, entry_pc, 3, DWARF) 
+HANDLE_DW_AT(0x53, use_UTF8, 3, DWARF) +HANDLE_DW_AT(0x54, extension, 3, DWARF) +HANDLE_DW_AT(0x55, ranges, 3, DWARF) +HANDLE_DW_AT(0x56, trampoline, 3, DWARF) +HANDLE_DW_AT(0x57, call_column, 3, DWARF) +HANDLE_DW_AT(0x58, call_file, 3, DWARF) +HANDLE_DW_AT(0x59, call_line, 3, DWARF) +HANDLE_DW_AT(0x5a, description, 3, DWARF) +HANDLE_DW_AT(0x5b, binary_scale, 3, DWARF) +HANDLE_DW_AT(0x5c, decimal_scale, 3, DWARF) +HANDLE_DW_AT(0x5d, small, 3, DWARF) +HANDLE_DW_AT(0x5e, decimal_sign, 3, DWARF) +HANDLE_DW_AT(0x5f, digit_count, 3, DWARF) +HANDLE_DW_AT(0x60, picture_string, 3, DWARF) +HANDLE_DW_AT(0x61, mutable, 3, DWARF) +HANDLE_DW_AT(0x62, threads_scaled, 3, DWARF) +HANDLE_DW_AT(0x63, explicit, 3, DWARF) +HANDLE_DW_AT(0x64, object_pointer, 3, DWARF) +HANDLE_DW_AT(0x65, endianity, 3, DWARF) +HANDLE_DW_AT(0x66, elemental, 3, DWARF) +HANDLE_DW_AT(0x67, pure, 3, DWARF) +HANDLE_DW_AT(0x68, recursive, 3, DWARF) // New in DWARF v4: -HANDLE_DW_AT(0x69, signature) -HANDLE_DW_AT(0x6a, main_subprogram) -HANDLE_DW_AT(0x6b, data_bit_offset) -HANDLE_DW_AT(0x6c, const_expr) -HANDLE_DW_AT(0x6d, enum_class) -HANDLE_DW_AT(0x6e, linkage_name) +HANDLE_DW_AT(0x69, signature, 4, DWARF) +HANDLE_DW_AT(0x6a, main_subprogram, 4, DWARF) +HANDLE_DW_AT(0x6b, data_bit_offset, 4, DWARF) +HANDLE_DW_AT(0x6c, const_expr, 4, DWARF) +HANDLE_DW_AT(0x6d, enum_class, 4, DWARF) +HANDLE_DW_AT(0x6e, linkage_name, 4, DWARF) // New in DWARF v5: -HANDLE_DW_AT(0x6f, string_length_bit_size) -HANDLE_DW_AT(0x70, string_length_byte_size) -HANDLE_DW_AT(0x71, rank) -HANDLE_DW_AT(0x72, str_offsets_base) -HANDLE_DW_AT(0x73, addr_base) -HANDLE_DW_AT(0x74, rnglists_base) -HANDLE_DW_AT(0x75, dwo_id) ///< Retracted from DWARF 5. 
-HANDLE_DW_AT(0x76, dwo_name) -HANDLE_DW_AT(0x77, reference) -HANDLE_DW_AT(0x78, rvalue_reference) -HANDLE_DW_AT(0x79, macros) -HANDLE_DW_AT(0x7a, call_all_calls) -HANDLE_DW_AT(0x7b, call_all_source_calls) -HANDLE_DW_AT(0x7c, call_all_tail_calls) -HANDLE_DW_AT(0x7d, call_return_pc) -HANDLE_DW_AT(0x7e, call_value) -HANDLE_DW_AT(0x7f, call_origin) -HANDLE_DW_AT(0x80, call_parameter) -HANDLE_DW_AT(0x81, call_pc) -HANDLE_DW_AT(0x82, call_tail_call) -HANDLE_DW_AT(0x83, call_target) -HANDLE_DW_AT(0x84, call_target_clobbered) -HANDLE_DW_AT(0x85, call_data_location) -HANDLE_DW_AT(0x86, call_data_value) -HANDLE_DW_AT(0x87, noreturn) -HANDLE_DW_AT(0x88, alignment) -HANDLE_DW_AT(0x89, export_symbols) -HANDLE_DW_AT(0x8a, deleted) -HANDLE_DW_AT(0x8b, defaulted) -HANDLE_DW_AT(0x8c, loclists_base) +HANDLE_DW_AT(0x6f, string_length_bit_size, 5, DWARF) +HANDLE_DW_AT(0x70, string_length_byte_size, 5, DWARF) +HANDLE_DW_AT(0x71, rank, 5, DWARF) +HANDLE_DW_AT(0x72, str_offsets_base, 5, DWARF) +HANDLE_DW_AT(0x73, addr_base, 5, DWARF) +HANDLE_DW_AT(0x74, rnglists_base, 5, DWARF) +HANDLE_DW_AT(0x75, dwo_id, 0, DWARF) ///< Retracted from DWARF v5. 
+HANDLE_DW_AT(0x76, dwo_name, 5, DWARF) +HANDLE_DW_AT(0x77, reference, 5, DWARF) +HANDLE_DW_AT(0x78, rvalue_reference, 5, DWARF) +HANDLE_DW_AT(0x79, macros, 5, DWARF) +HANDLE_DW_AT(0x7a, call_all_calls, 5, DWARF) +HANDLE_DW_AT(0x7b, call_all_source_calls, 5, DWARF) +HANDLE_DW_AT(0x7c, call_all_tail_calls, 5, DWARF) +HANDLE_DW_AT(0x7d, call_return_pc, 5, DWARF) +HANDLE_DW_AT(0x7e, call_value, 5, DWARF) +HANDLE_DW_AT(0x7f, call_origin, 5, DWARF) +HANDLE_DW_AT(0x80, call_parameter, 5, DWARF) +HANDLE_DW_AT(0x81, call_pc, 5, DWARF) +HANDLE_DW_AT(0x82, call_tail_call, 5, DWARF) +HANDLE_DW_AT(0x83, call_target, 5, DWARF) +HANDLE_DW_AT(0x84, call_target_clobbered, 5, DWARF) +HANDLE_DW_AT(0x85, call_data_location, 5, DWARF) +HANDLE_DW_AT(0x86, call_data_value, 5, DWARF) +HANDLE_DW_AT(0x87, noreturn, 5, DWARF) +HANDLE_DW_AT(0x88, alignment, 5, DWARF) +HANDLE_DW_AT(0x89, export_symbols, 5, DWARF) +HANDLE_DW_AT(0x8a, deleted, 5, DWARF) +HANDLE_DW_AT(0x8b, defaulted, 5, DWARF) +HANDLE_DW_AT(0x8c, loclists_base, 5, DWARF) // Vendor extensions: -HANDLE_DW_AT(0x2002, MIPS_loop_begin) -HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin) -HANDLE_DW_AT(0x2004, MIPS_epilog_begin) -HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor) -HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth) -HANDLE_DW_AT(0x2007, MIPS_linkage_name) -HANDLE_DW_AT(0x2008, MIPS_stride) -HANDLE_DW_AT(0x2009, MIPS_abstract_name) -HANDLE_DW_AT(0x200a, MIPS_clone_origin) -HANDLE_DW_AT(0x200b, MIPS_has_inlines) -HANDLE_DW_AT(0x200c, MIPS_stride_byte) -HANDLE_DW_AT(0x200d, MIPS_stride_elem) -HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype) -HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype) -HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype) +HANDLE_DW_AT(0x2002, MIPS_loop_begin, 0, MIPS) +HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin, 0, MIPS) +HANDLE_DW_AT(0x2004, MIPS_epilog_begin, 0, MIPS) +HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor, 0, MIPS) +HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth, 0, MIPS) +HANDLE_DW_AT(0x2007, MIPS_linkage_name, 
0, MIPS) +HANDLE_DW_AT(0x2008, MIPS_stride, 0, MIPS) +HANDLE_DW_AT(0x2009, MIPS_abstract_name, 0, MIPS) +HANDLE_DW_AT(0x200a, MIPS_clone_origin, 0, MIPS) +HANDLE_DW_AT(0x200b, MIPS_has_inlines, 0, MIPS) +HANDLE_DW_AT(0x200c, MIPS_stride_byte, 0, MIPS) +HANDLE_DW_AT(0x200d, MIPS_stride_elem, 0, MIPS) +HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype, 0, MIPS) +HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype, 0, MIPS) +HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype, 0, MIPS) // This one appears to have only been implemented by Open64 for // fortran and may conflict with other extensions. -HANDLE_DW_AT(0x2011, MIPS_assumed_size) +HANDLE_DW_AT(0x2011, MIPS_assumed_size, 0, MIPS) // GNU extensions -HANDLE_DW_AT(0x2101, sf_names) -HANDLE_DW_AT(0x2102, src_info) -HANDLE_DW_AT(0x2103, mac_info) -HANDLE_DW_AT(0x2104, src_coords) -HANDLE_DW_AT(0x2105, body_begin) -HANDLE_DW_AT(0x2106, body_end) -HANDLE_DW_AT(0x2107, GNU_vector) -HANDLE_DW_AT(0x2110, GNU_template_name) -HANDLE_DW_AT(0x210f, GNU_odr_signature) -HANDLE_DW_AT(0x2119, GNU_macros) +HANDLE_DW_AT(0x2101, sf_names, 0, GNU) +HANDLE_DW_AT(0x2102, src_info, 0, GNU) +HANDLE_DW_AT(0x2103, mac_info, 0, GNU) +HANDLE_DW_AT(0x2104, src_coords, 0, GNU) +HANDLE_DW_AT(0x2105, body_begin, 0, GNU) +HANDLE_DW_AT(0x2106, body_end, 0, GNU) +HANDLE_DW_AT(0x2107, GNU_vector, 0, GNU) +HANDLE_DW_AT(0x2110, GNU_template_name, 0, GNU) +HANDLE_DW_AT(0x210f, GNU_odr_signature, 0, GNU) +HANDLE_DW_AT(0x2119, GNU_macros, 0, GNU) // Extensions for Fission proposal. 
-HANDLE_DW_AT(0x2130, GNU_dwo_name) -HANDLE_DW_AT(0x2131, GNU_dwo_id) -HANDLE_DW_AT(0x2132, GNU_ranges_base) -HANDLE_DW_AT(0x2133, GNU_addr_base) -HANDLE_DW_AT(0x2134, GNU_pubnames) -HANDLE_DW_AT(0x2135, GNU_pubtypes) -HANDLE_DW_AT(0x2136, GNU_discriminator) +HANDLE_DW_AT(0x2130, GNU_dwo_name, 0, GNU) +HANDLE_DW_AT(0x2131, GNU_dwo_id, 0, GNU) +HANDLE_DW_AT(0x2132, GNU_ranges_base, 0, GNU) +HANDLE_DW_AT(0x2133, GNU_addr_base, 0, GNU) +HANDLE_DW_AT(0x2134, GNU_pubnames, 0, GNU) +HANDLE_DW_AT(0x2135, GNU_pubtypes, 0, GNU) +HANDLE_DW_AT(0x2136, GNU_discriminator, 0, GNU) // Borland extensions. -HANDLE_DW_AT(0x3b11, BORLAND_property_read) -HANDLE_DW_AT(0x3b12, BORLAND_property_write) -HANDLE_DW_AT(0x3b13, BORLAND_property_implements) -HANDLE_DW_AT(0x3b14, BORLAND_property_index) -HANDLE_DW_AT(0x3b15, BORLAND_property_default) -HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit) -HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class) -HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record) -HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass) -HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor) -HANDLE_DW_AT(0x3b25, BORLAND_Delphi_destructor) -HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method) -HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface) -HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI) -HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return) -HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr) -HANDLE_DW_AT(0x3b31, BORLAND_closure) +HANDLE_DW_AT(0x3b11, BORLAND_property_read, 0, BORLAND) +HANDLE_DW_AT(0x3b12, BORLAND_property_write, 0, BORLAND) +HANDLE_DW_AT(0x3b13, BORLAND_property_implements, 0, BORLAND) +HANDLE_DW_AT(0x3b14, BORLAND_property_index, 0, BORLAND) +HANDLE_DW_AT(0x3b15, BORLAND_property_default, 0, BORLAND) +HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit, 0, BORLAND) +HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class, 0, BORLAND) +HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record, 0, BORLAND) +HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass, 0, BORLAND) +HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor, 0, BORLAND) +HANDLE_DW_AT(0x3b25, 
BORLAND_Delphi_destructor, 0, BORLAND) +HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method, 0, BORLAND) +HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface, 0, BORLAND) +HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI, 0, BORLAND) +HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return, 0, BORLAND) +HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr, 0, BORLAND) +HANDLE_DW_AT(0x3b31, BORLAND_closure, 0, BORLAND) // LLVM project extensions. -HANDLE_DW_AT(0x3e00, LLVM_include_path) -HANDLE_DW_AT(0x3e01, LLVM_config_macros) -HANDLE_DW_AT(0x3e02, LLVM_isysroot) +HANDLE_DW_AT(0x3e00, LLVM_include_path, 0, LLVM) +HANDLE_DW_AT(0x3e01, LLVM_config_macros, 0, LLVM) +HANDLE_DW_AT(0x3e02, LLVM_isysroot, 0, LLVM) // Apple extensions. -HANDLE_DW_AT(0x3fe1, APPLE_optimized) -HANDLE_DW_AT(0x3fe2, APPLE_flags) -HANDLE_DW_AT(0x3fe3, APPLE_isa) -HANDLE_DW_AT(0x3fe4, APPLE_block) -HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers) -HANDLE_DW_AT(0x3fe6, APPLE_runtime_class) -HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr) -HANDLE_DW_AT(0x3fe8, APPLE_property_name) -HANDLE_DW_AT(0x3fe9, APPLE_property_getter) -HANDLE_DW_AT(0x3fea, APPLE_property_setter) -HANDLE_DW_AT(0x3feb, APPLE_property_attribute) -HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type) -HANDLE_DW_AT(0x3fed, APPLE_property) +HANDLE_DW_AT(0x3fe1, APPLE_optimized, 0, APPLE) +HANDLE_DW_AT(0x3fe2, APPLE_flags, 0, APPLE) +HANDLE_DW_AT(0x3fe3, APPLE_isa, 0, APPLE) +HANDLE_DW_AT(0x3fe4, APPLE_block, 0, APPLE) +HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers, 0, APPLE) +HANDLE_DW_AT(0x3fe6, APPLE_runtime_class, 0, APPLE) +HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr, 0, APPLE) +HANDLE_DW_AT(0x3fe8, APPLE_property_name, 0, APPLE) +HANDLE_DW_AT(0x3fe9, APPLE_property_getter, 0, APPLE) +HANDLE_DW_AT(0x3fea, APPLE_property_setter, 0, APPLE) +HANDLE_DW_AT(0x3feb, APPLE_property_attribute, 0, APPLE) +HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type, 0, APPLE) +HANDLE_DW_AT(0x3fed, APPLE_property, 0, APPLE) // Attribute form encodings. 
-HANDLE_DW_FORM(0x01, addr) -HANDLE_DW_FORM(0x03, block2) -HANDLE_DW_FORM(0x04, block4) -HANDLE_DW_FORM(0x05, data2) -HANDLE_DW_FORM(0x06, data4) -HANDLE_DW_FORM(0x07, data8) -HANDLE_DW_FORM(0x08, string) -HANDLE_DW_FORM(0x09, block) -HANDLE_DW_FORM(0x0a, block1) -HANDLE_DW_FORM(0x0b, data1) -HANDLE_DW_FORM(0x0c, flag) -HANDLE_DW_FORM(0x0d, sdata) -HANDLE_DW_FORM(0x0e, strp) -HANDLE_DW_FORM(0x0f, udata) -HANDLE_DW_FORM(0x10, ref_addr) -HANDLE_DW_FORM(0x11, ref1) -HANDLE_DW_FORM(0x12, ref2) -HANDLE_DW_FORM(0x13, ref4) -HANDLE_DW_FORM(0x14, ref8) -HANDLE_DW_FORM(0x15, ref_udata) -HANDLE_DW_FORM(0x16, indirect) +HANDLE_DW_FORM(0x01, addr, 2, DWARF) +HANDLE_DW_FORM(0x03, block2, 2, DWARF) +HANDLE_DW_FORM(0x04, block4, 2, DWARF) +HANDLE_DW_FORM(0x05, data2, 2, DWARF) +HANDLE_DW_FORM(0x06, data4, 2, DWARF) +HANDLE_DW_FORM(0x07, data8, 2, DWARF) +HANDLE_DW_FORM(0x08, string, 2, DWARF) +HANDLE_DW_FORM(0x09, block, 2, DWARF) +HANDLE_DW_FORM(0x0a, block1, 2, DWARF) +HANDLE_DW_FORM(0x0b, data1, 2, DWARF) +HANDLE_DW_FORM(0x0c, flag, 2, DWARF) +HANDLE_DW_FORM(0x0d, sdata, 2, DWARF) +HANDLE_DW_FORM(0x0e, strp, 2, DWARF) +HANDLE_DW_FORM(0x0f, udata, 2, DWARF) +HANDLE_DW_FORM(0x10, ref_addr, 2, DWARF) +HANDLE_DW_FORM(0x11, ref1, 2, DWARF) +HANDLE_DW_FORM(0x12, ref2, 2, DWARF) +HANDLE_DW_FORM(0x13, ref4, 2, DWARF) +HANDLE_DW_FORM(0x14, ref8, 2, DWARF) +HANDLE_DW_FORM(0x15, ref_udata, 2, DWARF) +HANDLE_DW_FORM(0x16, indirect, 2, DWARF) // New in DWARF v4: -HANDLE_DW_FORM(0x17, sec_offset) -HANDLE_DW_FORM(0x18, exprloc) -HANDLE_DW_FORM(0x19, flag_present) +HANDLE_DW_FORM(0x17, sec_offset, 4, DWARF) +HANDLE_DW_FORM(0x18, exprloc, 4, DWARF) +HANDLE_DW_FORM(0x19, flag_present, 4, DWARF) // This was defined out of sequence. 
-HANDLE_DW_FORM(0x20, ref_sig8) +HANDLE_DW_FORM(0x20, ref_sig8, 4, DWARF) // New in DWARF v5: -HANDLE_DW_FORM(0x1a, strx) -HANDLE_DW_FORM(0x1b, addrx) -HANDLE_DW_FORM(0x1c, ref_sup4) -HANDLE_DW_FORM(0x1d, strp_sup) -HANDLE_DW_FORM(0x1e, data16) -HANDLE_DW_FORM(0x1f, line_strp) -HANDLE_DW_FORM(0x21, implicit_const) -HANDLE_DW_FORM(0x22, loclistx) -HANDLE_DW_FORM(0x23, rnglistx) -HANDLE_DW_FORM(0x24, ref_sup8) -HANDLE_DW_FORM(0x25, strx1) -HANDLE_DW_FORM(0x26, strx2) -HANDLE_DW_FORM(0x27, strx3) -HANDLE_DW_FORM(0x28, strx4) -HANDLE_DW_FORM(0x29, addrx1) -HANDLE_DW_FORM(0x2a, addrx2) -HANDLE_DW_FORM(0x2b, addrx3) -HANDLE_DW_FORM(0x2c, addrx4) +HANDLE_DW_FORM(0x1a, strx, 5, DWARF) +HANDLE_DW_FORM(0x1b, addrx, 5, DWARF) +HANDLE_DW_FORM(0x1c, ref_sup4, 5, DWARF) +HANDLE_DW_FORM(0x1d, strp_sup, 5, DWARF) +HANDLE_DW_FORM(0x1e, data16, 5, DWARF) +HANDLE_DW_FORM(0x1f, line_strp, 5, DWARF) +HANDLE_DW_FORM(0x21, implicit_const, 5, DWARF) +HANDLE_DW_FORM(0x22, loclistx, 5, DWARF) +HANDLE_DW_FORM(0x23, rnglistx, 5, DWARF) +HANDLE_DW_FORM(0x24, ref_sup8, 5, DWARF) +HANDLE_DW_FORM(0x25, strx1, 5, DWARF) +HANDLE_DW_FORM(0x26, strx2, 5, DWARF) +HANDLE_DW_FORM(0x27, strx3, 5, DWARF) +HANDLE_DW_FORM(0x28, strx4, 5, DWARF) +HANDLE_DW_FORM(0x29, addrx1, 5, DWARF) +HANDLE_DW_FORM(0x2a, addrx2, 5, DWARF) +HANDLE_DW_FORM(0x2b, addrx3, 5, DWARF) +HANDLE_DW_FORM(0x2c, addrx4, 5, DWARF) // Extensions for Fission proposal -HANDLE_DW_FORM(0x1f01, GNU_addr_index) -HANDLE_DW_FORM(0x1f02, GNU_str_index) +HANDLE_DW_FORM(0x1f01, GNU_addr_index, 0, GNU) +HANDLE_DW_FORM(0x1f02, GNU_str_index, 0, GNU) // Alternate debug sections proposal (output of "dwz" tool). -HANDLE_DW_FORM(0x1f20, GNU_ref_alt) -HANDLE_DW_FORM(0x1f21, GNU_strp_alt) +HANDLE_DW_FORM(0x1f20, GNU_ref_alt, 0, GNU) +HANDLE_DW_FORM(0x1f21, GNU_strp_alt, 0, GNU) // DWARF Expression operators. 
-HANDLE_DW_OP(0x03, addr) -HANDLE_DW_OP(0x06, deref) -HANDLE_DW_OP(0x08, const1u) -HANDLE_DW_OP(0x09, const1s) -HANDLE_DW_OP(0x0a, const2u) -HANDLE_DW_OP(0x0b, const2s) -HANDLE_DW_OP(0x0c, const4u) -HANDLE_DW_OP(0x0d, const4s) -HANDLE_DW_OP(0x0e, const8u) -HANDLE_DW_OP(0x0f, const8s) -HANDLE_DW_OP(0x10, constu) -HANDLE_DW_OP(0x11, consts) -HANDLE_DW_OP(0x12, dup) -HANDLE_DW_OP(0x13, drop) -HANDLE_DW_OP(0x14, over) -HANDLE_DW_OP(0x15, pick) -HANDLE_DW_OP(0x16, swap) -HANDLE_DW_OP(0x17, rot) -HANDLE_DW_OP(0x18, xderef) -HANDLE_DW_OP(0x19, abs) -HANDLE_DW_OP(0x1a, and) -HANDLE_DW_OP(0x1b, div) -HANDLE_DW_OP(0x1c, minus) -HANDLE_DW_OP(0x1d, mod) -HANDLE_DW_OP(0x1e, mul) -HANDLE_DW_OP(0x1f, neg) -HANDLE_DW_OP(0x20, not) -HANDLE_DW_OP(0x21, or) -HANDLE_DW_OP(0x22, plus) -HANDLE_DW_OP(0x23, plus_uconst) -HANDLE_DW_OP(0x24, shl) -HANDLE_DW_OP(0x25, shr) -HANDLE_DW_OP(0x26, shra) -HANDLE_DW_OP(0x27, xor) -HANDLE_DW_OP(0x28, bra) -HANDLE_DW_OP(0x29, eq) -HANDLE_DW_OP(0x2a, ge) -HANDLE_DW_OP(0x2b, gt) -HANDLE_DW_OP(0x2c, le) -HANDLE_DW_OP(0x2d, lt) -HANDLE_DW_OP(0x2e, ne) -HANDLE_DW_OP(0x2f, skip) -HANDLE_DW_OP(0x30, lit0) -HANDLE_DW_OP(0x31, lit1) -HANDLE_DW_OP(0x32, lit2) -HANDLE_DW_OP(0x33, lit3) -HANDLE_DW_OP(0x34, lit4) -HANDLE_DW_OP(0x35, lit5) -HANDLE_DW_OP(0x36, lit6) -HANDLE_DW_OP(0x37, lit7) -HANDLE_DW_OP(0x38, lit8) -HANDLE_DW_OP(0x39, lit9) -HANDLE_DW_OP(0x3a, lit10) -HANDLE_DW_OP(0x3b, lit11) -HANDLE_DW_OP(0x3c, lit12) -HANDLE_DW_OP(0x3d, lit13) -HANDLE_DW_OP(0x3e, lit14) -HANDLE_DW_OP(0x3f, lit15) -HANDLE_DW_OP(0x40, lit16) -HANDLE_DW_OP(0x41, lit17) -HANDLE_DW_OP(0x42, lit18) -HANDLE_DW_OP(0x43, lit19) -HANDLE_DW_OP(0x44, lit20) -HANDLE_DW_OP(0x45, lit21) -HANDLE_DW_OP(0x46, lit22) -HANDLE_DW_OP(0x47, lit23) -HANDLE_DW_OP(0x48, lit24) -HANDLE_DW_OP(0x49, lit25) -HANDLE_DW_OP(0x4a, lit26) -HANDLE_DW_OP(0x4b, lit27) -HANDLE_DW_OP(0x4c, lit28) -HANDLE_DW_OP(0x4d, lit29) -HANDLE_DW_OP(0x4e, lit30) -HANDLE_DW_OP(0x4f, lit31) -HANDLE_DW_OP(0x50, reg0) 
-HANDLE_DW_OP(0x51, reg1) -HANDLE_DW_OP(0x52, reg2) -HANDLE_DW_OP(0x53, reg3) -HANDLE_DW_OP(0x54, reg4) -HANDLE_DW_OP(0x55, reg5) -HANDLE_DW_OP(0x56, reg6) -HANDLE_DW_OP(0x57, reg7) -HANDLE_DW_OP(0x58, reg8) -HANDLE_DW_OP(0x59, reg9) -HANDLE_DW_OP(0x5a, reg10) -HANDLE_DW_OP(0x5b, reg11) -HANDLE_DW_OP(0x5c, reg12) -HANDLE_DW_OP(0x5d, reg13) -HANDLE_DW_OP(0x5e, reg14) -HANDLE_DW_OP(0x5f, reg15) -HANDLE_DW_OP(0x60, reg16) -HANDLE_DW_OP(0x61, reg17) -HANDLE_DW_OP(0x62, reg18) -HANDLE_DW_OP(0x63, reg19) -HANDLE_DW_OP(0x64, reg20) -HANDLE_DW_OP(0x65, reg21) -HANDLE_DW_OP(0x66, reg22) -HANDLE_DW_OP(0x67, reg23) -HANDLE_DW_OP(0x68, reg24) -HANDLE_DW_OP(0x69, reg25) -HANDLE_DW_OP(0x6a, reg26) -HANDLE_DW_OP(0x6b, reg27) -HANDLE_DW_OP(0x6c, reg28) -HANDLE_DW_OP(0x6d, reg29) -HANDLE_DW_OP(0x6e, reg30) -HANDLE_DW_OP(0x6f, reg31) -HANDLE_DW_OP(0x70, breg0) -HANDLE_DW_OP(0x71, breg1) -HANDLE_DW_OP(0x72, breg2) -HANDLE_DW_OP(0x73, breg3) -HANDLE_DW_OP(0x74, breg4) -HANDLE_DW_OP(0x75, breg5) -HANDLE_DW_OP(0x76, breg6) -HANDLE_DW_OP(0x77, breg7) -HANDLE_DW_OP(0x78, breg8) -HANDLE_DW_OP(0x79, breg9) -HANDLE_DW_OP(0x7a, breg10) -HANDLE_DW_OP(0x7b, breg11) -HANDLE_DW_OP(0x7c, breg12) -HANDLE_DW_OP(0x7d, breg13) -HANDLE_DW_OP(0x7e, breg14) -HANDLE_DW_OP(0x7f, breg15) -HANDLE_DW_OP(0x80, breg16) -HANDLE_DW_OP(0x81, breg17) -HANDLE_DW_OP(0x82, breg18) -HANDLE_DW_OP(0x83, breg19) -HANDLE_DW_OP(0x84, breg20) -HANDLE_DW_OP(0x85, breg21) -HANDLE_DW_OP(0x86, breg22) -HANDLE_DW_OP(0x87, breg23) -HANDLE_DW_OP(0x88, breg24) -HANDLE_DW_OP(0x89, breg25) -HANDLE_DW_OP(0x8a, breg26) -HANDLE_DW_OP(0x8b, breg27) -HANDLE_DW_OP(0x8c, breg28) -HANDLE_DW_OP(0x8d, breg29) -HANDLE_DW_OP(0x8e, breg30) -HANDLE_DW_OP(0x8f, breg31) -HANDLE_DW_OP(0x90, regx) -HANDLE_DW_OP(0x91, fbreg) -HANDLE_DW_OP(0x92, bregx) -HANDLE_DW_OP(0x93, piece) -HANDLE_DW_OP(0x94, deref_size) -HANDLE_DW_OP(0x95, xderef_size) -HANDLE_DW_OP(0x96, nop) +HANDLE_DW_OP(0x03, addr, 2, DWARF) +HANDLE_DW_OP(0x06, deref, 2, DWARF) 
+HANDLE_DW_OP(0x08, const1u, 2, DWARF) +HANDLE_DW_OP(0x09, const1s, 2, DWARF) +HANDLE_DW_OP(0x0a, const2u, 2, DWARF) +HANDLE_DW_OP(0x0b, const2s, 2, DWARF) +HANDLE_DW_OP(0x0c, const4u, 2, DWARF) +HANDLE_DW_OP(0x0d, const4s, 2, DWARF) +HANDLE_DW_OP(0x0e, const8u, 2, DWARF) +HANDLE_DW_OP(0x0f, const8s, 2, DWARF) +HANDLE_DW_OP(0x10, constu, 2, DWARF) +HANDLE_DW_OP(0x11, consts, 2, DWARF) +HANDLE_DW_OP(0x12, dup, 2, DWARF) +HANDLE_DW_OP(0x13, drop, 2, DWARF) +HANDLE_DW_OP(0x14, over, 2, DWARF) +HANDLE_DW_OP(0x15, pick, 2, DWARF) +HANDLE_DW_OP(0x16, swap, 2, DWARF) +HANDLE_DW_OP(0x17, rot, 2, DWARF) +HANDLE_DW_OP(0x18, xderef, 2, DWARF) +HANDLE_DW_OP(0x19, abs, 2, DWARF) +HANDLE_DW_OP(0x1a, and, 2, DWARF) +HANDLE_DW_OP(0x1b, div, 2, DWARF) +HANDLE_DW_OP(0x1c, minus, 2, DWARF) +HANDLE_DW_OP(0x1d, mod, 2, DWARF) +HANDLE_DW_OP(0x1e, mul, 2, DWARF) +HANDLE_DW_OP(0x1f, neg, 2, DWARF) +HANDLE_DW_OP(0x20, not, 2, DWARF) +HANDLE_DW_OP(0x21, or, 2, DWARF) +HANDLE_DW_OP(0x22, plus, 2, DWARF) +HANDLE_DW_OP(0x23, plus_uconst, 2, DWARF) +HANDLE_DW_OP(0x24, shl, 2, DWARF) +HANDLE_DW_OP(0x25, shr, 2, DWARF) +HANDLE_DW_OP(0x26, shra, 2, DWARF) +HANDLE_DW_OP(0x27, xor, 2, DWARF) +HANDLE_DW_OP(0x28, bra, 2, DWARF) +HANDLE_DW_OP(0x29, eq, 2, DWARF) +HANDLE_DW_OP(0x2a, ge, 2, DWARF) +HANDLE_DW_OP(0x2b, gt, 2, DWARF) +HANDLE_DW_OP(0x2c, le, 2, DWARF) +HANDLE_DW_OP(0x2d, lt, 2, DWARF) +HANDLE_DW_OP(0x2e, ne, 2, DWARF) +HANDLE_DW_OP(0x2f, skip, 2, DWARF) +HANDLE_DW_OP(0x30, lit0, 2, DWARF) +HANDLE_DW_OP(0x31, lit1, 2, DWARF) +HANDLE_DW_OP(0x32, lit2, 2, DWARF) +HANDLE_DW_OP(0x33, lit3, 2, DWARF) +HANDLE_DW_OP(0x34, lit4, 2, DWARF) +HANDLE_DW_OP(0x35, lit5, 2, DWARF) +HANDLE_DW_OP(0x36, lit6, 2, DWARF) +HANDLE_DW_OP(0x37, lit7, 2, DWARF) +HANDLE_DW_OP(0x38, lit8, 2, DWARF) +HANDLE_DW_OP(0x39, lit9, 2, DWARF) +HANDLE_DW_OP(0x3a, lit10, 2, DWARF) +HANDLE_DW_OP(0x3b, lit11, 2, DWARF) +HANDLE_DW_OP(0x3c, lit12, 2, DWARF) +HANDLE_DW_OP(0x3d, lit13, 2, DWARF) +HANDLE_DW_OP(0x3e, lit14, 2, DWARF) 
+HANDLE_DW_OP(0x3f, lit15, 2, DWARF) +HANDLE_DW_OP(0x40, lit16, 2, DWARF) +HANDLE_DW_OP(0x41, lit17, 2, DWARF) +HANDLE_DW_OP(0x42, lit18, 2, DWARF) +HANDLE_DW_OP(0x43, lit19, 2, DWARF) +HANDLE_DW_OP(0x44, lit20, 2, DWARF) +HANDLE_DW_OP(0x45, lit21, 2, DWARF) +HANDLE_DW_OP(0x46, lit22, 2, DWARF) +HANDLE_DW_OP(0x47, lit23, 2, DWARF) +HANDLE_DW_OP(0x48, lit24, 2, DWARF) +HANDLE_DW_OP(0x49, lit25, 2, DWARF) +HANDLE_DW_OP(0x4a, lit26, 2, DWARF) +HANDLE_DW_OP(0x4b, lit27, 2, DWARF) +HANDLE_DW_OP(0x4c, lit28, 2, DWARF) +HANDLE_DW_OP(0x4d, lit29, 2, DWARF) +HANDLE_DW_OP(0x4e, lit30, 2, DWARF) +HANDLE_DW_OP(0x4f, lit31, 2, DWARF) +HANDLE_DW_OP(0x50, reg0, 2, DWARF) +HANDLE_DW_OP(0x51, reg1, 2, DWARF) +HANDLE_DW_OP(0x52, reg2, 2, DWARF) +HANDLE_DW_OP(0x53, reg3, 2, DWARF) +HANDLE_DW_OP(0x54, reg4, 2, DWARF) +HANDLE_DW_OP(0x55, reg5, 2, DWARF) +HANDLE_DW_OP(0x56, reg6, 2, DWARF) +HANDLE_DW_OP(0x57, reg7, 2, DWARF) +HANDLE_DW_OP(0x58, reg8, 2, DWARF) +HANDLE_DW_OP(0x59, reg9, 2, DWARF) +HANDLE_DW_OP(0x5a, reg10, 2, DWARF) +HANDLE_DW_OP(0x5b, reg11, 2, DWARF) +HANDLE_DW_OP(0x5c, reg12, 2, DWARF) +HANDLE_DW_OP(0x5d, reg13, 2, DWARF) +HANDLE_DW_OP(0x5e, reg14, 2, DWARF) +HANDLE_DW_OP(0x5f, reg15, 2, DWARF) +HANDLE_DW_OP(0x60, reg16, 2, DWARF) +HANDLE_DW_OP(0x61, reg17, 2, DWARF) +HANDLE_DW_OP(0x62, reg18, 2, DWARF) +HANDLE_DW_OP(0x63, reg19, 2, DWARF) +HANDLE_DW_OP(0x64, reg20, 2, DWARF) +HANDLE_DW_OP(0x65, reg21, 2, DWARF) +HANDLE_DW_OP(0x66, reg22, 2, DWARF) +HANDLE_DW_OP(0x67, reg23, 2, DWARF) +HANDLE_DW_OP(0x68, reg24, 2, DWARF) +HANDLE_DW_OP(0x69, reg25, 2, DWARF) +HANDLE_DW_OP(0x6a, reg26, 2, DWARF) +HANDLE_DW_OP(0x6b, reg27, 2, DWARF) +HANDLE_DW_OP(0x6c, reg28, 2, DWARF) +HANDLE_DW_OP(0x6d, reg29, 2, DWARF) +HANDLE_DW_OP(0x6e, reg30, 2, DWARF) +HANDLE_DW_OP(0x6f, reg31, 2, DWARF) +HANDLE_DW_OP(0x70, breg0, 2, DWARF) +HANDLE_DW_OP(0x71, breg1, 2, DWARF) +HANDLE_DW_OP(0x72, breg2, 2, DWARF) +HANDLE_DW_OP(0x73, breg3, 2, DWARF) +HANDLE_DW_OP(0x74, breg4, 2, DWARF) 
+HANDLE_DW_OP(0x75, breg5, 2, DWARF) +HANDLE_DW_OP(0x76, breg6, 2, DWARF) +HANDLE_DW_OP(0x77, breg7, 2, DWARF) +HANDLE_DW_OP(0x78, breg8, 2, DWARF) +HANDLE_DW_OP(0x79, breg9, 2, DWARF) +HANDLE_DW_OP(0x7a, breg10, 2, DWARF) +HANDLE_DW_OP(0x7b, breg11, 2, DWARF) +HANDLE_DW_OP(0x7c, breg12, 2, DWARF) +HANDLE_DW_OP(0x7d, breg13, 2, DWARF) +HANDLE_DW_OP(0x7e, breg14, 2, DWARF) +HANDLE_DW_OP(0x7f, breg15, 2, DWARF) +HANDLE_DW_OP(0x80, breg16, 2, DWARF) +HANDLE_DW_OP(0x81, breg17, 2, DWARF) +HANDLE_DW_OP(0x82, breg18, 2, DWARF) +HANDLE_DW_OP(0x83, breg19, 2, DWARF) +HANDLE_DW_OP(0x84, breg20, 2, DWARF) +HANDLE_DW_OP(0x85, breg21, 2, DWARF) +HANDLE_DW_OP(0x86, breg22, 2, DWARF) +HANDLE_DW_OP(0x87, breg23, 2, DWARF) +HANDLE_DW_OP(0x88, breg24, 2, DWARF) +HANDLE_DW_OP(0x89, breg25, 2, DWARF) +HANDLE_DW_OP(0x8a, breg26, 2, DWARF) +HANDLE_DW_OP(0x8b, breg27, 2, DWARF) +HANDLE_DW_OP(0x8c, breg28, 2, DWARF) +HANDLE_DW_OP(0x8d, breg29, 2, DWARF) +HANDLE_DW_OP(0x8e, breg30, 2, DWARF) +HANDLE_DW_OP(0x8f, breg31, 2, DWARF) +HANDLE_DW_OP(0x90, regx, 2, DWARF) +HANDLE_DW_OP(0x91, fbreg, 2, DWARF) +HANDLE_DW_OP(0x92, bregx, 2, DWARF) +HANDLE_DW_OP(0x93, piece, 2, DWARF) +HANDLE_DW_OP(0x94, deref_size, 2, DWARF) +HANDLE_DW_OP(0x95, xderef_size, 2, DWARF) +HANDLE_DW_OP(0x96, nop, 2, DWARF) // New in DWARF v3: -HANDLE_DW_OP(0x97, push_object_address) -HANDLE_DW_OP(0x98, call2) -HANDLE_DW_OP(0x99, call4) -HANDLE_DW_OP(0x9a, call_ref) -HANDLE_DW_OP(0x9b, form_tls_address) -HANDLE_DW_OP(0x9c, call_frame_cfa) -HANDLE_DW_OP(0x9d, bit_piece) +HANDLE_DW_OP(0x97, push_object_address, 3, DWARF) +HANDLE_DW_OP(0x98, call2, 3, DWARF) +HANDLE_DW_OP(0x99, call4, 3, DWARF) +HANDLE_DW_OP(0x9a, call_ref, 3, DWARF) +HANDLE_DW_OP(0x9b, form_tls_address, 3, DWARF) +HANDLE_DW_OP(0x9c, call_frame_cfa, 3, DWARF) +HANDLE_DW_OP(0x9d, bit_piece, 3, DWARF) // New in DWARF v4: -HANDLE_DW_OP(0x9e, implicit_value) -HANDLE_DW_OP(0x9f, stack_value) +HANDLE_DW_OP(0x9e, implicit_value, 4, DWARF) +HANDLE_DW_OP(0x9f, 
stack_value, 4, DWARF) // New in DWARF v5: -HANDLE_DW_OP(0xa0, implicit_pointer) -HANDLE_DW_OP(0xa1, addrx) -HANDLE_DW_OP(0xa2, constx) -HANDLE_DW_OP(0xa3, entry_value) -HANDLE_DW_OP(0xa4, const_type) -HANDLE_DW_OP(0xa5, regval_type) -HANDLE_DW_OP(0xa6, deref_type) -HANDLE_DW_OP(0xa7, xderef_type) -HANDLE_DW_OP(0xa8, convert) -HANDLE_DW_OP(0xa9, reinterpret) +HANDLE_DW_OP(0xa0, implicit_pointer, 5, DWARF) +HANDLE_DW_OP(0xa1, addrx, 5, DWARF) +HANDLE_DW_OP(0xa2, constx, 5, DWARF) +HANDLE_DW_OP(0xa3, entry_value, 5, DWARF) +HANDLE_DW_OP(0xa4, const_type, 5, DWARF) +HANDLE_DW_OP(0xa5, regval_type, 5, DWARF) +HANDLE_DW_OP(0xa6, deref_type, 5, DWARF) +HANDLE_DW_OP(0xa7, xderef_type, 5, DWARF) +HANDLE_DW_OP(0xa8, convert, 5, DWARF) +HANDLE_DW_OP(0xa9, reinterpret, 5, DWARF) // Vendor extensions: // Extensions for GNU-style thread-local storage. -HANDLE_DW_OP(0xe0, GNU_push_tls_address) +HANDLE_DW_OP(0xe0, GNU_push_tls_address, 0, GNU) // Extensions for Fission proposal. -HANDLE_DW_OP(0xfb, GNU_addr_index) -HANDLE_DW_OP(0xfc, GNU_const_index) +HANDLE_DW_OP(0xfb, GNU_addr_index, 0, GNU) +HANDLE_DW_OP(0xfc, GNU_const_index, 0, GNU) // DWARF languages. 
-HANDLE_DW_LANG(0x0001, C89) -HANDLE_DW_LANG(0x0002, C) -HANDLE_DW_LANG(0x0003, Ada83) -HANDLE_DW_LANG(0x0004, C_plus_plus) -HANDLE_DW_LANG(0x0005, Cobol74) -HANDLE_DW_LANG(0x0006, Cobol85) -HANDLE_DW_LANG(0x0007, Fortran77) -HANDLE_DW_LANG(0x0008, Fortran90) -HANDLE_DW_LANG(0x0009, Pascal83) -HANDLE_DW_LANG(0x000a, Modula2) +HANDLE_DW_LANG(0x0001, C89, 2, DWARF) +HANDLE_DW_LANG(0x0002, C, 2, DWARF) +HANDLE_DW_LANG(0x0003, Ada83, 2, DWARF) +HANDLE_DW_LANG(0x0004, C_plus_plus, 2, DWARF) +HANDLE_DW_LANG(0x0005, Cobol74, 2, DWARF) +HANDLE_DW_LANG(0x0006, Cobol85, 2, DWARF) +HANDLE_DW_LANG(0x0007, Fortran77, 2, DWARF) +HANDLE_DW_LANG(0x0008, Fortran90, 2, DWARF) +HANDLE_DW_LANG(0x0009, Pascal83, 2, DWARF) +HANDLE_DW_LANG(0x000a, Modula2, 2, DWARF) // New in DWARF v3: -HANDLE_DW_LANG(0x000b, Java) -HANDLE_DW_LANG(0x000c, C99) -HANDLE_DW_LANG(0x000d, Ada95) -HANDLE_DW_LANG(0x000e, Fortran95) -HANDLE_DW_LANG(0x000f, PLI) -HANDLE_DW_LANG(0x0010, ObjC) -HANDLE_DW_LANG(0x0011, ObjC_plus_plus) -HANDLE_DW_LANG(0x0012, UPC) -HANDLE_DW_LANG(0x0013, D) +HANDLE_DW_LANG(0x000b, Java, 3, DWARF) +HANDLE_DW_LANG(0x000c, C99, 3, DWARF) +HANDLE_DW_LANG(0x000d, Ada95, 3, DWARF) +HANDLE_DW_LANG(0x000e, Fortran95, 3, DWARF) +HANDLE_DW_LANG(0x000f, PLI, 3, DWARF) +HANDLE_DW_LANG(0x0010, ObjC, 3, DWARF) +HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 3, DWARF) +HANDLE_DW_LANG(0x0012, UPC, 3, DWARF) +HANDLE_DW_LANG(0x0013, D, 3, DWARF) // New in DWARF v4: -HANDLE_DW_LANG(0x0014, Python) +HANDLE_DW_LANG(0x0014, Python, 4, DWARF) // New in DWARF v5: -HANDLE_DW_LANG(0x0015, OpenCL) -HANDLE_DW_LANG(0x0016, Go) -HANDLE_DW_LANG(0x0017, Modula3) -HANDLE_DW_LANG(0x0018, Haskell) -HANDLE_DW_LANG(0x0019, C_plus_plus_03) -HANDLE_DW_LANG(0x001a, C_plus_plus_11) -HANDLE_DW_LANG(0x001b, OCaml) -HANDLE_DW_LANG(0x001c, Rust) -HANDLE_DW_LANG(0x001d, C11) -HANDLE_DW_LANG(0x001e, Swift) -HANDLE_DW_LANG(0x001f, Julia) -HANDLE_DW_LANG(0x0020, Dylan) -HANDLE_DW_LANG(0x0021, C_plus_plus_14) -HANDLE_DW_LANG(0x0022, 
Fortran03) -HANDLE_DW_LANG(0x0023, Fortran08) -HANDLE_DW_LANG(0x0024, RenderScript) -HANDLE_DW_LANG(0x0025, BLISS) +HANDLE_DW_LANG(0x0015, OpenCL, 5, DWARF) +HANDLE_DW_LANG(0x0016, Go, 5, DWARF) +HANDLE_DW_LANG(0x0017, Modula3, 5, DWARF) +HANDLE_DW_LANG(0x0018, Haskell, 5, DWARF) +HANDLE_DW_LANG(0x0019, C_plus_plus_03, 5, DWARF) +HANDLE_DW_LANG(0x001a, C_plus_plus_11, 5, DWARF) +HANDLE_DW_LANG(0x001b, OCaml, 5, DWARF) +HANDLE_DW_LANG(0x001c, Rust, 5, DWARF) +HANDLE_DW_LANG(0x001d, C11, 5, DWARF) +HANDLE_DW_LANG(0x001e, Swift, 5, DWARF) +HANDLE_DW_LANG(0x001f, Julia, 5, DWARF) +HANDLE_DW_LANG(0x0020, Dylan, 5, DWARF) +HANDLE_DW_LANG(0x0021, C_plus_plus_14, 5, DWARF) +HANDLE_DW_LANG(0x0022, Fortran03, 5, DWARF) +HANDLE_DW_LANG(0x0023, Fortran08, 5, DWARF) +HANDLE_DW_LANG(0x0024, RenderScript, 5, DWARF) +HANDLE_DW_LANG(0x0025, BLISS, 5, DWARF) // Vendor extensions: -HANDLE_DW_LANG(0x8001, Mips_Assembler) -HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript) -HANDLE_DW_LANG(0xb000, BORLAND_Delphi) +HANDLE_DW_LANG(0x8001, Mips_Assembler, 0, MIPS) +HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, GOOGLE) +HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, BORLAND) // DWARF attribute type encodings. 
-HANDLE_DW_ATE(0x01, address) -HANDLE_DW_ATE(0x02, boolean) -HANDLE_DW_ATE(0x03, complex_float) -HANDLE_DW_ATE(0x04, float) -HANDLE_DW_ATE(0x05, signed) -HANDLE_DW_ATE(0x06, signed_char) -HANDLE_DW_ATE(0x07, unsigned) -HANDLE_DW_ATE(0x08, unsigned_char) +HANDLE_DW_ATE(0x01, address, 2, DWARF) +HANDLE_DW_ATE(0x02, boolean, 2, DWARF) +HANDLE_DW_ATE(0x03, complex_float, 2, DWARF) +HANDLE_DW_ATE(0x04, float, 2, DWARF) +HANDLE_DW_ATE(0x05, signed, 2, DWARF) +HANDLE_DW_ATE(0x06, signed_char, 2, DWARF) +HANDLE_DW_ATE(0x07, unsigned, 2, DWARF) +HANDLE_DW_ATE(0x08, unsigned_char, 2, DWARF) // New in DWARF v3: -HANDLE_DW_ATE(0x09, imaginary_float) -HANDLE_DW_ATE(0x0a, packed_decimal) -HANDLE_DW_ATE(0x0b, numeric_string) -HANDLE_DW_ATE(0x0c, edited) -HANDLE_DW_ATE(0x0d, signed_fixed) -HANDLE_DW_ATE(0x0e, unsigned_fixed) -HANDLE_DW_ATE(0x0f, decimal_float) +HANDLE_DW_ATE(0x09, imaginary_float, 3, DWARF) +HANDLE_DW_ATE(0x0a, packed_decimal, 3, DWARF) +HANDLE_DW_ATE(0x0b, numeric_string, 3, DWARF) +HANDLE_DW_ATE(0x0c, edited, 3, DWARF) +HANDLE_DW_ATE(0x0d, signed_fixed, 3, DWARF) +HANDLE_DW_ATE(0x0e, unsigned_fixed, 3, DWARF) +HANDLE_DW_ATE(0x0f, decimal_float, 3, DWARF) // New in DWARF v4: -HANDLE_DW_ATE(0x10, UTF) +HANDLE_DW_ATE(0x10, UTF, 4, DWARF) // New in DWARF v5: -HANDLE_DW_ATE(0x11, UCS) -HANDLE_DW_ATE(0x12, ASCII) +HANDLE_DW_ATE(0x11, UCS, 5, DWARF) +HANDLE_DW_ATE(0x12, ASCII, 5, DWARF) // DWARF virtuality codes. HANDLE_DW_VIRTUALITY(0x00, none) diff --git a/include/llvm/Support/Dwarf.h b/include/llvm/Support/Dwarf.h index 84056682924eb..3061b7b5fa0f0 100644 --- a/include/llvm/Support/Dwarf.h +++ b/include/llvm/Support/Dwarf.h @@ -46,7 +46,15 @@ enum LLVMConstants : uint32_t { DWARF_VERSION = 4, // Default dwarf version we output. DW_PUBTYPES_VERSION = 2, // Section version number for .debug_pubtypes. DW_PUBNAMES_VERSION = 2, // Section version number for .debug_pubnames. - DW_ARANGES_VERSION = 2 // Section version number for .debug_aranges. 
+ DW_ARANGES_VERSION = 2, // Section version number for .debug_aranges. + // Identifiers we use to distinguish vendor extensions. + DWARF_VENDOR_DWARF = 0, // Defined in v2 or later of the DWARF standard. + DWARF_VENDOR_APPLE = 1, + DWARF_VENDOR_BORLAND = 2, + DWARF_VENDOR_GNU = 3, + DWARF_VENDOR_GOOGLE = 4, + DWARF_VENDOR_LLVM = 5, + DWARF_VENDOR_MIPS = 6 }; // Special ID values that distinguish a CIE from a FDE in DWARF CFI. @@ -55,7 +63,7 @@ const uint32_t DW_CIE_ID = UINT32_MAX; const uint64_t DW64_CIE_ID = UINT64_MAX; enum Tag : uint16_t { -#define HANDLE_DW_TAG(ID, NAME) DW_TAG_##NAME = ID, +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) DW_TAG_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_TAG_lo_user = 0x4080, DW_TAG_hi_user = 0xffff, @@ -92,20 +100,20 @@ inline bool isType(Tag T) { /// Attributes. enum Attribute : uint16_t { -#define HANDLE_DW_AT(ID, NAME) DW_AT_##NAME = ID, +#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) DW_AT_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_AT_lo_user = 0x2000, DW_AT_hi_user = 0x3fff, }; enum Form : uint16_t { -#define HANDLE_DW_FORM(ID, NAME) DW_FORM_##NAME = ID, +#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) DW_FORM_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_FORM_lo_user = 0x1f00, ///< Not specified by DWARF. 
}; enum LocationAtom { -#define HANDLE_DW_OP(ID, NAME) DW_OP_##NAME = ID, +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) DW_OP_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_OP_lo_user = 0xe0, DW_OP_hi_user = 0xff, @@ -113,7 +121,7 @@ enum LocationAtom { }; enum TypeKind { -#define HANDLE_DW_ATE(ID, NAME) DW_ATE_##NAME = ID, +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) DW_ATE_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_ATE_lo_user = 0x80, DW_ATE_hi_user = 0xff @@ -164,7 +172,7 @@ enum DefaultedMemberAttribute { }; enum SourceLanguage { -#define HANDLE_DW_LANG(ID, NAME) DW_LANG_##NAME = ID, +#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) DW_LANG_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_LANG_lo_user = 0x8000, DW_LANG_hi_user = 0xffff @@ -220,8 +228,8 @@ enum LineNumberExtendedOps { DW_LNE_hi_user = 0xff }; -enum LinerNumberEntryFormat { -#define HANDLE_DW_LNCT(ID, NAME) DW_DEFAULTED_##NAME = ID, +enum LineNumberEntryFormat { +#define HANDLE_DW_LNCT(ID, NAME) DW_LNCT_##NAME = ID, #include "llvm/Support/Dwarf.def" DW_LNCT_lo_user = 0x2000, DW_LNCT_hi_user = 0x3fff, @@ -406,6 +414,40 @@ unsigned getAttributeEncoding(StringRef EncodingString); unsigned getMacinfo(StringRef MacinfoString); /// @} +/// \defgroup DwarfConstantsVersioning Dwarf version for constants +/// +/// For constants defined by DWARF, returns the DWARF version when the constant +/// was first defined. For vendor extensions, if there is a version-related +/// policy for when to emit it, returns a version number for that policy. +/// Otherwise returns 0. 
+/// +/// @{ +unsigned TagVersion(Tag T); +unsigned AttributeVersion(Attribute A); +unsigned FormVersion(Form F); +unsigned OperationVersion(LocationAtom O); +unsigned AttributeEncodingVersion(TypeKind E); +unsigned LanguageVersion(SourceLanguage L); +/// @} + +/// \defgroup DwarfConstantsVendor Dwarf "vendor" for constants +/// +/// These functions return an identifier describing "who" defined the constant, +/// either the DWARF standard itself or the vendor who defined the extension. +/// +/// @{ +unsigned TagVendor(Tag T); +unsigned AttributeVendor(Attribute A); +unsigned FormVendor(Form F); +unsigned OperationVendor(LocationAtom O); +unsigned AttributeEncodingVendor(TypeKind E); +unsigned LanguageVendor(SourceLanguage L); +/// @} + +/// Tells whether the specified form is defined in the specified version, +/// or is an extension if extensions are allowed. +bool isValidFormForVersion(Form F, unsigned Version, bool ExtensionsOk = true); + /// \brief Returns the symbolic string representing Val when used as a value /// for attribute Attr. StringRef AttributeValueString(uint16_t Attr, unsigned Val); diff --git a/include/llvm/Support/GenericDomTree.h b/include/llvm/Support/GenericDomTree.h index 20f3ffdf3aab7..eb7c27d2ffa5b 100644 --- a/include/llvm/Support/GenericDomTree.h +++ b/include/llvm/Support/GenericDomTree.h @@ -276,32 +276,25 @@ protected: // NewBB is split and now it has one successor. Update dominator tree to // reflect this change. 
- template <class N, class GraphT> - void Split(DominatorTreeBaseByGraphTraits<GraphT> &DT, - typename GraphT::NodeRef NewBB) { + template <class N> + void Split(typename GraphTraits<N>::NodeRef NewBB) { + using GraphT = GraphTraits<N>; + using NodeRef = typename GraphT::NodeRef; assert(std::distance(GraphT::child_begin(NewBB), GraphT::child_end(NewBB)) == 1 && "NewBB should have a single successor!"); - typename GraphT::NodeRef NewBBSucc = *GraphT::child_begin(NewBB); + NodeRef NewBBSucc = *GraphT::child_begin(NewBB); - std::vector<typename GraphT::NodeRef> PredBlocks; - typedef GraphTraits<Inverse<N>> InvTraits; - for (typename InvTraits::ChildIteratorType - PI = InvTraits::child_begin(NewBB), - PE = InvTraits::child_end(NewBB); - PI != PE; ++PI) - PredBlocks.push_back(*PI); + std::vector<NodeRef> PredBlocks; + for (const auto Pred : children<Inverse<N>>(NewBB)) + PredBlocks.push_back(Pred); assert(!PredBlocks.empty() && "No predblocks?"); bool NewBBDominatesNewBBSucc = true; - for (typename InvTraits::ChildIteratorType - PI = InvTraits::child_begin(NewBBSucc), - E = InvTraits::child_end(NewBBSucc); - PI != E; ++PI) { - typename InvTraits::NodeRef ND = *PI; - if (ND != NewBB && !DT.dominates(NewBBSucc, ND) && - DT.isReachableFromEntry(ND)) { + for (const auto Pred : children<Inverse<N>>(NewBBSucc)) { + if (Pred != NewBB && !dominates(NewBBSucc, Pred) && + isReachableFromEntry(Pred)) { NewBBDominatesNewBBSucc = false; break; } @@ -312,7 +305,7 @@ protected: NodeT *NewBBIDom = nullptr; unsigned i = 0; for (i = 0; i < PredBlocks.size(); ++i) - if (DT.isReachableFromEntry(PredBlocks[i])) { + if (isReachableFromEntry(PredBlocks[i])) { NewBBIDom = PredBlocks[i]; break; } @@ -324,18 +317,18 @@ protected: return; for (i = i + 1; i < PredBlocks.size(); ++i) { - if (DT.isReachableFromEntry(PredBlocks[i])) - NewBBIDom = DT.findNearestCommonDominator(NewBBIDom, PredBlocks[i]); + if (isReachableFromEntry(PredBlocks[i])) + NewBBIDom = findNearestCommonDominator(NewBBIDom, 
PredBlocks[i]); } // Create the new dominator tree node... and set the idom of NewBB. - DomTreeNodeBase<NodeT> *NewBBNode = DT.addNewBlock(NewBB, NewBBIDom); + DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom); // If NewBB strictly dominates other blocks, then it is now the immediate // dominator of NewBBSucc. Update the dominator tree as appropriate. if (NewBBDominatesNewBBSucc) { - DomTreeNodeBase<NodeT> *NewBBSuccNode = DT.getNode(NewBBSucc); - DT.changeImmediateDominator(NewBBSuccNode, NewBBNode); + DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc); + changeImmediateDominator(NewBBSuccNode, NewBBNode); } } @@ -379,7 +372,7 @@ public: if (DomTreeNodes.size() != OtherDomTreeNodes.size()) return true; - for (const auto &DomTreeNode : this->DomTreeNodes) { + for (const auto &DomTreeNode : DomTreeNodes) { NodeT *BB = DomTreeNode.first; typename DomTreeNodeMapType::const_iterator OI = OtherDomTreeNodes.find(BB); @@ -663,10 +656,9 @@ public: /// tree to reflect this change. void splitBlock(NodeT *NewBB) { if (this->IsPostDominators) - this->Split<Inverse<NodeT *>, GraphTraits<Inverse<NodeT *>>>(*this, - NewBB); + Split<Inverse<NodeT *>>(NewBB); else - this->Split<NodeT *, GraphTraits<NodeT *>>(*this, NewBB); + Split<NodeT *>(NewBB); } /// print - Convert to human readable form @@ -677,7 +669,7 @@ public: o << "Inorder PostDominator Tree: "; else o << "Inorder Dominator Tree: "; - if (!this->DFSInfoValid) + if (!DFSInfoValid) o << "DFSNumbers invalid: " << SlowQueries << " slow queries."; o << "\n"; @@ -712,12 +704,12 @@ protected: // immediate dominator. 
NodeT *IDom = getIDom(BB); - assert(IDom || this->DomTreeNodes[nullptr]); + assert(IDom || DomTreeNodes[nullptr]); DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom); // Add a new tree node for this NodeT, and link it as a child of // IDomNode - return (this->DomTreeNodes[BB] = IDomNode->addChild( + return (DomTreeNodes[BB] = IDomNode->addChild( llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get(); } @@ -780,7 +772,7 @@ public: template <class FT> void recalculate(FT &F) { typedef GraphTraits<FT *> TraitsTy; reset(); - this->Vertex.push_back(nullptr); + Vertex.push_back(nullptr); if (!this->IsPostDominators) { // Initialize root diff --git a/include/llvm/Support/GraphWriter.h b/include/llvm/Support/GraphWriter.h index 7555d5b31a8d6..c318fea536511 100644 --- a/include/llvm/Support/GraphWriter.h +++ b/include/llvm/Support/GraphWriter.h @@ -143,10 +143,9 @@ public: void writeNodes() { // Loop over the graph, printing it out... - for (node_iterator I = GTraits::nodes_begin(G), E = GTraits::nodes_end(G); - I != E; ++I) - if (!isNodeHidden(*I)) - writeNode(*I); + for (const auto Node : nodes<GraphType>(G)) + if (!isNodeHidden(Node)) + writeNode(Node); } bool isNodeHidden(NodeRef Node) { diff --git a/include/llvm/Support/LowLevelTypeImpl.h b/include/llvm/Support/LowLevelTypeImpl.h index 02df4d806f13b..e18e58b7b5b23 100644 --- a/include/llvm/Support/LowLevelTypeImpl.h +++ b/include/llvm/Support/LowLevelTypeImpl.h @@ -39,100 +39,123 @@ class raw_ostream; class LLT { public: - enum TypeKind : uint16_t { - Invalid, - Scalar, - Pointer, - Vector, - }; - /// Get a low-level scalar or aggregate "bag of bits". static LLT scalar(unsigned SizeInBits) { assert(SizeInBits > 0 && "invalid scalar size"); - return LLT{Scalar, 1, SizeInBits}; + return LLT{/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0, + SizeInBits, /*AddressSpace=*/0}; } /// Get a low-level pointer in the given address space (defaulting to 0). 
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) { - return LLT{Pointer, AddressSpace, SizeInBits}; + assert(SizeInBits > 0 && "invalid pointer size"); + return LLT{/*isPointer=*/true, /*isVector=*/false, /*NumElements=*/0, + SizeInBits, AddressSpace}; } /// Get a low-level vector of some number of elements and element width. /// \p NumElements must be at least 2. static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) { assert(NumElements > 1 && "invalid number of vector elements"); - return LLT{Vector, NumElements, ScalarSizeInBits}; + assert(ScalarSizeInBits > 0 && "invalid vector element size"); + return LLT{/*isPointer=*/false, /*isVector=*/true, NumElements, + ScalarSizeInBits, /*AddressSpace=*/0}; } /// Get a low-level vector of some number of elements and element type. static LLT vector(uint16_t NumElements, LLT ScalarTy) { assert(NumElements > 1 && "invalid number of vector elements"); - assert(ScalarTy.isScalar() && "invalid vector element type"); - return LLT{Vector, NumElements, ScalarTy.getSizeInBits()}; + assert(!ScalarTy.isVector() && "invalid vector element type"); + return LLT{ScalarTy.isPointer(), /*isVector=*/true, NumElements, + ScalarTy.getSizeInBits(), + ScalarTy.isPointer() ? 
ScalarTy.getAddressSpace() : 0}; } - explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits) - : SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) { - assert((Kind != Vector || ElementsOrAddrSpace > 1) && - "invalid number of vector elements"); + explicit LLT(bool isPointer, bool isVector, uint16_t NumElements, + unsigned SizeInBits, unsigned AddressSpace) { + init(isPointer, isVector, NumElements, SizeInBits, AddressSpace); } - - explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {} + explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {} explicit LLT(MVT VT); - bool isValid() const { return Kind != Invalid; } + bool isValid() const { return RawData != 0; } - bool isScalar() const { return Kind == Scalar; } + bool isScalar() const { return isValid() && !IsPointer && !IsVector; } - bool isPointer() const { return Kind == Pointer; } + bool isPointer() const { return isValid() && IsPointer && !IsVector; } - bool isVector() const { return Kind == Vector; } + bool isVector() const { return isValid() && IsVector; } /// Returns the number of elements in a vector LLT. Must only be called on /// vector types. uint16_t getNumElements() const { - assert(isVector() && "cannot get number of elements on scalar/aggregate"); - return ElementsOrAddrSpace; + assert(IsVector && "cannot get number of elements on scalar/aggregate"); + if (!IsPointer) + return getFieldValue(VectorElementsFieldInfo); + else + return getFieldValue(PointerVectorElementsFieldInfo); } /// Returns the total size of the type. Must only be called on sized types. 
unsigned getSizeInBits() const { if (isPointer() || isScalar()) - return SizeInBits; - return SizeInBits * ElementsOrAddrSpace; + return getScalarSizeInBits(); + return getScalarSizeInBits() * getNumElements(); } unsigned getScalarSizeInBits() const { - return SizeInBits; + assert(RawData != 0 && "Invalid Type"); + if (!IsVector) { + if (!IsPointer) + return getFieldValue(ScalarSizeFieldInfo); + else + return getFieldValue(PointerSizeFieldInfo); + } else { + if (!IsPointer) + return getFieldValue(VectorSizeFieldInfo); + else + return getFieldValue(PointerVectorSizeFieldInfo); + } } unsigned getAddressSpace() const { - assert(isPointer() && "cannot get address space of non-pointer type"); - return ElementsOrAddrSpace; + assert(RawData != 0 && "Invalid Type"); + assert(IsPointer && "cannot get address space of non-pointer type"); + if (!IsVector) + return getFieldValue(PointerAddressSpaceFieldInfo); + else + return getFieldValue(PointerVectorAddressSpaceFieldInfo); } /// Returns the vector's element type. Only valid for vector types. LLT getElementType() const { assert(isVector() && "cannot get element type of scalar/aggregate"); - return scalar(SizeInBits); + if (IsPointer) + return pointer(getAddressSpace(), getScalarSizeInBits()); + else + return scalar(getScalarSizeInBits()); } /// Get a low-level type with half the size of the original, by halving the /// size of the scalar type involved. For example `s32` will become `s16`, /// `<2 x s32>` will become `<2 x s16>`. LLT halfScalarSize() const { - assert(!isPointer() && getScalarSizeInBits() > 1 && + assert(!IsPointer && getScalarSizeInBits() > 1 && getScalarSizeInBits() % 2 == 0 && "cannot half size of this type"); - return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2}; + return LLT{/*isPointer=*/false, IsVector ? true : false, + IsVector ? 
getNumElements() : (uint16_t)0, + getScalarSizeInBits() / 2, /*AddressSpace=*/0}; } /// Get a low-level type with twice the size of the original, by doubling the /// size of the scalar type involved. For example `s32` will become `s64`, /// `<2 x s32>` will become `<2 x s64>`. LLT doubleScalarSize() const { - assert(!isPointer() && "cannot change size of this type"); - return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2}; + assert(!IsPointer && "cannot change size of this type"); + return LLT{/*isPointer=*/false, IsVector ? true : false, + IsVector ? getNumElements() : (uint16_t)0, + getScalarSizeInBits() * 2, /*AddressSpace=*/0}; } /// Get a low-level type with half the size of the original, by halving the @@ -140,13 +163,13 @@ public: /// a vector type with an even number of elements. For example `<4 x s32>` /// will become `<2 x s32>`, `<2 x s32>` will become `s32`. LLT halfElements() const { - assert(isVector() && ElementsOrAddrSpace % 2 == 0 && - "cannot half odd vector"); - if (ElementsOrAddrSpace == 2) - return scalar(SizeInBits); + assert(isVector() && getNumElements() % 2 == 0 && "cannot half odd vector"); + if (getNumElements() == 2) + return scalar(getScalarSizeInBits()); - return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2), - SizeInBits}; + return LLT{/*isPointer=*/false, /*isVector=*/true, + (uint16_t)(getNumElements() / 2), getScalarSizeInBits(), + /*AddressSpace=*/0}; } /// Get a low-level type with twice the size of the original, by doubling the @@ -154,25 +177,105 @@ public: /// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling /// the number of elements in sN produces <2 x sN>. LLT doubleElements() const { - assert(!isPointer() && "cannot double elements in pointer"); - return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2), - SizeInBits}; + return LLT{IsPointer ? true : false, /*isVector=*/true, + (uint16_t)(getNumElements() * 2), getScalarSizeInBits(), + IsPointer ? 
getAddressSpace() : 0}; } void print(raw_ostream &OS) const; bool operator==(const LLT &RHS) const { - return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits && - ElementsOrAddrSpace == RHS.ElementsOrAddrSpace; + return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector && + RHS.RawData == RawData; } bool operator!=(const LLT &RHS) const { return !(*this == RHS); } friend struct DenseMapInfo<LLT>; + private: - unsigned SizeInBits; - uint16_t ElementsOrAddrSpace; - TypeKind Kind; + /// LLT is packed into 64 bits as follows: + /// isPointer : 1 + /// isVector : 1 + /// with 62 bits remaining for Kind-specific data, packed in bitfields + /// as described below. As there isn't a simple portable way to pack bits + /// into bitfields, here the different fields in the packed structure is + /// described in static const *Field variables. Each of these variables + /// is a 2-element array, with the first element describing the bitfield size + /// and the second element describing the bitfield offset. + typedef int BitFieldInfo[2]; + /// + /// This is how the bitfields are packed per Kind: + /// * Invalid: + /// gets encoded as RawData == 0, as that is an invalid encoding, since for + /// valid encodings, SizeInBits/SizeOfElement must be larger than 0. 
+ /// * Non-pointer scalar (isPointer == 0 && isVector == 0): + /// SizeInBits: 32; + static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 0}; + /// * Pointer (isPointer == 1 && isVector == 0): + /// SizeInBits: 16; + /// AddressSpace: 23; + static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 0}; + static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{ + 23, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]}; + /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1): + /// NumElements: 16; + /// SizeOfElement: 32; + static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 0}; + static const constexpr BitFieldInfo VectorSizeFieldInfo{ + 32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]}; + /// * Vector-of-pointer (isPointer == 1 && isVector == 1): + /// NumElements: 16; + /// SizeOfElement: 16; + /// AddressSpace: 23; + static const constexpr BitFieldInfo PointerVectorElementsFieldInfo{16, 0}; + static const constexpr BitFieldInfo PointerVectorSizeFieldInfo{ + 16, + PointerVectorElementsFieldInfo[1] + PointerVectorElementsFieldInfo[0]}; + static const constexpr BitFieldInfo PointerVectorAddressSpaceFieldInfo{ + 23, PointerVectorSizeFieldInfo[1] + PointerVectorSizeFieldInfo[0]}; + + uint64_t IsPointer : 1; + uint64_t IsVector : 1; + uint64_t RawData : 62; + + static uint64_t getMask(const BitFieldInfo FieldInfo) { + const int FieldSizeInBits = FieldInfo[0]; + return (((uint64_t)1) << FieldSizeInBits) - 1; + } + static uint64_t maskAndShift(uint64_t Val, uint64_t Mask, uint8_t Shift) { + assert(Val <= Mask && "Value too large for field"); + return (Val & Mask) << Shift; + } + static uint64_t maskAndShift(uint64_t Val, const BitFieldInfo FieldInfo) { + return maskAndShift(Val, getMask(FieldInfo), FieldInfo[1]); + } + uint64_t getFieldValue(const BitFieldInfo FieldInfo) const { + return getMask(FieldInfo) & (RawData >> FieldInfo[1]); + } + + void init(bool IsPointer, bool IsVector, uint16_t NumElements, + unsigned 
SizeInBits, unsigned AddressSpace) { + this->IsPointer = IsPointer; + this->IsVector = IsVector; + if (!IsVector) { + if (!IsPointer) + RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo); + else + RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) | + maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo); + } else { + assert(NumElements > 1 && "invalid number of vector elements"); + if (!IsPointer) + RawData = maskAndShift(NumElements, VectorElementsFieldInfo) | + maskAndShift(SizeInBits, VectorSizeFieldInfo); + else + RawData = + maskAndShift(NumElements, PointerVectorElementsFieldInfo) | + maskAndShift(SizeInBits, PointerVectorSizeFieldInfo) | + maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo); + } + } }; inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) { @@ -182,14 +285,18 @@ inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) { template<> struct DenseMapInfo<LLT> { static inline LLT getEmptyKey() { - return LLT{LLT::Invalid, 0, -1u}; + LLT Invalid; + Invalid.IsPointer = true; + return Invalid; } static inline LLT getTombstoneKey() { - return LLT{LLT::Invalid, 0, -2u}; + LLT Invalid; + Invalid.IsVector = true; + return Invalid; } static inline unsigned getHashValue(const LLT &Ty) { - uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) | - ((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind; + uint64_t Val = ((uint64_t)Ty.RawData) << 2 | ((uint64_t)Ty.IsPointer) << 1 | + ((uint64_t)Ty.IsVector); return DenseMapInfo<uint64_t>::getHashValue(Val); } static bool isEqual(const LLT &LHS, const LLT &RHS) { diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h index 19380b23d9d24..994456f9a6819 100644 --- a/include/llvm/Support/MathExtras.h +++ b/include/llvm/Support/MathExtras.h @@ -18,6 +18,7 @@ #include "llvm/Support/SwapByteOrder.h" #include <algorithm> #include <cassert> +#include <climits> #include <cstring> #include <type_traits> #include <limits> @@ -198,6 +199,21 @@ template 
<typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { return countTrailingZeros(Val, ZB_Undefined); } +/// \brief Create a bitmask with the N right-most bits set to 1, and all other +/// bits set to 0. Only unsigned types are allowed. +template <typename T> T maskTrailingOnes(unsigned N) { + static_assert(std::is_unsigned<T>::value, "Invalid type!"); + const unsigned Bits = CHAR_BIT * sizeof(T); + assert(N <= Bits && "Invalid bit index"); + return N == 0 ? 0 : (T(-1) >> (Bits - N)); +} + +/// \brief Create a bitmask with the N left-most bits set to 1, and all other +/// bits set to 0. Only unsigned types are allowed. +template <typename T> T maskLeadingOnes(unsigned N) { + return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N); +} + /// \brief Get the index of the last set bit starting from the least /// significant bit. /// diff --git a/include/llvm/Support/Recycler.h b/include/llvm/Support/Recycler.h index 1523aad38d46f..53db2e86d12d3 100644 --- a/include/llvm/Support/Recycler.h +++ b/include/llvm/Support/Recycler.h @@ -42,13 +42,16 @@ class Recycler { FreeNode *pop_val() { auto *Val = FreeList; + __asan_unpoison_memory_region(Val, Size); FreeList = FreeList->Next; + __msan_allocated_memory(Val, Size); return Val; } void push(FreeNode *N) { N->Next = FreeList; FreeList = N; + __asan_poison_memory_region(N, Size); } public: diff --git a/include/llvm/Support/Regex.h b/include/llvm/Support/Regex.h index 83db80359ee21..f498835bcb582 100644 --- a/include/llvm/Support/Regex.h +++ b/include/llvm/Support/Regex.h @@ -57,7 +57,7 @@ namespace llvm { /// isValid - returns the error encountered during regex compilation, or /// matching, if any. - bool isValid(std::string &Error); + bool isValid(std::string &Error) const; /// getNumMatches - In a valid regex, return the number of parenthesized /// matches it contains. 
The number filled in by match will include this diff --git a/include/llvm/Support/TargetParser.h b/include/llvm/Support/TargetParser.h index 68e6b27658102..f29cc40ffdd55 100644 --- a/include/llvm/Support/TargetParser.h +++ b/include/llvm/Support/TargetParser.h @@ -75,7 +75,7 @@ enum ArchExtKind : unsigned { AEK_CRC = 0x2, AEK_CRYPTO = 0x4, AEK_FP = 0x8, - AEK_HWDIV = 0x10, + AEK_HWDIVTHUMB = 0x10, AEK_HWDIVARM = 0x20, AEK_MP = 0x40, AEK_SIMD = 0x80, diff --git a/include/llvm/TableGen/StringToOffsetTable.h b/include/llvm/TableGen/StringToOffsetTable.h index aaf2a356ffab6..4b11e889ea6c7 100644 --- a/include/llvm/TableGen/StringToOffsetTable.h +++ b/include/llvm/TableGen/StringToOffsetTable.h @@ -27,6 +27,8 @@ class StringToOffsetTable { std::string AggregateString; public: + bool Empty() const { return StringOffset.empty(); } + unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) { auto IterBool = StringOffset.insert(std::make_pair(Str, AggregateString.size())); diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 85297ae837c56..24039ea10816e 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -230,6 +230,12 @@ public: return MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); } + /// Return the type for frame index, which is determined by + /// the alloca address space specified through the data layout. + MVT getFrameIndexTy(const DataLayout &DL) const { + return getPointerTy(DL, DL.getAllocaAddrSpace()); + } + /// EVT is not used in-tree, but is used by out-of-tree target. /// A documentation for this function would be nice... virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const; @@ -2807,7 +2813,7 @@ public: /// Return true if the target may be able emit the call instruction as a tail /// call. This is used by optimization passes to determine if it's profitable /// to duplicate return instructions to enable tailcall optimization. 
- virtual bool mayBeEmittedAsTailCall(CallInst *) const { + virtual bool mayBeEmittedAsTailCall(const CallInst *) const { return false; } diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h index a2978663a4d14..a602498e5f221 100644 --- a/include/llvm/Transforms/Utils/CodeExtractor.h +++ b/include/llvm/Transforms/Utils/CodeExtractor.h @@ -65,14 +65,6 @@ template <typename T> class ArrayRef; /// Blocks containing EHPads, allocas, invokes, or vastarts are not valid. static bool isBlockValidForExtraction(const BasicBlock &BB); - /// \brief Create a code extractor for a single basic block. - /// - /// In this formation, we don't require a dominator tree. The given basic - /// block is set up for extraction. - CodeExtractor(BasicBlock *BB, bool AggregateArgs = false, - BlockFrequencyInfo *BFI = nullptr, - BranchProbabilityInfo *BPI = nullptr); - /// \brief Create a code extractor for a sequence of blocks. /// /// Given a sequence of basic blocks where the first block in the sequence @@ -91,14 +83,6 @@ template <typename T> class ArrayRef; BlockFrequencyInfo *BFI = nullptr, BranchProbabilityInfo *BPI = nullptr); - /// \brief Create a code extractor for a region node. - /// - /// Behaves just like the generic code sequence constructor, but uses the - /// block sequence of the region node passed in. - CodeExtractor(DominatorTree &DT, const RegionNode &RN, - bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr, - BranchProbabilityInfo *BPI = nullptr); - /// \brief Perform the extraction, returning the new function. 
/// /// Returns zero when called on a CodeExtractor instance where isEligible diff --git a/include/llvm/XRay/InstrumentationMap.h b/include/llvm/XRay/InstrumentationMap.h index f7286c52ff42e..0342da0a2f0fa 100644 --- a/include/llvm/XRay/InstrumentationMap.h +++ b/include/llvm/XRay/InstrumentationMap.h @@ -59,6 +59,7 @@ struct YAMLXRaySledEntry { yaml::Hex64 Function; SledEntry::FunctionKinds Kind; bool AlwaysInstrument; + std::string FunctionName; }; /// The InstrumentationMap represents the computed function id's and indicated @@ -115,6 +116,7 @@ template <> struct MappingTraits<xray::YAMLXRaySledEntry> { IO.mapRequired("function", Entry.Function); IO.mapRequired("kind", Entry.Kind); IO.mapRequired("always-instrument", Entry.AlwaysInstrument); + IO.mapOptional("function-name", Entry.FunctionName); } static constexpr bool flow = true; diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 09582cf9a71d0..3db041cc0fa6f 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -808,7 +808,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS, // well. Or alternatively, replace all of this with inaccessiblememonly once // that's implemented fully. auto *Inst = CS.getInstruction(); - if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI)) { + if (isMallocOrCallocLikeFn(Inst, &TLI)) { // Be conservative if the accessed pointer may alias the allocation - // fallback to the generic handling below. 
if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias) @@ -925,9 +925,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1, const DataLayout &DL) { assert(GEP1->getPointerOperand()->stripPointerCasts() == - GEP2->getPointerOperand()->stripPointerCasts() && - GEP1->getPointerOperand()->getType() == - GEP2->getPointerOperand()->getType() && + GEP2->getPointerOperand()->stripPointerCasts() && + GEP1->getPointerOperandType() == GEP2->getPointerOperandType() && "Expected GEPs with the same pointer operand"); // Try to determine whether GEP1 and GEP2 index through arrays, into structs, @@ -1186,9 +1185,8 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, // just the same underlying object), see if that tells us anything about // the resulting pointers. if (GEP1->getPointerOperand()->stripPointerCasts() == - GEP2->getPointerOperand()->stripPointerCasts() && - GEP1->getPointerOperand()->getType() == - GEP2->getPointerOperand()->getType()) { + GEP2->getPointerOperand()->stripPointerCasts() && + GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) { AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL); // If we couldn't find anything interesting, don't abandon just yet. if (R != MayAlias) diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp index 5935dec15c701..0dc4475ca0e29 100644 --- a/lib/Analysis/BranchProbabilityInfo.cpp +++ b/lib/Analysis/BranchProbabilityInfo.cpp @@ -72,6 +72,32 @@ static const uint32_t UR_TAKEN_WEIGHT = 1; /// easily subsume it. static const uint32_t UR_NONTAKEN_WEIGHT = 1024*1024 - 1; +/// \brief Returns the branch probability for unreachable edge according to +/// heuristic. +/// +/// This is the branch probability being taken to a block that terminates +/// (eventually) in unreachable. These are predicted as unlikely as possible. 
+static BranchProbability getUnreachableProbability(uint64_t UnreachableCount) { + assert(UnreachableCount > 0 && "UnreachableCount must be > 0"); + return BranchProbability::getBranchProbability( + UR_TAKEN_WEIGHT, + (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * UnreachableCount); +} + +/// \brief Returns the branch probability for reachable edge according to +/// heuristic. +/// +/// This is the branch probability not being taken toward a block that +/// terminates (eventually) in unreachable. Such a branch is essentially never +/// taken. Set the weight to an absurdly high value so that nested loops don't +/// easily subsume it. +static BranchProbability getReachableProbability(uint64_t ReachableCount) { + assert(ReachableCount > 0 && "ReachableCount must be > 0"); + return BranchProbability::getBranchProbability( + UR_NONTAKEN_WEIGHT, + (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * ReachableCount); +} + /// \brief Weight for a branch taken going into a cold block. /// /// This is the weight for a branch taken toward a block marked @@ -179,7 +205,11 @@ BranchProbabilityInfo::updatePostDominatedByColdCall(const BasicBlock *BB) { /// unreachable-terminated block as extremely unlikely. bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) { const TerminatorInst *TI = BB->getTerminator(); - if (TI->getNumSuccessors() == 0) + assert(TI->getNumSuccessors() > 1 && "expected more than one successor!"); + + // Return false here so that edge weights for InvokeInst could be decided + // in calcInvokeHeuristics(). + if (isa<InvokeInst>(TI)) return false; SmallVector<unsigned, 4> UnreachableEdges; @@ -191,14 +221,8 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) { else ReachableEdges.push_back(I.getSuccessorIndex()); - // Skip probabilities if this block has a single successor or if all were - // reachable. 
- if (TI->getNumSuccessors() == 1 || UnreachableEdges.empty()) - return false; - - // Return false here so that edge weights for InvokeInst could be decided - // in calcInvokeHeuristics(). - if (isa<InvokeInst>(TI)) + // Skip probabilities if all were reachable. + if (UnreachableEdges.empty()) return false; if (ReachableEdges.empty()) { @@ -208,12 +232,8 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) { return true; } - auto UnreachableProb = BranchProbability::getBranchProbability( - UR_TAKEN_WEIGHT, (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * - uint64_t(UnreachableEdges.size())); - auto ReachableProb = BranchProbability::getBranchProbability( - UR_NONTAKEN_WEIGHT, - (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * uint64_t(ReachableEdges.size())); + auto UnreachableProb = getUnreachableProbability(UnreachableEdges.size()); + auto ReachableProb = getReachableProbability(ReachableEdges.size()); for (unsigned SuccIdx : UnreachableEdges) setEdgeProbability(BB, SuccIdx, UnreachableProb); @@ -224,11 +244,12 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) { } // Propagate existing explicit probabilities from either profile data or -// 'expect' intrinsic processing. +// 'expect' intrinsic processing. Examine metadata against unreachable +// heuristic. The probability of the edge coming to unreachable block is +// set to min of metadata and unreachable heuristic. bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) { const TerminatorInst *TI = BB->getTerminator(); - if (TI->getNumSuccessors() == 1) - return false; + assert(TI->getNumSuccessors() > 1 && "expected more than one successor!"); if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI)) return false; @@ -249,6 +270,8 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) { // be scaled to fit in 32 bits. 
uint64_t WeightSum = 0; SmallVector<uint32_t, 2> Weights; + SmallVector<unsigned, 2> UnreachableIdxs; + SmallVector<unsigned, 2> ReachableIdxs; Weights.reserve(TI->getNumSuccessors()); for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) { ConstantInt *Weight = @@ -259,6 +282,10 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) { "Too many bits for uint32_t"); Weights.push_back(Weight->getZExtValue()); WeightSum += Weights.back(); + if (PostDominatedByUnreachable.count(TI->getSuccessor(i - 1))) + UnreachableIdxs.push_back(i - 1); + else + ReachableIdxs.push_back(i - 1); } assert(Weights.size() == TI->getNumSuccessors() && "Checked above"); @@ -267,20 +294,52 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) { uint64_t ScalingFactor = (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1; - WeightSum = 0; - for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) { - Weights[i] /= ScalingFactor; - WeightSum += Weights[i]; + if (ScalingFactor > 1) { + WeightSum = 0; + for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) { + Weights[i] /= ScalingFactor; + WeightSum += Weights[i]; + } } - if (WeightSum == 0) { - for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) - setEdgeProbability(BB, i, {1, e}); - } else { + if (WeightSum == 0 || ReachableIdxs.size() == 0) { for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) - setEdgeProbability(BB, i, {Weights[i], static_cast<uint32_t>(WeightSum)}); + Weights[i] = 1; + WeightSum = TI->getNumSuccessors(); + } + + // Set the probability. + SmallVector<BranchProbability, 2> BP; + for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) + BP.push_back({ Weights[i], static_cast<uint32_t>(WeightSum) }); + + // Examine the metadata against unreachable heuristic. + // If the unreachable heuristic is more strong then we use it for this edge. 
+ if (UnreachableIdxs.size() > 0 && ReachableIdxs.size() > 0) { + auto ToDistribute = BranchProbability::getZero(); + auto UnreachableProb = getUnreachableProbability(UnreachableIdxs.size()); + for (auto i : UnreachableIdxs) + if (UnreachableProb < BP[i]) { + ToDistribute += BP[i] - UnreachableProb; + BP[i] = UnreachableProb; + } + + // If we modified the probability of some edges then we must distribute + // the difference between reachable blocks. + if (ToDistribute > BranchProbability::getZero()) { + BranchProbability PerEdge = ToDistribute / ReachableIdxs.size(); + for (auto i : ReachableIdxs) { + BP[i] += PerEdge; + ToDistribute -= PerEdge; + } + // Tail goes to the first reachable edge. + BP[ReachableIdxs[0]] += ToDistribute; + } } + for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) + setEdgeProbability(BB, i, BP[i]); + assert(WeightSum <= UINT32_MAX && "Expected weights to scale down to 32 bits"); @@ -297,7 +356,11 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) { /// Return false, otherwise. bool BranchProbabilityInfo::calcColdCallHeuristics(const BasicBlock *BB) { const TerminatorInst *TI = BB->getTerminator(); - if (TI->getNumSuccessors() == 0) + assert(TI->getNumSuccessors() > 1 && "expected more than one successor!"); + + // Return false here so that edge weights for InvokeInst could be decided + // in calcInvokeHeuristics(). + if (isa<InvokeInst>(TI)) return false; // Determine which successors are post-dominated by a cold block. @@ -309,13 +372,8 @@ bool BranchProbabilityInfo::calcColdCallHeuristics(const BasicBlock *BB) { else NormalEdges.push_back(I.getSuccessorIndex()); - // Return false here so that edge weights for InvokeInst could be decided - // in calcInvokeHeuristics(). - if (isa<InvokeInst>(TI)) - return false; - - // Skip probabilities if this block has a single successor. - if (TI->getNumSuccessors() == 1 || ColdEdges.empty()) + // Skip probabilities if no cold edges. 
+  if (ColdEdges.empty())
    return false;

  if (NormalEdges.empty()) {
@@ -698,10 +756,13 @@ void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LI) {
    DEBUG(dbgs() << "Computing probabilities for " << BB->getName() << "\n");
    updatePostDominatedByUnreachable(BB);
    updatePostDominatedByColdCall(BB);
-    if (calcUnreachableHeuristics(BB))
+    // If there are fewer than two successors, don't set any probability.
+    if (BB->getTerminator()->getNumSuccessors() < 2)
      continue;
    if (calcMetadataWeights(BB))
      continue;
+    if (calcUnreachableHeuristics(BB))
+      continue;
    if (calcColdCallHeuristics(BB))
      continue;
    if (calcLoopBranchHeuristics(BB, LI))
diff --git a/lib/Analysis/CFLGraph.h b/lib/Analysis/CFLGraph.h
index e526e0e16aa7b..75726e84569b3 100644
--- a/lib/Analysis/CFLGraph.h
+++ b/lib/Analysis/CFLGraph.h
@@ -400,8 +400,7 @@ template <typename CFLAA> class CFLGraphBuilder {
      // TODO: address other common library functions such as realloc(),
      // strdup(),
      // etc.
-      if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI) ||
-          isFreeCall(Inst, &TLI))
+      if (isMallocOrCallocLikeFn(Inst, &TLI) || isFreeCall(Inst, &TLI))
        return;

      // TODO: Add support for noalias args/all the other fun function
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index e12f640394e65..2259fbaeb982d 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -75,20 +75,16 @@ static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
 static Value *SimplifyCastInst(unsigned, Value *, Type *, const Query &,
                                unsigned);

-/// For a boolean type, or a vector of boolean type, return false, or
-/// a vector with every element false, as appropriate for the type.
+/// For a boolean type or a vector of boolean type, return false or a vector
+/// with every element false.
static Constant *getFalse(Type *Ty) { - assert(Ty->getScalarType()->isIntegerTy(1) && - "Expected i1 type or a vector of i1!"); - return Constant::getNullValue(Ty); + return ConstantInt::getFalse(Ty); } -/// For a boolean type, or a vector of boolean type, return true, or -/// a vector with every element true, as appropriate for the type. +/// For a boolean type or a vector of boolean type, return true or a vector +/// with every element true. static Constant *getTrue(Type *Ty) { - assert(Ty->getScalarType()->isIntegerTy(1) && - "Expected i1 type or a vector of i1!"); - return Constant::getAllOnesValue(Ty); + return ConstantInt::getTrue(Ty); } /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"? @@ -572,11 +568,11 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, match(Op1, m_Not(m_Specific(Op0)))) return Constant::getAllOnesValue(Ty); - // add nsw/nuw (xor Y, signbit), signbit --> Y + // add nsw/nuw (xor Y, signmask), signmask --> Y // The no-wrapping add guarantees that the top bit will be set by the add. // Therefore, the xor must be clearing the already set sign bit of Y. - if ((isNSW || isNUW) && match(Op1, m_SignBit()) && - match(Op0, m_Xor(m_Value(Y), m_SignBit()))) + if ((isNSW || isNUW) && match(Op1, m_SignMask()) && + match(Op0, m_Xor(m_Value(Y), m_SignMask()))) return Y; /// i1 add -> xor. 
@@ -1085,7 +1081,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) && match(Op1, m_ConstantInt(C2))) { bool Overflow; - C1->getValue().umul_ov(C2->getValue(), Overflow); + (void)C1->getValue().umul_ov(C2->getValue(), Overflow); if (Overflow) return Constant::getNullValue(Op0->getType()); } @@ -2823,7 +2819,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, return ConstantInt::getTrue(RHS->getContext()); } } - if (CIVal->isSignBit() && *CI2Val == 1) { + if (CIVal->isSignMask() && *CI2Val == 1) { if (Pred == ICmpInst::ICMP_UGT) return ConstantInt::getFalse(RHS->getContext()); if (Pred == ICmpInst::ICMP_ULE) @@ -3800,6 +3796,8 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, Type *GEPTy = PointerType::get(LastType, AS); if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType())) GEPTy = VectorType::get(GEPTy, VT->getNumElements()); + else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType())) + GEPTy = VectorType::get(GEPTy, VT->getNumElements()); if (isa<UndefValue>(Ops[0])) return UndefValue::get(GEPTy); @@ -4082,6 +4080,60 @@ Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, RecursionLimit); } +/// For the given destination element of a shuffle, peek through shuffles to +/// match a root vector source operand that contains that element in the same +/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). +static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, + Constant *Mask, Value *RootVec, int RootElt, + unsigned MaxRecurse) { + if (!MaxRecurse--) + return nullptr; + + // Bail out if any mask value is undefined. That kind of shuffle may be + // simplified further based on demanded bits or other folds. 
+ int MaskVal = ShuffleVectorInst::getMaskValue(Mask, RootElt); + if (MaskVal == -1) + return nullptr; + + // The mask value chooses which source operand we need to look at next. + Value *SourceOp; + int InVecNumElts = Op0->getType()->getVectorNumElements(); + if (MaskVal < InVecNumElts) { + RootElt = MaskVal; + SourceOp = Op0; + } else { + RootElt = MaskVal - InVecNumElts; + SourceOp = Op1; + } + + // If the source operand is a shuffle itself, look through it to find the + // matching root vector. + if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) { + return foldIdentityShuffles( + DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1), + SourceShuf->getMask(), RootVec, RootElt, MaxRecurse); + } + + // TODO: Look through bitcasts? What if the bitcast changes the vector element + // size? + + // The source operand is not a shuffle. Initialize the root vector value for + // this shuffle if that has not been done yet. + if (!RootVec) + RootVec = SourceOp; + + // Give up as soon as a source operand does not match the existing root value. + if (RootVec != SourceOp) + return nullptr; + + // The element must be coming from the same lane in the source vector + // (although it may have crossed lanes in intermediate shuffles). + if (RootElt != DestElt) + return nullptr; + + return RootVec; +} + static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, Type *RetTy, const Query &Q, unsigned MaxRecurse) { @@ -4126,7 +4178,28 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, OpShuf->getMask()->getSplatValue()) return Op1; - return nullptr; + // Don't fold a shuffle with undef mask elements. This may get folded in a + // better way using demanded bits or other analysis. + // TODO: Should we allow this? 
+ for (unsigned i = 0; i != MaskNumElts; ++i) + if (ShuffleVectorInst::getMaskValue(Mask, i) == -1) + return nullptr; + + // Check if every element of this shuffle can be mapped back to the + // corresponding element of a single root vector. If so, we don't need this + // shuffle. This handles simple identity shuffles as well as chains of + // shuffles that may widen/narrow and/or move elements across lanes and back. + Value *RootVec = nullptr; + for (unsigned i = 0; i != MaskNumElts; ++i) { + // Note that recursion is limited for each vector element, so if any element + // exceeds the limit, this will fail to simplify. + RootVec = foldIdentityShuffles(i, Op0, Op1, Mask, RootVec, i, MaxRecurse); + + // We can't replace a widening/narrowing shuffle with one of its operands. + if (!RootVec || RootVec->getType() != RetTy) + return nullptr; + } + return RootVec; } /// Given operands for a ShuffleVectorInst, fold the result or return null. diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp index b8c4449047238..7983d62c2f7aa 100644 --- a/lib/Analysis/MemoryBuiltins.cpp +++ b/lib/Analysis/MemoryBuiltins.cpp @@ -37,6 +37,7 @@ enum AllocType : uint8_t { CallocLike = 1<<2, // allocates + bzero ReallocLike = 1<<3, // reallocates StrDupLike = 1<<4, + MallocOrCallocLike = MallocLike | CallocLike, AllocLike = MallocLike | CallocLike | StrDupLike, AnyAlloc = AllocLike | ReallocLike }; @@ -77,8 +78,8 @@ static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = { // TODO: Handle "int posix_memalign(void **, size_t, size_t)" }; -static Function *getCalledFunction(const Value *V, bool LookThroughBitCast, - bool &IsNoBuiltin) { +static const Function *getCalledFunction(const Value *V, bool LookThroughBitCast, + bool &IsNoBuiltin) { // Don't care about intrinsics in this case. 
  if (isa<IntrinsicInst>(V))
    return nullptr;

@@ -86,13 +87,13 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast,
  if (LookThroughBitCast)
    V = V->stripPointerCasts();

-  CallSite CS(const_cast<Value*>(V));
+  ImmutableCallSite CS(V);
  if (!CS.getInstruction())
    return nullptr;

  IsNoBuiltin = CS.isNoBuiltin();

-  Function *Callee = CS.getCalledFunction();
+  const Function *Callee = CS.getCalledFunction();
  if (!Callee || !Callee->isDeclaration())
    return nullptr;
  return Callee;
@@ -220,6 +221,14 @@ bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
 }

 /// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory similar to malloc or calloc.
+bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                                  bool LookThroughBitCast) {
+  return getAllocationData(V, MallocOrCallocLike, TLI,
+                           LookThroughBitCast).hasValue();
+}
+
+/// \brief Tests if a value is a call or invoke to a library function that
 /// allocates memory (either malloc, calloc, or strdup like).
 bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
diff --git a/lib/Analysis/MemorySSA.cpp b/lib/Analysis/MemorySSA.cpp
index 910170561abf6..2480fe44d5c0e 100644
--- a/lib/Analysis/MemorySSA.cpp
+++ b/lib/Analysis/MemorySSA.cpp
@@ -1291,7 +1291,6 @@ void MemorySSA::buildMemorySSA() {
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
-  SmallPtrSet<BasicBlock *, 32> DefUseBlocks;

  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
for (BasicBlock &B : F) { @@ -1316,8 +1315,6 @@ void MemorySSA::buildMemorySSA() { } if (InsertIntoDef) DefiningBlocks.insert(&B); - if (Accesses) - DefUseBlocks.insert(&B); } placePHINodes(DefiningBlocks, BBNumbers); diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index ca32cf3c7c342..700c383a9dd43 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -1093,7 +1093,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, APInt Mult(W, i); unsigned TwoFactors = Mult.countTrailingZeros(); T += TwoFactors; - Mult = Mult.lshr(TwoFactors); + Mult.lshrInPlace(TwoFactors); OddFactorial *= Mult; } @@ -1276,7 +1276,8 @@ static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, namespace { struct ExtendOpTraitsBase { - typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *); + typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)( + const SCEV *, Type *, ScalarEvolution::ExtendCacheTy &Cache); }; // Used to make code generic over signed and unsigned overflow. 
@@ -1305,8 +1306,9 @@ struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { } }; -const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< - SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; +const ExtendOpTraitsBase::GetExtendExprTy + ExtendOpTraits<SCEVSignExtendExpr>::GetExtendExpr = + &ScalarEvolution::getSignExtendExprCached; template <> struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { @@ -1321,8 +1323,9 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { } }; -const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< - SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; +const ExtendOpTraitsBase::GetExtendExprTy + ExtendOpTraits<SCEVZeroExtendExpr>::GetExtendExpr = + &ScalarEvolution::getZeroExtendExprCached; } // The recurrence AR has been shown to have no signed/unsigned wrap or something @@ -1334,7 +1337,8 @@ const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< // "sext/zext(PostIncAR)" template <typename ExtendOpTy> static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, - ScalarEvolution *SE) { + ScalarEvolution *SE, + ScalarEvolution::ExtendCacheTy &Cache) { auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; @@ -1381,9 +1385,9 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); const SCEV *OperandExtendedStart = - SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy), - (SE->*GetExtendExpr)(Step, WideTy)); - if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) { + SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Cache), + (SE->*GetExtendExpr)(Step, WideTy, Cache)); + if ((SE->*GetExtendExpr)(Start, WideTy, Cache) == OperandExtendedStart) { if (PreAR && AR->getNoWrapFlags(WrapType)) { // If 
we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then @@ -1408,15 +1412,17 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, // Get the normalized zero or sign extended expression for this AddRec's Start. template <typename ExtendOpTy> static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, - ScalarEvolution *SE) { + ScalarEvolution *SE, + ScalarEvolution::ExtendCacheTy &Cache) { auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; - const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE); + const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Cache); if (!PreStart) - return (SE->*GetExtendExpr)(AR->getStart(), Ty); + return (SE->*GetExtendExpr)(AR->getStart(), Ty, Cache); - return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty), - (SE->*GetExtendExpr)(PreStart, Ty)); + return SE->getAddExpr( + (SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, Cache), + (SE->*GetExtendExpr)(PreStart, Ty, Cache)); } // Try to prove away overflow by looking at "nearby" add recurrences. A @@ -1496,8 +1502,31 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, return false; } -const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, - Type *Ty) { +const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty) { + // Use the local cache to prevent exponential behavior of + // getZeroExtendExprImpl. + ExtendCacheTy Cache; + return getZeroExtendExprCached(Op, Ty, Cache); +} + +/// Query \p Cache before calling getZeroExtendExprImpl. If there is no +/// related entry in the \p Cache, call getZeroExtendExprImpl and save +/// the result in the \p Cache. 
+const SCEV *ScalarEvolution::getZeroExtendExprCached(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache) { + auto It = Cache.find({Op, Ty}); + if (It != Cache.end()) + return It->second; + const SCEV *ZExt = getZeroExtendExprImpl(Op, Ty, Cache); + auto InsertResult = Cache.insert({{Op, Ty}, ZExt}); + assert(InsertResult.second && "Expect the key was not in the cache"); + (void)InsertResult; + return ZExt; +} + +/// The real implementation of getZeroExtendExpr. +const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && @@ -1507,11 +1536,11 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( - cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); + cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); // zext(zext(x)) --> zext(x) if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) - return getZeroExtendExpr(SZ->getOperand(), Ty); + return getZeroExtendExprCached(SZ->getOperand(), Ty, Cache); // Before doing any expensive analysis, check to see if we've already // computed a SCEV for this Op and Ty. @@ -1555,8 +1584,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, // we don't need to do any further analysis. if (AR->hasNoUnsignedWrap()) return getAddRecExpr( - getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), - getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), + getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags()); // Check whether the backedge-taken count is SCEVCouldNotCompute. 
// Note that this serves two purposes: It filters out loops that are @@ -1581,21 +1610,22 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); // Check whether Start+Step*MaxBECount has no unsigned overflow. const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step); - const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy); - const SCEV *WideStart = getZeroExtendExpr(Start, WideTy); + const SCEV *ZAdd = + getZeroExtendExprCached(getAddExpr(Start, ZMul), WideTy, Cache); + const SCEV *WideStart = getZeroExtendExprCached(Start, WideTy, Cache); const SCEV *WideMaxBECount = - getZeroExtendExpr(CastedMaxBECount, WideTy); - const SCEV *OperandExtendedAdd = - getAddExpr(WideStart, - getMulExpr(WideMaxBECount, - getZeroExtendExpr(Step, WideTy))); + getZeroExtendExprCached(CastedMaxBECount, WideTy, Cache); + const SCEV *OperandExtendedAdd = getAddExpr( + WideStart, getMulExpr(WideMaxBECount, getZeroExtendExprCached( + Step, WideTy, Cache))); if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NUW, which is propagated to this AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); // Return the expression with the addrec on the outside. return getAddRecExpr( - getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), - getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), + getZeroExtendExprCached(Step, Ty, Cache), L, + AR->getNoWrapFlags()); } // Similar to above, only this time treat the step value as signed. // This covers loops that count down. @@ -1609,7 +1639,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); // Return the expression with the addrec on the outside. 
return getAddRecExpr( - getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), + getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } @@ -1641,8 +1671,9 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); // Return the expression with the addrec on the outside. return getAddRecExpr( - getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), - getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), + getZeroExtendExprCached(Step, Ty, Cache), L, + AR->getNoWrapFlags()); } } else if (isKnownNegative(Step)) { const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - @@ -1657,7 +1688,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); // Return the expression with the addrec on the outside. return getAddRecExpr( - getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), + getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } @@ -1666,8 +1697,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); return getAddRecExpr( - getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), - getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), + getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags()); } } @@ -1678,7 +1709,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, // commute the zero extension with the addition operation. 
SmallVector<const SCEV *, 4> Ops; for (const auto *Op : SA->operands()) - Ops.push_back(getZeroExtendExpr(Op, Ty)); + Ops.push_back(getZeroExtendExprCached(Op, Ty, Cache)); return getAddExpr(Ops, SCEV::FlagNUW); } } @@ -1692,8 +1723,31 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, return S; } -const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, - Type *Ty) { +const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty) { + // Use the local cache to prevent exponential behavior of + // getSignExtendExprImpl. + ExtendCacheTy Cache; + return getSignExtendExprCached(Op, Ty, Cache); +} + +/// Query \p Cache before calling getSignExtendExprImpl. If there is no +/// related entry in the \p Cache, call getSignExtendExprImpl and save +/// the result in the \p Cache. +const SCEV *ScalarEvolution::getSignExtendExprCached(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache) { + auto It = Cache.find({Op, Ty}); + if (It != Cache.end()) + return It->second; + const SCEV *SExt = getSignExtendExprImpl(Op, Ty, Cache); + auto InsertResult = Cache.insert({{Op, Ty}, SExt}); + assert(InsertResult.second && "Expect the key was not in the cache"); + (void)InsertResult; + return SExt; +} + +/// The real implementation of getSignExtendExpr. +const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty, + ExtendCacheTy &Cache) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && @@ -1703,11 +1757,11 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, // Fold if the operand is constant. 
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( - cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); + cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); // sext(sext(x)) --> sext(x) if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) - return getSignExtendExpr(SS->getOperand(), Ty); + return getSignExtendExprCached(SS->getOperand(), Ty, Cache); // sext(zext(x)) --> zext(x) if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) @@ -1746,8 +1800,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, const APInt &C2 = SC2->getAPInt(); if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && C2.isPowerOf2()) - return getAddExpr(getSignExtendExpr(SC1, Ty), - getSignExtendExpr(SMul, Ty)); + return getAddExpr(getSignExtendExprCached(SC1, Ty, Cache), + getSignExtendExprCached(SMul, Ty, Cache)); } } } @@ -1758,7 +1812,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, // commute the sign extension with the addition operation. SmallVector<const SCEV *, 4> Ops; for (const auto *Op : SA->operands()) - Ops.push_back(getSignExtendExpr(Op, Ty)); + Ops.push_back(getSignExtendExprCached(Op, Ty, Cache)); return getAddExpr(Ops, SCEV::FlagNSW); } } @@ -1782,8 +1836,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, // we don't need to do any further analysis. if (AR->hasNoSignedWrap()) return getAddRecExpr( - getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), - getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW); + getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), + getSignExtendExprCached(Step, Ty, Cache), L, SCEV::FlagNSW); // Check whether the backedge-taken count is SCEVCouldNotCompute. 
// Note that this serves two purposes: It filters out loops that are @@ -1808,21 +1862,22 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); // Check whether Start+Step*MaxBECount has no signed overflow. const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); - const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy); - const SCEV *WideStart = getSignExtendExpr(Start, WideTy); + const SCEV *SAdd = + getSignExtendExprCached(getAddExpr(Start, SMul), WideTy, Cache); + const SCEV *WideStart = getSignExtendExprCached(Start, WideTy, Cache); const SCEV *WideMaxBECount = - getZeroExtendExpr(CastedMaxBECount, WideTy); - const SCEV *OperandExtendedAdd = - getAddExpr(WideStart, - getMulExpr(WideMaxBECount, - getSignExtendExpr(Step, WideTy))); + getZeroExtendExpr(CastedMaxBECount, WideTy); + const SCEV *OperandExtendedAdd = getAddExpr( + WideStart, getMulExpr(WideMaxBECount, getSignExtendExprCached( + Step, WideTy, Cache))); if (SAdd == OperandExtendedAdd) { // Cache knowledge of AR NSW, which is propagated to this AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); // Return the expression with the addrec on the outside. return getAddRecExpr( - getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), - getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), + getSignExtendExprCached(Step, Ty, Cache), L, + AR->getNoWrapFlags()); } // Similar to above, only this time treat the step value as unsigned. // This covers loops that count up with an unsigned step. @@ -1843,7 +1898,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, // Return the expression with the addrec on the outside. 
return getAddRecExpr( - getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), + getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } @@ -1875,8 +1930,9 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); return getAddRecExpr( - getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), - getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), + getSignExtendExprCached(Step, Ty, Cache), L, + AR->getNoWrapFlags()); } } @@ -1890,18 +1946,18 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, const APInt &C2 = SC2->getAPInt(); if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && C2.isPowerOf2()) { - Start = getSignExtendExpr(Start, Ty); + Start = getSignExtendExprCached(Start, Ty, Cache); const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L, AR->getNoWrapFlags()); - return getAddExpr(Start, getSignExtendExpr(NewAR, Ty)); + return getAddExpr(Start, getSignExtendExprCached(NewAR, Ty, Cache)); } } if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); return getAddRecExpr( - getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), - getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); + getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), + getSignExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags()); } } @@ -3951,9 +4007,9 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { case Instruction::Xor: if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) - // If the RHS of the xor is a signbit, then this is just an add. - // Instcombine turns add of signbit into xor as a strength reduction step. 
- if (RHSC->getValue().isSignBit()) + // If the RHS of the xor is a signmask, then this is just an add. + // Instcombine turns add of signmask into xor as a strength reduction step. + if (RHSC->getValue().isSignMask()) return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); return BinaryOp(Op); @@ -5272,28 +5328,12 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { break; case Instruction::Or: - // If the RHS of the Or is a constant, we may have something like: - // X*4+1 which got turned into X*4|1. Handle this as an Add so loop - // optimizations will transparently handle this case. - // - // In order for this transformation to be safe, the LHS must be of the - // form X*(2^n) and the Or constant must be less than 2^n. - if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { - const SCEV *LHS = getSCEV(BO->LHS); - const APInt &CIVal = CI->getValue(); - if (GetMinTrailingZeros(LHS) >= - (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { - // Build a plain add SCEV. - const SCEV *S = getAddExpr(LHS, getSCEV(CI)); - // If the LHS of the add was an addrec and it has no-wrap flags, - // transfer the no-wrap flags, since an or won't introduce a wrap. - if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { - const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); - const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( - OldAR->getNoWrapFlags()); - } - return S; - } + // Use ValueTracking to check whether this is actually an add. + if (haveNoCommonBitsSet(BO->LHS, BO->RHS, getDataLayout(), &AC, + nullptr, &DT)) { + // There aren't any common bits set, so the add can't wrap. + auto Flags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNSW); + return getAddExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); } break; @@ -5329,7 +5369,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { // using an add, which is equivalent, and re-apply the zext. 
APInt Trunc = CI->getValue().trunc(Z0TySize); if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && - Trunc.isSignBit()) + Trunc.isSignMask()) return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), UTy); } diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index d871e83f222a7..900a2363e60d4 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -292,15 +292,15 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, KnownOne = PossibleSumOne & Known; // Are we still trying to solve for the sign bit? - if (!Known.isNegative()) { + if (!Known.isSignBitSet()) { if (NSW) { // Adding two non-negative numbers, or subtracting a negative number from // a non-negative one, can't wrap into negative. - if (LHSKnownZero.isNegative() && KnownZero2.isNegative()) + if (LHSKnownZero.isSignBitSet() && KnownZero2.isSignBitSet()) KnownZero.setSignBit(); // Adding two negative numbers, or subtracting a non-negative number from // a negative one, can't wrap into non-negative. - else if (LHSKnownOne.isNegative() && KnownOne2.isNegative()) + else if (LHSKnownOne.isSignBitSet() && KnownOne2.isSignBitSet()) KnownOne.setSignBit(); } } @@ -322,10 +322,10 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, // The product of a number with itself is non-negative. isKnownNonNegative = true; } else { - bool isKnownNonNegativeOp1 = KnownZero.isNegative(); - bool isKnownNonNegativeOp0 = KnownZero2.isNegative(); - bool isKnownNegativeOp1 = KnownOne.isNegative(); - bool isKnownNegativeOp0 = KnownOne2.isNegative(); + bool isKnownNonNegativeOp1 = KnownZero.isSignBitSet(); + bool isKnownNonNegativeOp0 = KnownZero2.isSignBitSet(); + bool isKnownNegativeOp1 = KnownOne.isSignBitSet(); + bool isKnownNegativeOp0 = KnownOne2.isSignBitSet(); // The product of two numbers with the same sign is non-negative. 
isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) || (isKnownNonNegativeOp1 && isKnownNonNegativeOp0); @@ -361,9 +361,9 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, // which case we prefer to follow the result of the direct computation, // though as the program is invoking undefined behaviour we can choose // whatever we like here. - if (isKnownNonNegative && !KnownOne.isNegative()) + if (isKnownNonNegative && !KnownOne.isSignBitSet()) KnownZero.setSignBit(); - else if (isKnownNegative && !KnownZero.isNegative()) + else if (isKnownNegative && !KnownZero.isSignBitSet()) KnownOne.setSignBit(); } @@ -661,8 +661,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero, computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I)); // For those bits in RHS that are known, we can propagate them to known // bits in V shifted to the right by C. - KnownZero |= RHSKnownZero.lshr(C->getZExtValue()); - KnownOne |= RHSKnownOne.lshr(C->getZExtValue()); + RHSKnownZero.lshrInPlace(C->getZExtValue()); + KnownZero |= RHSKnownZero; + RHSKnownOne.lshrInPlace(C->getZExtValue()); + KnownOne |= RHSKnownOne; // assume(~(v << c) = a) } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))), m_Value(A))) && @@ -672,8 +674,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero, computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I)); // For those bits in RHS that are known, we can propagate them inverted // to known bits in V shifted to the right by C. 
- KnownZero |= RHSKnownOne.lshr(C->getZExtValue()); - KnownOne |= RHSKnownZero.lshr(C->getZExtValue()); + RHSKnownOne.lshrInPlace(C->getZExtValue()); + KnownZero |= RHSKnownOne; + RHSKnownZero.lshrInPlace(C->getZExtValue()); + KnownOne |= RHSKnownZero; // assume(v >> c = a) } else if (match(Arg, m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)), @@ -707,7 +711,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero, APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I)); - if (RHSKnownZero.isNegative()) { + if (RHSKnownZero.isSignBitSet()) { // We know that the sign bit is zero. KnownZero.setSignBit(); } @@ -718,7 +722,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero, APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I)); - if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) { + if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isSignBitSet()) { // We know that the sign bit is zero. KnownZero.setSignBit(); } @@ -729,7 +733,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero, APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I)); - if (RHSKnownOne.isNegative()) { + if (RHSKnownOne.isSignBitSet()) { // We know that the sign bit is one. KnownOne.setSignBit(); } @@ -740,7 +744,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero, APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I)); - if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) { + if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isSignBitSet()) { // We know that the sign bit is one. 
KnownOne.setSignBit(); } @@ -990,23 +994,23 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero, unsigned MaxHighZeros = 0; if (SPF == SPF_SMAX) { // If both sides are negative, the result is negative. - if (KnownOne.isNegative() && KnownOne2.isNegative()) + if (KnownOne.isSignBitSet() && KnownOne2.isSignBitSet()) // We can derive a lower bound on the result by taking the max of the // leading one bits. MaxHighOnes = std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes()); // If either side is non-negative, the result is non-negative. - else if (KnownZero.isNegative() || KnownZero2.isNegative()) + else if (KnownZero.isSignBitSet() || KnownZero2.isSignBitSet()) MaxHighZeros = 1; } else if (SPF == SPF_SMIN) { // If both sides are non-negative, the result is non-negative. - if (KnownZero.isNegative() && KnownZero2.isNegative()) + if (KnownZero.isSignBitSet() && KnownZero2.isSignBitSet()) // We can derive an upper bound on the result by taking the max of the // leading zero bits. MaxHighZeros = std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes()); // If either side is negative, the result is negative. - else if (KnownOne.isNegative() || KnownOne2.isNegative()) + else if (KnownOne.isSignBitSet() || KnownOne2.isSignBitSet()) MaxHighOnes = 1; } else if (SPF == SPF_UMAX) { // We can derive a lower bound on the result by taking the max of the @@ -1092,14 +1096,14 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero, KZResult.setLowBits(ShiftAmt); // Low bits known 0. // If this shift has "nsw" keyword, then the result is either a poison // value or has the same sign bit as the first operand. 
- if (NSW && KnownZero.isNegative()) + if (NSW && KnownZero.isSignBitSet()) KZResult.setSignBit(); return KZResult; }; auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) { APInt KOResult = KnownOne << ShiftAmt; - if (NSW && KnownOne.isNegative()) + if (NSW && KnownOne.isSignBitSet()) KOResult.setSignBit(); return KOResult; }; @@ -1111,10 +1115,11 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero, } case Instruction::LShr: { // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 - auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) { - return KnownZero.lshr(ShiftAmt) | - // High bits known zero. - APInt::getHighBitsSet(BitWidth, ShiftAmt); + auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) { + APInt KZResult = KnownZero.lshr(ShiftAmt); + // High bits known zero. + KZResult.setHighBits(ShiftAmt); + return KZResult; }; auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) { @@ -1169,28 +1174,25 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero, // If the first operand is non-negative or has all low bits zero, then // the upper bits are all zero. - if (KnownZero2.isNegative() || ((KnownZero2 & LowBits) == LowBits)) + if (KnownZero2.isSignBitSet() || ((KnownZero2 & LowBits) == LowBits)) KnownZero |= ~LowBits; // If the first operand is negative and not all low bits are zero, then // the upper bits are all one. - if (KnownOne2.isNegative() && ((KnownOne2 & LowBits) != 0)) + if (KnownOne2.isSignBitSet() && ((KnownOne2 & LowBits) != 0)) KnownOne |= ~LowBits; assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); + break; } } // The sign bit is the LHS's sign bit, except when the result of the // remainder is zero. - if (KnownZero.isNonNegative()) { - APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); - computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1, - Q); - // If it's known zero, our sign bit is also zero. 
- if (LHSKnownZero.isNegative()) - KnownZero.setSignBit(); - } + computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q); + // If it's known zero, our sign bit is also zero. + if (KnownZero2.isSignBitSet()) + KnownZero.setSignBit(); break; case Instruction::URem: { @@ -1331,24 +1333,24 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero, // (add non-negative, non-negative) --> non-negative // (add negative, negative) --> negative if (Opcode == Instruction::Add) { - if (KnownZero2.isNegative() && KnownZero3.isNegative()) + if (KnownZero2.isSignBitSet() && KnownZero3.isSignBitSet()) KnownZero.setSignBit(); - else if (KnownOne2.isNegative() && KnownOne3.isNegative()) + else if (KnownOne2.isSignBitSet() && KnownOne3.isSignBitSet()) KnownOne.setSignBit(); } // (sub nsw non-negative, negative) --> non-negative // (sub nsw negative, non-negative) --> negative else if (Opcode == Instruction::Sub && LL == I) { - if (KnownZero2.isNegative() && KnownOne3.isNegative()) + if (KnownZero2.isSignBitSet() && KnownOne3.isSignBitSet()) KnownZero.setSignBit(); - else if (KnownOne2.isNegative() && KnownZero3.isNegative()) + else if (KnownOne2.isSignBitSet() && KnownZero3.isSignBitSet()) KnownOne.setSignBit(); } // (mul nsw non-negative, non-negative) --> non-negative - else if (Opcode == Instruction::Mul && KnownZero2.isNegative() && - KnownZero3.isNegative()) + else if (Opcode == Instruction::Mul && KnownZero2.isSignBitSet() && + KnownZero3.isSignBitSet()) KnownZero.setSignBit(); } @@ -1614,8 +1616,8 @@ void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne, APInt ZeroBits(BitWidth, 0); APInt OneBits(BitWidth, 0); computeKnownBits(V, ZeroBits, OneBits, Depth, Q); - KnownOne = OneBits.isNegative(); - KnownZero = ZeroBits.isNegative(); + KnownOne = OneBits.isSignBitSet(); + KnownZero = ZeroBits.isSignBitSet(); } /// Return true if the given value is known to have exactly one @@ -1638,9 +1640,9 @@ bool isKnownToBeAPowerOfTwo(const 
Value *V, bool OrZero, unsigned Depth, if (match(V, m_Shl(m_One(), m_Value()))) return true; - // (signbit) >>l X is clearly a power of two if the one is not shifted off the - // bottom. If it is shifted off the bottom then the result is undefined. - if (match(V, m_LShr(m_SignBit(), m_Value()))) + // (signmask) >>l X is clearly a power of two if the one is not shifted off + // the bottom. If it is shifted off the bottom then the result is undefined. + if (match(V, m_LShr(m_SignMask(), m_Value()))) return true; // The remaining tests are all recursive, so bail out if we hit the limit. @@ -2241,7 +2243,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, // If we are subtracting one from a positive number, there is no carry // out of the result. - if (KnownZero.isNegative()) + if (KnownZero.isSignBitSet()) return Tmp; } @@ -2265,7 +2267,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, // If the input is known to be positive (the sign bit is known clear), // the output of the NEG has the same number of sign bits as the input. - if (KnownZero.isNegative()) + if (KnownZero.isSignBitSet()) return Tmp2; // Otherwise, we treat this like a SUB. @@ -2322,10 +2324,10 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, // If we know that the sign bit is either zero or one, determine the number of // identical bits in the top of the input value. - if (KnownZero.isNegative()) + if (KnownZero.isSignBitSet()) return std::max(FirstAnswer, KnownZero.countLeadingOnes()); - if (KnownOne.isNegative()) + if (KnownOne.isSignBitSet()) return std::max(FirstAnswer, KnownOne.countLeadingOnes()); // computeKnownBits gave us no extra information about the top bits. @@ -3556,14 +3558,14 @@ OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS, // We know the multiply operation doesn't overflow if the maximum values for // each operand will not overflow after we multiply them together. 
bool MaxOverflow; - LHSMax.umul_ov(RHSMax, MaxOverflow); + (void)LHSMax.umul_ov(RHSMax, MaxOverflow); if (!MaxOverflow) return OverflowResult::NeverOverflows; // We know it always overflows if multiplying the smallest possible values for // the operands also results in overflow. bool MinOverflow; - LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow); + (void)LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow); if (MinOverflow) return OverflowResult::AlwaysOverflows; diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index 58ea9296afda4..c7076ed0dd81d 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -143,27 +143,24 @@ bool LLParser::ValidateEndOfModule() { FnAttrs.removeAttribute(Attribute::Alignment); } - AS = AS.addAttributes( - Context, AttributeList::FunctionIndex, - AttributeList::get(Context, AttributeList::FunctionIndex, FnAttrs)); + AS = AS.addAttributes(Context, AttributeList::FunctionIndex, + AttributeSet::get(Context, FnAttrs)); Fn->setAttributes(AS); } else if (CallInst *CI = dyn_cast<CallInst>(V)) { AttributeList AS = CI->getAttributes(); AttrBuilder FnAttrs(AS.getFnAttributes()); AS = AS.removeAttributes(Context, AttributeList::FunctionIndex); FnAttrs.merge(B); - AS = AS.addAttributes( - Context, AttributeList::FunctionIndex, - AttributeList::get(Context, AttributeList::FunctionIndex, FnAttrs)); + AS = AS.addAttributes(Context, AttributeList::FunctionIndex, + AttributeSet::get(Context, FnAttrs)); CI->setAttributes(AS); } else if (InvokeInst *II = dyn_cast<InvokeInst>(V)) { AttributeList AS = II->getAttributes(); AttrBuilder FnAttrs(AS.getFnAttributes()); AS = AS.removeAttributes(Context, AttributeList::FunctionIndex); FnAttrs.merge(B); - AS = AS.addAttributes( - Context, AttributeList::FunctionIndex, - AttributeList::get(Context, AttributeList::FunctionIndex, FnAttrs)); + AS = AS.addAttributes(Context, AttributeList::FunctionIndex, + AttributeSet::get(Context, FnAttrs)); II->setAttributes(AS); } else { 
llvm_unreachable("invalid object with forward attribute group reference"); diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp index 24ab7e9a950cc..6d727ce833469 100644 --- a/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/lib/Bitcode/Reader/BitcodeReader.cpp @@ -372,15 +372,27 @@ Expected<std::string> readTriple(BitstreamCursor &Stream) { class BitcodeReaderBase { protected: - BitcodeReaderBase(BitstreamCursor Stream) : Stream(std::move(Stream)) { + BitcodeReaderBase(BitstreamCursor Stream, StringRef Strtab) + : Stream(std::move(Stream)), Strtab(Strtab) { this->Stream.setBlockInfo(&BlockInfo); } BitstreamBlockInfo BlockInfo; BitstreamCursor Stream; + StringRef Strtab; + + /// In version 2 of the bitcode we store names of global values and comdats in + /// a string table rather than in the VST. + bool UseStrtab = false; Expected<unsigned> parseVersionRecord(ArrayRef<uint64_t> Record); + /// If this module uses a string table, pop the reference to the string table + /// and return the referenced string and the rest of the record. Otherwise + /// just return the record itself. + std::pair<StringRef, ArrayRef<uint64_t>> + readNameFromStrtab(ArrayRef<uint64_t> Record); + bool readBlockInfo(); // Contains an arbitrary and optional string identifying the bitcode producer @@ -402,11 +414,22 @@ BitcodeReaderBase::parseVersionRecord(ArrayRef<uint64_t> Record) { if (Record.size() < 1) return error("Invalid record"); unsigned ModuleVersion = Record[0]; - if (ModuleVersion > 1) + if (ModuleVersion > 2) return error("Invalid value"); + UseStrtab = ModuleVersion >= 2; return ModuleVersion; } +std::pair<StringRef, ArrayRef<uint64_t>> +BitcodeReaderBase::readNameFromStrtab(ArrayRef<uint64_t> Record) { + if (!UseStrtab) + return {"", Record}; + // Invalid reference. Let the caller complain about the record being empty. 
+ if (Record[0] + Record[1] > Strtab.size()) + return {"", {}}; + return {StringRef(Strtab.data() + Record[0], Record[1]), Record.slice(2)}; +} + class BitcodeReader : public BitcodeReaderBase, public GVMaterializer { LLVMContext &Context; Module *TheModule = nullptr; @@ -492,8 +515,8 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer { std::vector<std::string> BundleTags; public: - BitcodeReader(BitstreamCursor Stream, StringRef ProducerIdentification, - LLVMContext &Context); + BitcodeReader(BitstreamCursor Stream, StringRef Strtab, + StringRef ProducerIdentification, LLVMContext &Context); Error materializeForwardReferencedFunctions(); @@ -628,7 +651,10 @@ private: Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record, unsigned NameIndex, Triple &TT); + void setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, Function *F, + ArrayRef<uint64_t> Record); Error parseValueSymbolTable(uint64_t Offset = 0); + Error parseGlobalValueSymbolTable(); Error parseConstants(); Error rememberAndSkipFunctionBodies(); Error rememberAndSkipFunctionBody(); @@ -681,12 +707,15 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase { std::string SourceFileName; public: - ModuleSummaryIndexBitcodeReader( - BitstreamCursor Stream, ModuleSummaryIndex &TheIndex); + ModuleSummaryIndexBitcodeReader(BitstreamCursor Stream, StringRef Strtab, + ModuleSummaryIndex &TheIndex); Error parseModule(StringRef ModulePath); private: + void setValueGUID(uint64_t ValueID, StringRef ValueName, + GlobalValue::LinkageTypes Linkage, + StringRef SourceFileName); Error parseValueSymbolTable( uint64_t Offset, DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap); @@ -716,10 +745,10 @@ std::error_code llvm::errorToErrorCodeAndEmitErrors(LLVMContext &Ctx, return std::error_code(); } -BitcodeReader::BitcodeReader(BitstreamCursor Stream, +BitcodeReader::BitcodeReader(BitstreamCursor Stream, StringRef Strtab, StringRef ProducerIdentification, 
LLVMContext &Context) - : BitcodeReaderBase(std::move(Stream)), Context(Context), + : BitcodeReaderBase(std::move(Stream), Strtab), Context(Context), ValueList(Context) { this->ProducerIdentification = ProducerIdentification; } @@ -1749,6 +1778,54 @@ static uint64_t jumpToValueSymbolTable(uint64_t Offset, return CurrentBit; } +void BitcodeReader::setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, + Function *F, + ArrayRef<uint64_t> Record) { + // Note that we subtract 1 here because the offset is relative to one word + // before the start of the identification or module block, which was + // historically always the start of the regular bitcode header. + uint64_t FuncWordOffset = Record[1] - 1; + uint64_t FuncBitOffset = FuncWordOffset * 32; + DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta; + // Set the LastFunctionBlockBit to point to the last function block. + // Later when parsing is resumed after function materialization, + // we can simply skip that last function block. + if (FuncBitOffset > LastFunctionBlockBit) + LastFunctionBlockBit = FuncBitOffset; +} + +/// Read a new-style GlobalValue symbol table. 
+Error BitcodeReader::parseGlobalValueSymbolTable() { + unsigned FuncBitcodeOffsetDelta = + Stream.getAbbrevIDWidth() + bitc::BlockIDWidth; + + if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID)) + return error("Invalid record"); + + SmallVector<uint64_t, 64> Record; + while (true) { + BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + + switch (Entry.Kind) { + case BitstreamEntry::SubBlock: + case BitstreamEntry::Error: + return error("Malformed block"); + case BitstreamEntry::EndBlock: + return Error::success(); + case BitstreamEntry::Record: + break; + } + + Record.clear(); + switch (Stream.readRecord(Entry.ID, Record)) { + case bitc::VST_CODE_FNENTRY: // [valueid, offset] + setDeferredFunctionInfo(FuncBitcodeOffsetDelta, + cast<Function>(ValueList[Record[0]]), Record); + break; + } + } +} + /// Parse the value symbol table at either the current parsing location or /// at the given bit offset if provided. Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) { @@ -1756,8 +1833,18 @@ Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) { // Pass in the Offset to distinguish between calling for the module-level // VST (where we want to jump to the VST offset) and the function-level // VST (where we don't). - if (Offset > 0) + if (Offset > 0) { CurrentBit = jumpToValueSymbolTable(Offset, Stream); + // If this module uses a string table, read this as a module-level VST. + if (UseStrtab) { + if (Error Err = parseGlobalValueSymbolTable()) + return Err; + Stream.JumpToBit(CurrentBit); + return Error::success(); + } + // Otherwise, the VST will be in a similar format to a function-level VST, + // and will contain symbol names. 
+ } // Compute the delta between the bitcode indices in the VST (the word offset // to the word-aligned ENTER_SUBBLOCK for the function block, and that @@ -1818,23 +1905,10 @@ Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) { return Err; Value *V = ValOrErr.get(); - auto *F = dyn_cast<Function>(V); // Ignore function offsets emitted for aliases of functions in older // versions of LLVM. - if (!F) - break; - - // Note that we subtract 1 here because the offset is relative to one word - // before the start of the identification or module block, which was - // historically always the start of the regular bitcode header. - uint64_t FuncWordOffset = Record[1] - 1; - uint64_t FuncBitOffset = FuncWordOffset * 32; - DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta; - // Set the LastFunctionBlockBit to point to the last function block. - // Later when parsing is resumed after function materialization, - // we can simply skip that last function block. - if (FuncBitOffset > LastFunctionBlockBit) - LastFunctionBlockBit = FuncBitOffset; + if (auto *F = dyn_cast<Function>(V)) + setDeferredFunctionInfo(FuncBitcodeOffsetDelta, F, Record); break; } case bitc::VST_CODE_BBENTRY: { @@ -2557,6 +2631,7 @@ Error BitcodeReader::globalCleanup() { // Look for intrinsic functions which need to be upgraded at some point for (Function &F : *TheModule) { + MDLoader->upgradeDebugIntrinsics(F); Function *NewFn; if (UpgradeIntrinsicFunction(&F, NewFn)) UpgradedIntrinsics[&F] = NewFn; @@ -2626,15 +2701,24 @@ bool BitcodeReaderBase::readBlockInfo() { } Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) { - // [selection_kind, name] - if (Record.size() < 2) + // v1: [selection_kind, name] + // v2: [strtab_offset, strtab_size, selection_kind] + StringRef Name; + std::tie(Name, Record) = readNameFromStrtab(Record); + + if (Record.size() < 1) return error("Invalid record"); Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]); - std::string Name; 
- unsigned ComdatNameSize = Record[1]; - Name.reserve(ComdatNameSize); - for (unsigned i = 0; i != ComdatNameSize; ++i) - Name += (char)Record[2 + i]; + std::string OldFormatName; + if (!UseStrtab) { + if (Record.size() < 2) + return error("Invalid record"); + unsigned ComdatNameSize = Record[1]; + OldFormatName.reserve(ComdatNameSize); + for (unsigned i = 0; i != ComdatNameSize; ++i) + OldFormatName += (char)Record[2 + i]; + Name = OldFormatName; + } Comdat *C = TheModule->getOrInsertComdat(Name); C->setSelectionKind(SK); ComdatList.push_back(C); @@ -2642,9 +2726,13 @@ Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) { } Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) { - // [pointer type, isconst, initid, linkage, alignment, section, + // v1: [pointer type, isconst, initid, linkage, alignment, section, // visibility, threadlocal, unnamed_addr, externally_initialized, - // dllstorageclass, comdat] + // dllstorageclass, comdat] (name in VST) + // v2: [strtab_offset, strtab_size, v1] + StringRef Name; + std::tie(Name, Record) = readNameFromStrtab(Record); + if (Record.size() < 6) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); @@ -2692,7 +2780,7 @@ Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) { ExternallyInitialized = Record[9]; GlobalVariable *NewGV = - new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, "", + new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, Name, nullptr, TLM, AddressSpace, ExternallyInitialized); NewGV->setAlignment(Alignment); if (!Section.empty()) @@ -2724,9 +2812,13 @@ Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) { } Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) { - // [type, callingconv, isproto, linkage, paramattr, alignment, section, + // v1: [type, callingconv, isproto, linkage, paramattr, alignment, section, // visibility, gc, unnamed_addr, prologuedata, dllstorageclass, comdat, - // 
prefixdata] + // prefixdata] (name in VST) + // v2: [strtab_offset, strtab_size, v1] + StringRef Name; + std::tie(Name, Record) = readNameFromStrtab(Record); + if (Record.size() < 8) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); @@ -2742,7 +2834,7 @@ Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) { return error("Invalid calling convention ID"); Function *Func = - Function::Create(FTy, GlobalValue::ExternalLinkage, "", TheModule); + Function::Create(FTy, GlobalValue::ExternalLinkage, Name, TheModule); Func->setCallingConv(CC); bool isProto = Record[2]; @@ -2810,11 +2902,15 @@ Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) { Error BitcodeReader::parseGlobalIndirectSymbolRecord( unsigned BitCode, ArrayRef<uint64_t> Record) { - // ALIAS_OLD: [alias type, aliasee val#, linkage] - // ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility, - // dllstorageclass] - // IFUNC: [alias type, addrspace, aliasee val#, linkage, - // visibility, dllstorageclass] + // v1 ALIAS_OLD: [alias type, aliasee val#, linkage] (name in VST) + // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility, + // dllstorageclass] (name in VST) + // v1 IFUNC: [alias type, addrspace, aliasee val#, linkage, + // visibility, dllstorageclass] (name in VST) + // v2: [strtab_offset, strtab_size, v1] + StringRef Name; + std::tie(Name, Record) = readNameFromStrtab(Record); + bool NewRecord = BitCode != bitc::MODULE_CODE_ALIAS_OLD; if (Record.size() < (3 + (unsigned)NewRecord)) return error("Invalid record"); @@ -2839,10 +2935,10 @@ Error BitcodeReader::parseGlobalIndirectSymbolRecord( GlobalIndirectSymbol *NewGA; if (BitCode == bitc::MODULE_CODE_ALIAS || BitCode == bitc::MODULE_CODE_ALIAS_OLD) - NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), "", + NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name, TheModule); else - NewGA = GlobalIFunc::create(Ty, AddrSpace, 
getDecodedLinkage(Linkage), "", + NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name, nullptr, TheModule); // Old bitcode files didn't have visibility field. // Local linkage must have default visibility. @@ -4570,8 +4666,8 @@ std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const { } ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader( - BitstreamCursor Cursor, ModuleSummaryIndex &TheIndex) - : BitcodeReaderBase(std::move(Cursor)), TheIndex(TheIndex) {} + BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex) + : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex) {} std::pair<GlobalValue::GUID, GlobalValue::GUID> ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) { @@ -4580,12 +4676,32 @@ ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) { return VGI->second; } +void ModuleSummaryIndexBitcodeReader::setValueGUID( + uint64_t ValueID, StringRef ValueName, GlobalValue::LinkageTypes Linkage, + StringRef SourceFileName) { + std::string GlobalId = + GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName); + auto ValueGUID = GlobalValue::getGUID(GlobalId); + auto OriginalNameID = ValueGUID; + if (GlobalValue::isLocalLinkage(Linkage)) + OriginalNameID = GlobalValue::getGUID(ValueName); + if (PrintSummaryGUIDs) + dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is " + << ValueName << "\n"; + ValueIdToCallGraphGUIDMap[ValueID] = + std::make_pair(ValueGUID, OriginalNameID); +} + // Specialized value symbol table parser used when reading module index // blocks where we don't actually create global values. The parsed information // is saved in the bitcode reader for use when later parsing summaries. Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable( uint64_t Offset, DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap) { + // With a strtab the VST is not required to parse the summary. 
+ if (UseStrtab) + return Error::success(); + assert(Offset > 0 && "Expected non-zero VST offset"); uint64_t CurrentBit = jumpToValueSymbolTable(Offset, Stream); @@ -4627,17 +4743,7 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable( assert(VLI != ValueIdToLinkageMap.end() && "No linkage found for VST entry?"); auto Linkage = VLI->second; - std::string GlobalId = - GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName); - auto ValueGUID = GlobalValue::getGUID(GlobalId); - auto OriginalNameID = ValueGUID; - if (GlobalValue::isLocalLinkage(Linkage)) - OriginalNameID = GlobalValue::getGUID(ValueName); - if (PrintSummaryGUIDs) - dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is " - << ValueName << "\n"; - ValueIdToCallGraphGUIDMap[ValueID] = - std::make_pair(ValueGUID, OriginalNameID); + setValueGUID(ValueID, ValueName, Linkage, SourceFileName); ValueName.clear(); break; } @@ -4651,18 +4757,7 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable( assert(VLI != ValueIdToLinkageMap.end() && "No linkage found for VST entry?"); auto Linkage = VLI->second; - std::string FunctionGlobalId = GlobalValue::getGlobalIdentifier( - ValueName, VLI->second, SourceFileName); - auto FunctionGUID = GlobalValue::getGUID(FunctionGlobalId); - auto OriginalNameID = FunctionGUID; - if (GlobalValue::isLocalLinkage(Linkage)) - OriginalNameID = GlobalValue::getGUID(ValueName); - if (PrintSummaryGUIDs) - dbgs() << "GUID " << FunctionGUID << "(" << OriginalNameID << ") is " - << ValueName << "\n"; - ValueIdToCallGraphGUIDMap[ValueID] = - std::make_pair(FunctionGUID, OriginalNameID); - + setValueGUID(ValueID, ValueName, Linkage, SourceFileName); ValueName.clear(); break; } @@ -4749,6 +4844,11 @@ Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) { switch (BitCode) { default: break; // Default behavior, ignore unknown content. 
+ case bitc::MODULE_CODE_VERSION: { + if (Error Err = parseVersionRecord(Record).takeError()) + return Err; + break; + } /// MODULE_CODE_SOURCE_FILENAME: [namechar x N] case bitc::MODULE_CODE_SOURCE_FILENAME: { SmallString<128> ValueName; @@ -4783,17 +4883,26 @@ Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) { // was historically always the start of the regular bitcode header. VSTOffset = Record[0] - 1; break; - // GLOBALVAR: [pointer type, isconst, initid, linkage, ...] - // FUNCTION: [type, callingconv, isproto, linkage, ...] - // ALIAS: [alias type, addrspace, aliasee val#, linkage, ...] + // v1 GLOBALVAR: [pointer type, isconst, initid, linkage, ...] + // v1 FUNCTION: [type, callingconv, isproto, linkage, ...] + // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, ...] + // v2: [strtab offset, strtab size, v1] case bitc::MODULE_CODE_GLOBALVAR: case bitc::MODULE_CODE_FUNCTION: case bitc::MODULE_CODE_ALIAS: { - if (Record.size() <= 3) + StringRef Name; + ArrayRef<uint64_t> GVRecord; + std::tie(Name, GVRecord) = readNameFromStrtab(Record); + if (GVRecord.size() <= 3) return error("Invalid record"); - uint64_t RawLinkage = Record[3]; + uint64_t RawLinkage = GVRecord[3]; GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage); - ValueIdToLinkageMap[ValueId++] = Linkage; + if (!UseStrtab) { + ValueIdToLinkageMap[ValueId++] = Linkage; + break; + } + + setValueGUID(ValueId++, Name, Linkage, SourceFileName); break; } } @@ -4904,6 +5013,12 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary( switch (BitCode) { default: // Default behavior: ignore. 
break; + case bitc::FS_VALUE_GUID: { // [valueid, refguid] + uint64_t ValueID = Record[0]; + GlobalValue::GUID RefGUID = Record[1]; + ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID); + break; + } // FS_PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid, // n x (valueid)] // FS_PERMODULE_PROFILE: [valueid, flags, instcount, numrefs, @@ -5208,6 +5323,35 @@ const std::error_category &llvm::BitcodeErrorCategory() { return *ErrorCategory; } +static Expected<StringRef> readStrtab(BitstreamCursor &Stream) { + if (Stream.EnterSubBlock(bitc::STRTAB_BLOCK_ID)) + return error("Invalid record"); + + StringRef Strtab; + while (1) { + BitstreamEntry Entry = Stream.advance(); + switch (Entry.Kind) { + case BitstreamEntry::EndBlock: + return Strtab; + + case BitstreamEntry::Error: + return error("Malformed block"); + + case BitstreamEntry::SubBlock: + if (Stream.SkipBlock()) + return error("Malformed block"); + break; + + case BitstreamEntry::Record: + StringRef Blob; + SmallVector<uint64_t, 1> Record; + if (Stream.readRecord(Entry.ID, Record, &Blob) == bitc::STRTAB_BLOB) + Strtab = Blob; + break; + } + } +} + //===----------------------------------------------------------------------===// // External interface //===----------------------------------------------------------------------===// @@ -5260,6 +5404,22 @@ llvm::getBitcodeModuleList(MemoryBufferRef Buffer) { continue; } + if (Entry.ID == bitc::STRTAB_BLOCK_ID) { + Expected<StringRef> Strtab = readStrtab(Stream); + if (!Strtab) + return Strtab.takeError(); + // This string table is used by every preceding bitcode module that does + // not have its own string table. A bitcode file may have multiple + // string tables if it was created by binary concatenation, for example + // with "llvm-cat -b". 
+ for (auto I = Modules.rbegin(), E = Modules.rend(); I != E; ++I) { + if (!I->Strtab.empty()) + break; + I->Strtab = *Strtab; + } + continue; + } + if (Stream.SkipBlock()) return error("Malformed block"); continue; @@ -5296,8 +5456,8 @@ BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll, } Stream.JumpToBit(ModuleBit); - auto *R = - new BitcodeReader(std::move(Stream), ProducerIdentification, Context); + auto *R = new BitcodeReader(std::move(Stream), Strtab, ProducerIdentification, + Context); std::unique_ptr<Module> M = llvm::make_unique<Module>(ModuleIdentifier, Context); @@ -5332,7 +5492,7 @@ Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() { Stream.JumpToBit(ModuleBit); auto Index = llvm::make_unique<ModuleSummaryIndex>(); - ModuleSummaryIndexBitcodeReader R(std::move(Stream), *Index); + ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index); if (Error Err = R.parseModule(ModuleIdentifier)) return std::move(Err); diff --git a/lib/Bitcode/Reader/MetadataLoader.cpp b/lib/Bitcode/Reader/MetadataLoader.cpp index 274dfe89cce54..d089684a052f5 100644 --- a/lib/Bitcode/Reader/MetadataLoader.cpp +++ b/lib/Bitcode/Reader/MetadataLoader.cpp @@ -54,6 +54,7 @@ #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/ModuleSummaryIndex.h" @@ -452,6 +453,7 @@ class MetadataLoader::MetadataLoaderImpl { bool StripTBAA = false; bool HasSeenOldLoopTags = false; bool NeedUpgradeToDIGlobalVariableExpression = false; + bool NeedDeclareExpressionUpgrade = false; /// True if metadata is being parsed for a module being ThinLTO imported. bool IsImporting = false; @@ -511,6 +513,26 @@ class MetadataLoader::MetadataLoaderImpl { } } + /// Remove a leading DW_OP_deref from DIExpressions in a dbg.declare that + /// describes a function argument. 
+ void upgradeDeclareExpressions(Function &F) { + if (!NeedDeclareExpressionUpgrade) + return; + + for (auto &BB : F) + for (auto &I : BB) + if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) + if (auto *DIExpr = DDI->getExpression()) + if (DIExpr->startsWithDeref() && + dyn_cast_or_null<Argument>(DDI->getAddress())) { + SmallVector<uint64_t, 8> Ops; + Ops.append(std::next(DIExpr->elements_begin()), + DIExpr->elements_end()); + auto *E = DIExpression::get(Context, Ops); + DDI->setOperand(2, MetadataAsValue::get(Context, E)); + } + } + void upgradeDebugInfo() { upgradeCUSubprograms(); upgradeCUVariables(); @@ -565,6 +587,7 @@ public: unsigned size() const { return MetadataList.size(); } void shrinkTo(unsigned N) { MetadataList.shrinkTo(N); } + void upgradeDebugIntrinsics(Function &F) { upgradeDeclareExpressions(F); } }; static Error error(const Twine &Message) { @@ -1520,12 +1543,32 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata( return error("Invalid record"); IsDistinct = Record[0] & 1; - bool HasOpFragment = Record[0] & 2; + uint64_t Version = Record[0] >> 1; auto Elts = MutableArrayRef<uint64_t>(Record).slice(1); - if (!HasOpFragment) - if (unsigned N = Elts.size()) - if (N >= 3 && Elts[N - 3] == dwarf::DW_OP_bit_piece) - Elts[N - 3] = dwarf::DW_OP_LLVM_fragment; + unsigned N = Elts.size(); + // Perform various upgrades. + switch (Version) { + case 0: + if (N >= 3 && Elts[N - 3] == dwarf::DW_OP_bit_piece) + Elts[N - 3] = dwarf::DW_OP_LLVM_fragment; + LLVM_FALLTHROUGH; + case 1: + // Move DW_OP_deref to the end. + if (N && Elts[0] == dwarf::DW_OP_deref) { + auto End = Elts.end(); + if (Elts.size() >= 3 && *std::prev(End, 3) == dwarf::DW_OP_LLVM_fragment) + End = std::prev(End, 3); + std::move(std::next(Elts.begin()), End, Elts.begin()); + *std::prev(End) = dwarf::DW_OP_deref; + } + NeedDeclareExpressionUpgrade = true; + LLVM_FALLTHROUGH; + case 2: + // Up-to-date! 
+ break; + default: + return error("Invalid record"); + } MetadataList.assignValue( GET_OR_DISTINCT(DIExpression, (Context, makeArrayRef(Record).slice(1))), @@ -1858,3 +1901,7 @@ bool MetadataLoader::isStrippingTBAA() { return Pimpl->isStrippingTBAA(); } unsigned MetadataLoader::size() const { return Pimpl->size(); } void MetadataLoader::shrinkTo(unsigned N) { return Pimpl->shrinkTo(N); } + +void MetadataLoader::upgradeDebugIntrinsics(Function &F) { + return Pimpl->upgradeDebugIntrinsics(F); +} diff --git a/lib/Bitcode/Reader/MetadataLoader.h b/lib/Bitcode/Reader/MetadataLoader.h index 442dfc94e4e19..f23dcc06cc949 100644 --- a/lib/Bitcode/Reader/MetadataLoader.h +++ b/lib/Bitcode/Reader/MetadataLoader.h @@ -79,6 +79,9 @@ public: unsigned size() const; void shrinkTo(unsigned N); + + /// Perform bitcode upgrades on llvm.dbg.* calls. + void upgradeDebugIntrinsics(Function &F); }; } diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp index 043441bac4dea..1d3cde2f5ddb0 100644 --- a/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -28,6 +28,7 @@ #include "llvm/IR/Operator.h" #include "llvm/IR/UseListOrder.h" #include "llvm/IR/ValueSymbolTable.h" +#include "llvm/MC/StringTableBuilder.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/Program.h" @@ -76,26 +77,28 @@ protected: /// The stream created and owned by the client. BitstreamWriter &Stream; - /// Saves the offset of the VSTOffset record that must eventually be - /// backpatched with the offset of the actual VST. - uint64_t VSTOffsetPlaceholder = 0; - public: /// Constructs a BitcodeWriterBase object that writes to the provided /// \p Stream. 
BitcodeWriterBase(BitstreamWriter &Stream) : Stream(Stream) {} protected: - bool hasVSTOffsetPlaceholder() { return VSTOffsetPlaceholder != 0; } - void writeValueSymbolTableForwardDecl(); void writeBitcodeHeader(); + void writeModuleVersion(); }; +void BitcodeWriterBase::writeModuleVersion() { + // VERSION: [version#] + Stream.EmitRecord(bitc::MODULE_CODE_VERSION, ArrayRef<uint64_t>{2}); +} + /// Class to manage the bitcode writing for a module. class ModuleBitcodeWriter : public BitcodeWriterBase { /// Pointer to the buffer allocated by caller for bitcode writing. const SmallVectorImpl<char> &Buffer; + StringTableBuilder &StrtabBuilder; + /// The Module to write to bitcode. const Module &M; @@ -127,15 +130,20 @@ class ModuleBitcodeWriter : public BitcodeWriterBase { /// Tracks the last value id recorded in the GUIDToValueMap. unsigned GlobalValueId; + /// Saves the offset of the VSTOffset record that must eventually be + /// backpatched with the offset of the actual VST. + uint64_t VSTOffsetPlaceholder = 0; + public: /// Constructs a ModuleBitcodeWriter object for the given Module, /// writing to the provided \p Buffer. 
ModuleBitcodeWriter(const Module *M, SmallVectorImpl<char> &Buffer, + StringTableBuilder &StrtabBuilder, BitstreamWriter &Stream, bool ShouldPreserveUseListOrder, const ModuleSummaryIndex *Index, bool GenerateHash, ModuleHash *ModHash = nullptr) - : BitcodeWriterBase(Stream), Buffer(Buffer), M(*M), - VE(*M, ShouldPreserveUseListOrder), Index(Index), + : BitcodeWriterBase(Stream), Buffer(Buffer), StrtabBuilder(StrtabBuilder), + M(*M), VE(*M, ShouldPreserveUseListOrder), Index(Index), GenerateHash(GenerateHash), ModHash(ModHash), BitcodeStartBit(Stream.GetCurrentBitNo()) { // Assign ValueIds to any callee values in the index that came from @@ -169,6 +177,7 @@ private: void writeAttributeTable(); void writeTypeTable(); void writeComdats(); + void writeValueSymbolTableForwardDecl(); void writeModuleInfo(); void writeValueAsMetadata(const ValueAsMetadata *MD, SmallVectorImpl<uint64_t> &Record); @@ -261,9 +270,9 @@ private: SmallVectorImpl<uint64_t> &Vals); void writeInstruction(const Instruction &I, unsigned InstID, SmallVectorImpl<unsigned> &Vals); - void writeValueSymbolTable( - const ValueSymbolTable &VST, bool IsModuleLevel = false, - DenseMap<const Function *, uint64_t> *FunctionToBitcodeIndex = nullptr); + void writeFunctionLevelValueSymbolTable(const ValueSymbolTable &VST); + void writeGlobalValueSymbolTable( + DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex); void writeUseList(UseListOrder &&Order); void writeUseListBlock(const Function *F); void @@ -477,7 +486,6 @@ public: private: void writeModStrings(); - void writeCombinedValueSymbolTable(); void writeCombinedGlobalValueSummary(); /// Indicates whether the provided \p ModulePath should be written into @@ -492,15 +500,15 @@ private: const auto &VMI = GUIDToValueIdMap.find(ValGUID); return VMI != GUIDToValueIdMap.end(); } + void assignValueId(GlobalValue::GUID ValGUID) { + unsigned &ValueId = GUIDToValueIdMap[ValGUID]; + if (ValueId == 0) + ValueId = ++GlobalValueId; + } unsigned 
getValueId(GlobalValue::GUID ValGUID) { - const auto &VMI = GUIDToValueIdMap.find(ValGUID); - // If this GUID doesn't have an entry, assign one. - if (VMI == GUIDToValueIdMap.end()) { - GUIDToValueIdMap[ValGUID] = ++GlobalValueId; - return GlobalValueId; - } else { - return VMI->second; - } + auto VMI = GUIDToValueIdMap.find(ValGUID); + assert(VMI != GUIDToValueIdMap.end()); + return VMI->second; } std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; } }; @@ -1047,13 +1055,10 @@ static unsigned getEncodedUnnamedAddr(const GlobalValue &GV) { void ModuleBitcodeWriter::writeComdats() { SmallVector<unsigned, 64> Vals; for (const Comdat *C : VE.getComdats()) { - // COMDAT: [selection_kind, name] + // COMDAT: [strtab offset, strtab size, selection_kind] + Vals.push_back(StrtabBuilder.add(C->getName())); + Vals.push_back(C->getName().size()); Vals.push_back(getEncodedComdatSelectionKind(*C)); - size_t Size = C->getName().size(); - assert(isUInt<32>(Size)); - Vals.push_back(Size); - for (char Chr : C->getName()) - Vals.push_back((unsigned char)Chr); Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0); Vals.clear(); } @@ -1062,7 +1067,7 @@ void ModuleBitcodeWriter::writeComdats() { /// Write a record that will eventually hold the word offset of the /// module-level VST. For now the offset is 0, which will be backpatched /// after the real VST is written. Saves the bit offset to backpatch. -void BitcodeWriterBase::writeValueSymbolTableForwardDecl() { +void ModuleBitcodeWriter::writeValueSymbolTableForwardDecl() { // Write a placeholder value in for the offset of the real VST, // which is written after the function blocks so that it can include // the offset of each function. The placeholder offset will be @@ -1165,6 +1170,8 @@ void ModuleBitcodeWriter::writeModuleInfo() { // Add an abbrev for common globals with no visibility or thread localness. 
auto Abbv = std::make_shared<BitCodeAbbrev>(); Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(MaxGlobalType+1))); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddrSpace << 2 @@ -1188,15 +1195,42 @@ void ModuleBitcodeWriter::writeModuleInfo() { SimpleGVarAbbrev = Stream.EmitAbbrev(std::move(Abbv)); } - // Emit the global variable information. SmallVector<unsigned, 64> Vals; + // Emit the module's source file name. + { + StringEncoding Bits = getStringEncoding(M.getSourceFileName().data(), + M.getSourceFileName().size()); + BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8); + if (Bits == SE_Char6) + AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6); + else if (Bits == SE_Fixed7) + AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7); + + // MODULE_CODE_SOURCE_FILENAME: [namechar x N] + auto Abbv = std::make_shared<BitCodeAbbrev>(); + Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(AbbrevOpToUse); + unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + for (const auto P : M.getSourceFileName()) + Vals.push_back((unsigned char)P); + + // Emit the finished record. + Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev); + Vals.clear(); + } + + // Emit the global variable information. 
for (const GlobalVariable &GV : M.globals()) { unsigned AbbrevToUse = 0; - // GLOBALVAR: [type, isconst, initid, + // GLOBALVAR: [strtab offset, strtab size, type, isconst, initid, // linkage, alignment, section, visibility, threadlocal, // unnamed_addr, externally_initialized, dllstorageclass, // comdat] + Vals.push_back(StrtabBuilder.add(GV.getName())); + Vals.push_back(GV.getName().size()); Vals.push_back(VE.getTypeID(GV.getValueType())); Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | GV.isConstant()); Vals.push_back(GV.isDeclaration() ? 0 : @@ -1226,9 +1260,12 @@ void ModuleBitcodeWriter::writeModuleInfo() { // Emit the function proto information. for (const Function &F : M) { - // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment, - // section, visibility, gc, unnamed_addr, prologuedata, - // dllstorageclass, comdat, prefixdata, personalityfn] + // FUNCTION: [strtab offset, strtab size, type, callingconv, isproto, + // linkage, paramattrs, alignment, section, visibility, gc, + // unnamed_addr, prologuedata, dllstorageclass, comdat, + // prefixdata, personalityfn] + Vals.push_back(StrtabBuilder.add(F.getName())); + Vals.push_back(F.getName().size()); Vals.push_back(VE.getTypeID(F.getFunctionType())); Vals.push_back(F.getCallingConv()); Vals.push_back(F.isDeclaration()); @@ -1255,8 +1292,10 @@ void ModuleBitcodeWriter::writeModuleInfo() { // Emit the alias information. 
for (const GlobalAlias &A : M.aliases()) { - // ALIAS: [alias type, aliasee val#, linkage, visibility, dllstorageclass, - // threadlocal, unnamed_addr] + // ALIAS: [strtab offset, strtab size, alias type, aliasee val#, linkage, + // visibility, dllstorageclass, threadlocal, unnamed_addr] + Vals.push_back(StrtabBuilder.add(A.getName())); + Vals.push_back(A.getName().size()); Vals.push_back(VE.getTypeID(A.getValueType())); Vals.push_back(A.getType()->getAddressSpace()); Vals.push_back(VE.getValueID(A.getAliasee())); @@ -1272,7 +1311,10 @@ void ModuleBitcodeWriter::writeModuleInfo() { // Emit the ifunc information. for (const GlobalIFunc &I : M.ifuncs()) { - // IFUNC: [ifunc type, address space, resolver val#, linkage, visibility] + // IFUNC: [strtab offset, strtab size, ifunc type, address space, resolver + // val#, linkage, visibility] + Vals.push_back(StrtabBuilder.add(I.getName())); + Vals.push_back(I.getName().size()); Vals.push_back(VE.getTypeID(I.getValueType())); Vals.push_back(I.getType()->getAddressSpace()); Vals.push_back(VE.getValueID(I.getResolver())); @@ -1282,34 +1324,6 @@ void ModuleBitcodeWriter::writeModuleInfo() { Vals.clear(); } - // Emit the module's source file name. - { - StringEncoding Bits = getStringEncoding(M.getSourceFileName().data(), - M.getSourceFileName().size()); - BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8); - if (Bits == SE_Char6) - AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6); - else if (Bits == SE_Fixed7) - AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7); - - // MODULE_CODE_SOURCE_FILENAME: [namechar x N] - auto Abbv = std::make_shared<BitCodeAbbrev>(); - Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); - Abbv->Add(AbbrevOpToUse); - unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv)); - - for (const auto P : M.getSourceFileName()) - Vals.push_back((unsigned char)P); - - // Emit the finished record. 
- Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev); - Vals.clear(); - } - - // If we have a VST, write the VSTOFFSET record placeholder. - if (M.getValueSymbolTable().empty()) - return; writeValueSymbolTableForwardDecl(); } @@ -1757,9 +1771,8 @@ void ModuleBitcodeWriter::writeDIExpression(const DIExpression *N, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.reserve(N->getElements().size() + 1); - - const uint64_t HasOpFragmentFlag = 1 << 1; - Record.push_back((uint64_t)N->isDistinct() | HasOpFragmentFlag); + const uint64_t Version = 2 << 1; + Record.push_back((uint64_t)N->isDistinct() | Version); Record.append(N->elements_begin(), N->elements_end()); Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev); @@ -2839,77 +2852,59 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I, Vals.clear(); } -/// Emit names for globals/functions etc. \p IsModuleLevel is true when -/// we are writing the module-level VST, where we are including a function -/// bitcode index and need to backpatch the VST forward declaration record. -void ModuleBitcodeWriter::writeValueSymbolTable( - const ValueSymbolTable &VST, bool IsModuleLevel, - DenseMap<const Function *, uint64_t> *FunctionToBitcodeIndex) { - if (VST.empty()) { - // writeValueSymbolTableForwardDecl should have returned early as - // well. Ensure this handling remains in sync by asserting that - // the placeholder offset is not set. - assert(!IsModuleLevel || !hasVSTOffsetPlaceholder()); - return; - } +/// Write a GlobalValue VST to the module. The purpose of this data structure is +/// to allow clients to efficiently find the function body. +void ModuleBitcodeWriter::writeGlobalValueSymbolTable( + DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) { + // Get the offset of the VST we are writing, and backpatch it into + // the VST forward declaration record. 
+ uint64_t VSTOffset = Stream.GetCurrentBitNo(); + // The BitcodeStartBit was the stream offset of the identification block. + VSTOffset -= bitcodeStartBit(); + assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned"); + // Note that we add 1 here because the offset is relative to one word + // before the start of the identification block, which was historically + // always the start of the regular bitcode header. + Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1); + + Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4); + + auto Abbv = std::make_shared<BitCodeAbbrev>(); + Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset + unsigned FnEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + for (const Function &F : M) { + uint64_t Record[2]; - if (IsModuleLevel && hasVSTOffsetPlaceholder()) { - // Get the offset of the VST we are writing, and backpatch it into - // the VST forward declaration record. - uint64_t VSTOffset = Stream.GetCurrentBitNo(); - // The BitcodeStartBit was the stream offset of the identification block. - VSTOffset -= bitcodeStartBit(); - assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned"); + if (F.isDeclaration()) + continue; + + Record[0] = VE.getValueID(&F); + + // Save the word offset of the function (from the start of the + // actual bitcode written to the stream). + uint64_t BitcodeIndex = FunctionToBitcodeIndex[&F] - bitcodeStartBit(); + assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned"); // Note that we add 1 here because the offset is relative to one word // before the start of the identification block, which was historically // always the start of the regular bitcode header. 
- Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1); - } - - Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4); + Record[1] = BitcodeIndex / 32 + 1; - // For the module-level VST, add abbrev Ids for the VST_CODE_FNENTRY - // records, which are not used in the per-function VSTs. - unsigned FnEntry8BitAbbrev; - unsigned FnEntry7BitAbbrev; - unsigned FnEntry6BitAbbrev; - unsigned GUIDEntryAbbrev; - if (IsModuleLevel && hasVSTOffsetPlaceholder()) { - // 8-bit fixed-width VST_CODE_FNENTRY function strings. - auto Abbv = std::make_shared<BitCodeAbbrev>(); - Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); - FnEntry8BitAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + Stream.EmitRecord(bitc::VST_CODE_FNENTRY, Record, FnEntryAbbrev); + } - // 7-bit fixed width VST_CODE_FNENTRY function strings. - Abbv = std::make_shared<BitCodeAbbrev>(); - Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); - FnEntry7BitAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + Stream.ExitBlock(); +} - // 6-bit char6 VST_CODE_FNENTRY function strings. 
- Abbv = std::make_shared<BitCodeAbbrev>(); - Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); - FnEntry6BitAbbrev = Stream.EmitAbbrev(std::move(Abbv)); +/// Emit names for arguments, instructions and basic blocks in a function. +void ModuleBitcodeWriter::writeFunctionLevelValueSymbolTable( + const ValueSymbolTable &VST) { + if (VST.empty()) + return; - // FIXME: Change the name of this record as it is now used by - // the per-module index as well. - Abbv = std::make_shared<BitCodeAbbrev>(); - Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_COMBINED_ENTRY)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // refguid - GUIDEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv)); - } + Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4); // FIXME: Set up the abbrev, we know how many values there are! // FIXME: We know if the type names can use 7-bit ascii. @@ -2923,38 +2918,13 @@ void ModuleBitcodeWriter::writeValueSymbolTable( unsigned AbbrevToUse = VST_ENTRY_8_ABBREV; NameVals.push_back(VE.getValueID(Name.getValue())); - Function *F = dyn_cast<Function>(Name.getValue()); - // VST_CODE_ENTRY: [valueid, namechar x N] - // VST_CODE_FNENTRY: [valueid, funcoffset, namechar x N] // VST_CODE_BBENTRY: [bbid, namechar x N] unsigned Code; if (isa<BasicBlock>(Name.getValue())) { Code = bitc::VST_CODE_BBENTRY; if (Bits == SE_Char6) AbbrevToUse = VST_BBENTRY_6_ABBREV; - } else if (F && !F->isDeclaration()) { - // Must be the module-level VST, where we pass in the Index and - // have a VSTOffsetPlaceholder. The function-level VST should not - // contain any Function symbols. 
- assert(FunctionToBitcodeIndex); - assert(hasVSTOffsetPlaceholder()); - - // Save the word offset of the function (from the start of the - // actual bitcode written to the stream). - uint64_t BitcodeIndex = (*FunctionToBitcodeIndex)[F] - bitcodeStartBit(); - assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned"); - // Note that we add 1 here because the offset is relative to one word - // before the start of the identification block, which was historically - // always the start of the regular bitcode header. - NameVals.push_back(BitcodeIndex / 32 + 1); - - Code = bitc::VST_CODE_FNENTRY; - AbbrevToUse = FnEntry8BitAbbrev; - if (Bits == SE_Char6) - AbbrevToUse = FnEntry6BitAbbrev; - else if (Bits == SE_Fixed7) - AbbrevToUse = FnEntry7BitAbbrev; } else { Code = bitc::VST_CODE_ENTRY; if (Bits == SE_Char6) @@ -2970,47 +2940,7 @@ void ModuleBitcodeWriter::writeValueSymbolTable( Stream.EmitRecord(Code, NameVals, AbbrevToUse); NameVals.clear(); } - // Emit any GUID valueIDs created for indirect call edges into the - // module-level VST. - if (IsModuleLevel && hasVSTOffsetPlaceholder()) - for (const auto &GI : valueIds()) { - NameVals.push_back(GI.second); - NameVals.push_back(GI.first); - Stream.EmitRecord(bitc::VST_CODE_COMBINED_ENTRY, NameVals, - GUIDEntryAbbrev); - NameVals.clear(); - } - Stream.ExitBlock(); -} - -/// Emit function names and summary offsets for the combined index -/// used by ThinLTO. -void IndexBitcodeWriter::writeCombinedValueSymbolTable() { - assert(hasVSTOffsetPlaceholder() && "Expected non-zero VSTOffsetPlaceholder"); - // Get the offset of the VST we are writing, and backpatch it into - // the VST forward declaration record. 
- uint64_t VSTOffset = Stream.GetCurrentBitNo(); - assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned"); - Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32); - - Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4); - - auto Abbv = std::make_shared<BitCodeAbbrev>(); - Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_COMBINED_ENTRY)); - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid - Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // refguid - unsigned EntryAbbrev = Stream.EmitAbbrev(std::move(Abbv)); - SmallVector<uint64_t, 64> NameVals; - for (const auto &GVI : valueIds()) { - // VST_CODE_COMBINED_ENTRY: [valueid, refguid] - NameVals.push_back(GVI.second); - NameVals.push_back(GVI.first); - - // Emit the finished record. - Stream.EmitRecord(bitc::VST_CODE_COMBINED_ENTRY, NameVals, EntryAbbrev); - NameVals.clear(); - } Stream.ExitBlock(); } @@ -3114,7 +3044,7 @@ void ModuleBitcodeWriter::writeFunction( // Emit names for all the instructions etc. if (auto *Symtab = F.getValueSymbolTable()) - writeValueSymbolTable(*Symtab); + writeFunctionLevelValueSymbolTable(*Symtab); if (NeedsMetadataAttachment) writeFunctionMetadataAttachment(F); @@ -3502,6 +3432,11 @@ void ModuleBitcodeWriter::writePerModuleGlobalValueSummary() { return; } + for (const auto &GVI : valueIds()) { + Stream.EmitRecord(bitc::FS_VALUE_GUID, + ArrayRef<uint64_t>{GVI.second, GVI.first}); + } + // Abbrev for FS_PERMODULE. auto Abbv = std::make_shared<BitCodeAbbrev>(); Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE)); @@ -3594,6 +3529,39 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() { Stream.EnterSubblock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID, 3); Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION}); + // Create value IDs for undefined references. 
+ for (const auto &I : *this) { + if (auto *VS = dyn_cast<GlobalVarSummary>(I.second)) { + for (auto &RI : VS->refs()) + assignValueId(RI.getGUID()); + continue; + } + + auto *FS = dyn_cast<FunctionSummary>(I.second); + if (!FS) + continue; + for (auto &RI : FS->refs()) + assignValueId(RI.getGUID()); + + for (auto &EI : FS->calls()) { + GlobalValue::GUID GUID = EI.first.getGUID(); + if (!hasValueId(GUID)) { + // For SamplePGO, the indirect call targets for local functions will + // have its original name annotated in profile. We try to find the + // corresponding PGOFuncName as the GUID. + GUID = Index.getGUIDFromOriginalID(GUID); + if (GUID == 0 || !hasValueId(GUID)) + continue; + } + assignValueId(GUID); + } + } + + for (const auto &GVI : valueIds()) { + Stream.EmitRecord(bitc::FS_VALUE_GUID, + ArrayRef<uint64_t>{GVI.second, GVI.first}); + } + // Abbrev for FS_COMBINED. auto Abbv = std::make_shared<BitCodeAbbrev>(); Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED)); @@ -3808,10 +3776,7 @@ void ModuleBitcodeWriter::write() { Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); size_t BlockStartPos = Buffer.size(); - SmallVector<unsigned, 1> Vals; - unsigned CurVersion = 1; - Vals.push_back(CurVersion); - Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals); + writeModuleVersion(); // Emit blockinfo, which defines the standard abbreviations etc. 
writeBlockInfo(); @@ -3857,8 +3822,7 @@ void ModuleBitcodeWriter::write() { if (Index) writePerModuleGlobalValueSummary(); - writeValueSymbolTable(M.getValueSymbolTable(), - /* IsModuleLevel */ true, &FunctionToBitcodeIndex); + writeGlobalValueSymbolTable(FunctionToBitcodeIndex); writeModuleHash(BlockStartPos); @@ -3946,13 +3910,45 @@ BitcodeWriter::BitcodeWriter(SmallVectorImpl<char> &Buffer) writeBitcodeHeader(*Stream); } -BitcodeWriter::~BitcodeWriter() = default; +BitcodeWriter::~BitcodeWriter() { assert(WroteStrtab); } + +void BitcodeWriter::writeBlob(unsigned Block, unsigned Record, StringRef Blob) { + Stream->EnterSubblock(Block, 3); + + auto Abbv = std::make_shared<BitCodeAbbrev>(); + Abbv->Add(BitCodeAbbrevOp(Record)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); + auto AbbrevNo = Stream->EmitAbbrev(std::move(Abbv)); + + Stream->EmitRecordWithBlob(AbbrevNo, ArrayRef<uint64_t>{Record}, Blob); + + Stream->ExitBlock(); +} + +void BitcodeWriter::writeStrtab() { + assert(!WroteStrtab); + + std::vector<char> Strtab; + StrtabBuilder.finalizeInOrder(); + Strtab.resize(StrtabBuilder.getSize()); + StrtabBuilder.write((uint8_t *)Strtab.data()); + + writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, + {Strtab.data(), Strtab.size()}); + + WroteStrtab = true; +} + +void BitcodeWriter::copyStrtab(StringRef Strtab) { + writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, Strtab); + WroteStrtab = true; +} void BitcodeWriter::writeModule(const Module *M, bool ShouldPreserveUseListOrder, const ModuleSummaryIndex *Index, bool GenerateHash, ModuleHash *ModHash) { - ModuleBitcodeWriter ModuleWriter(M, Buffer, *Stream, + ModuleBitcodeWriter ModuleWriter(M, Buffer, StrtabBuilder, *Stream, ShouldPreserveUseListOrder, Index, GenerateHash, ModHash); ModuleWriter.write(); @@ -3976,6 +3972,7 @@ void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out, BitcodeWriter Writer(Buffer); Writer.writeModule(M, ShouldPreserveUseListOrder, Index, GenerateHash, ModHash); + 
Writer.writeStrtab(); if (TT.isOSDarwin() || TT.isOSBinFormatMachO()) emitDarwinBCHeaderAndTrailer(Buffer, TT); @@ -3987,13 +3984,7 @@ void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out, void IndexBitcodeWriter::write() { Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); - SmallVector<unsigned, 1> Vals; - unsigned CurVersion = 1; - Vals.push_back(CurVersion); - Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals); - - // If we have a VST, write the VSTOFFSET record placeholder. - writeValueSymbolTableForwardDecl(); + writeModuleVersion(); // Write the module paths in the combined index. writeModStrings(); @@ -4001,10 +3992,6 @@ void IndexBitcodeWriter::write() { // Write the summary combined index records. writeCombinedGlobalValueSummary(); - // Need a special VST writer for the combined index (we don't have a - // real VST and real values when this is invoked). - writeCombinedValueSymbolTable(); - Stream.ExitBlock(); } diff --git a/lib/Bitcode/Writer/LLVMBuild.txt b/lib/Bitcode/Writer/LLVMBuild.txt index a450b38fba2c1..a07c280fa9e3f 100644 --- a/lib/Bitcode/Writer/LLVMBuild.txt +++ b/lib/Bitcode/Writer/LLVMBuild.txt @@ -19,4 +19,4 @@ type = Library name = BitWriter parent = Bitcode -required_libraries = Analysis Core Support +required_libraries = Analysis Core MC Support diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 6c18d56b82723..028c79f3ab6d2 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -834,9 +834,9 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) { OS << " <- "; // The second operand is only an offset if it's an immediate. - bool Deref = MI->getOperand(0).isReg() && MI->getOperand(1).isImm(); - int64_t Offset = Deref ? MI->getOperand(1).getImm() : 0; - + bool Deref = false; + bool MemLoc = MI->getOperand(0).isReg() && MI->getOperand(1).isImm(); + int64_t Offset = MemLoc ? 
MI->getOperand(1).getImm() : 0; for (unsigned i = 0; i < Expr->getNumElements(); ++i) { uint64_t Op = Expr->getElement(i); if (Op == dwarf::DW_OP_LLVM_fragment) { @@ -844,7 +844,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) { break; } else if (Deref) { // We currently don't support extra Offsets or derefs after the first - // one. Bail out early instead of emitting an incorrect comment + // one. Bail out early instead of emitting an incorrect comment. OS << " [complex expression]"; AP.OutStreamer->emitRawComment(OS.str()); return true; @@ -899,12 +899,12 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) { AP.OutStreamer->emitRawComment(OS.str()); return true; } - if (Deref) + if (MemLoc || Deref) OS << '['; OS << PrintReg(Reg, AP.MF->getSubtarget().getRegisterInfo()); } - if (Deref) + if (MemLoc || Deref) OS << '+' << Offset << ']'; // NOTE: Want this comment at start of line, don't emit with AddComment. @@ -1356,7 +1356,7 @@ bool AsmPrinter::doFinalization(Module &M) { OutContext.getOrCreateSymbol(StringRef("__morestack_addr")); OutStreamer->EmitLabel(AddrSymbol); - unsigned PtrSize = M.getDataLayout().getPointerSize(0); + unsigned PtrSize = MAI->getCodePointerSize(); OutStreamer->EmitSymbolValue(GetExternalSymbolSymbol("__morestack"), PtrSize); } @@ -2246,7 +2246,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) { // chu[nk1 chu][nk2 chu] ... [nkN-1 chunkN] ExtraBits = Realigned.getRawData()[0] & (((uint64_t)-1) >> (64 - ExtraBitsSize)); - Realigned = Realigned.lshr(ExtraBitsSize); + Realigned.lshrInPlace(ExtraBitsSize); } else ExtraBits = Realigned.getRawData()[BitWidth / 64]; } @@ -2781,7 +2781,7 @@ void AsmPrinter::emitXRayTable() { // before the function's end, we assume that this is happening after // the last return instruction. 
- auto WordSizeBytes = TM.getPointerSize(); + auto WordSizeBytes = MAI->getCodePointerSize(); MCSymbol *Tmp = OutContext.createTempSymbol("xray_synthetic_", true); OutStreamer->EmitCodeAlignment(16); OutStreamer->EmitSymbolValue(Tmp, WordSizeBytes, false); diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index 383b8cddb1a06..2571f68696516 100644 --- a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -1136,7 +1136,7 @@ TypeIndex CodeViewDebug::lowerTypeArray(const DICompositeType *Ty) { DITypeRef ElementTypeRef = Ty->getBaseType(); TypeIndex ElementTypeIndex = getTypeIndex(ElementTypeRef); // IndexType is size_t, which depends on the bitness of the target. - TypeIndex IndexType = Asm->MAI->getPointerSize() == 8 + TypeIndex IndexType = Asm->TM.getPointerSize() == 8 ? TypeIndex(SimpleTypeKind::UInt64Quad) : TypeIndex(SimpleTypeKind::UInt32Long); @@ -1342,8 +1342,8 @@ TypeIndex CodeViewDebug::lowerTypeMemberPointer(const DIDerivedType *Ty) { assert(Ty->getTag() == dwarf::DW_TAG_ptr_to_member_type); TypeIndex ClassTI = getTypeIndex(Ty->getClassType()); TypeIndex PointeeTI = getTypeIndex(Ty->getBaseType(), Ty->getClassType()); - PointerKind PK = Asm->MAI->getPointerSize() == 8 ? PointerKind::Near64 - : PointerKind::Near32; + PointerKind PK = Asm->TM.getPointerSize() == 8 ? PointerKind::Near64 + : PointerKind::Near32; bool IsPMF = isa<DISubroutineType>(Ty->getBaseType()); PointerMode PM = IsPMF ? 
PointerMode::PointerToMemberFunction : PointerMode::PointerToDataMember; @@ -1458,7 +1458,8 @@ TypeIndex CodeViewDebug::lowerTypeMemberFunction(const DISubroutineType *Ty, } TypeIndex CodeViewDebug::lowerTypeVFTableShape(const DIDerivedType *Ty) { - unsigned VSlotCount = Ty->getSizeInBits() / (8 * Asm->MAI->getPointerSize()); + unsigned VSlotCount = + Ty->getSizeInBits() / (8 * Asm->MAI->getCodePointerSize()); SmallVector<VFTableSlotKind, 4> Slots(VSlotCount, VFTableSlotKind::Near); VFTableShapeRecord VFTSR(Slots); diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp index b510e0ef36ac6..31c2b3b5e752f 100644 --- a/lib/CodeGen/AsmPrinter/DIE.cpp +++ b/lib/CodeGen/AsmPrinter/DIE.cpp @@ -31,6 +31,8 @@ #include "llvm/Support/raw_ostream.h" using namespace llvm; +#define DEBUG_TYPE "dwarfdebug" + //===----------------------------------------------------------------------===// // DIEAbbrevData Implementation //===----------------------------------------------------------------------===// @@ -79,15 +81,22 @@ void DIEAbbrev::Emit(const AsmPrinter *AP) const { dwarf::AttributeString(AttrData.getAttribute()).data()); // Emit form type. +#ifndef NDEBUG + // Could be an assertion, but this way we can see the failing form code + // easily, which helps track down where it came from. + if (!dwarf::isValidFormForVersion(AttrData.getForm(), + AP->getDwarfVersion())) { + DEBUG(dbgs() << "Invalid form " << format("0x%x", AttrData.getForm()) + << " for DWARF version " << AP->getDwarfVersion() << "\n"); + llvm_unreachable("Invalid form for specified DWARF version"); + } +#endif AP->EmitULEB128(AttrData.getForm(), dwarf::FormEncodingString(AttrData.getForm()).data()); // Emit value for DW_FORM_implicit_const. 
- if (AttrData.getForm() == dwarf::DW_FORM_implicit_const) { - assert(AP->getDwarfVersion() >= 5 && - "DW_FORM_implicit_const is supported starting from DWARFv5"); + if (AttrData.getForm() == dwarf::DW_FORM_implicit_const) AP->EmitSLEB128(AttrData.getValue()); - } } // Mark end of abbreviation. @@ -518,7 +527,7 @@ unsigned DIELabel::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const { if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_sec_offset) return 4; if (Form == dwarf::DW_FORM_strp) return 4; - return AP->getPointerSize(); + return AP->MAI->getCodePointerSize(); } LLVM_DUMP_METHOD @@ -540,7 +549,7 @@ unsigned DIEDelta::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const { if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_sec_offset) return 4; if (Form == dwarf::DW_FORM_strp) return 4; - return AP->getPointerSize(); + return AP->MAI->getCodePointerSize(); } LLVM_DUMP_METHOD @@ -682,7 +691,7 @@ unsigned DIEEntry::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const { return getULEB128Size(Entry->getOffset()); case dwarf::DW_FORM_ref_addr: if (AP->getDwarfVersion() == 2) - return AP->getPointerSize(); + return AP->MAI->getCodePointerSize(); switch (AP->OutStreamer->getContext().getDwarfFormat()) { case dwarf::DWARF32: return 4; @@ -808,7 +817,7 @@ unsigned DIELocList::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const { return 4; if (Form == dwarf::DW_FORM_sec_offset) return 4; - return AP->getPointerSize(); + return AP->MAI->getCodePointerSize(); } /// EmitValue - Emit label value. 
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index a550ff2fb90f3..738e062cb93f6 100644 --- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -547,18 +547,19 @@ DIE *DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV, DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc); for (auto &Fragment : DV.getFrameIndexExprs()) { unsigned FrameReg = 0; + const DIExpression *Expr = Fragment.Expr; const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering(); int Offset = TFI->getFrameIndexReference(*Asm->MF, Fragment.FI, FrameReg); - DwarfExpr.addFragmentOffset(Fragment.Expr); + DwarfExpr.addFragmentOffset(Expr); SmallVector<uint64_t, 8> Ops; Ops.push_back(dwarf::DW_OP_plus); Ops.push_back(Offset); - Ops.push_back(dwarf::DW_OP_deref); - Ops.append(Fragment.Expr->elements_begin(), Fragment.Expr->elements_end()); - DIExpressionCursor Expr(Ops); + Ops.append(Expr->elements_begin(), Expr->elements_end()); + DIExpressionCursor Cursor(Ops); + DwarfExpr.setMemoryLocationKind(); DwarfExpr.addMachineRegExpression( - *Asm->MF->getSubtarget().getRegisterInfo(), Expr, FrameReg); - DwarfExpr.addExpression(std::move(Expr)); + *Asm->MF->getSubtarget().getRegisterInfo(), Cursor, FrameReg); + DwarfExpr.addExpression(std::move(Cursor)); } addBlock(*VariableDie, dwarf::DW_AT_location, DwarfExpr.finalize()); @@ -779,12 +780,13 @@ void DwarfCompileUnit::addAddress(DIE &Die, dwarf::Attribute Attribute, const MachineLocation &Location) { DIELoc *Loc = new (DIEValueAllocator) DIELoc; DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc); + if (Location.isIndirect()) + DwarfExpr.setMemoryLocationKind(); SmallVector<uint64_t, 8> Ops; - if (Location.isIndirect()) { + if (Location.isIndirect() && Location.getOffset()) { Ops.push_back(dwarf::DW_OP_plus); Ops.push_back(Location.getOffset()); - Ops.push_back(dwarf::DW_OP_deref); } DIExpressionCursor Cursor(Ops); const TargetRegisterInfo 
&TRI = *Asm->MF->getSubtarget().getRegisterInfo(); @@ -807,12 +809,13 @@ void DwarfCompileUnit::addComplexAddress(const DbgVariable &DV, DIE &Die, DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc); const DIExpression *DIExpr = DV.getSingleExpression(); DwarfExpr.addFragmentOffset(DIExpr); + if (Location.isIndirect()) + DwarfExpr.setMemoryLocationKind(); SmallVector<uint64_t, 8> Ops; - if (Location.isIndirect()) { + if (Location.isIndirect() && Location.getOffset()) { Ops.push_back(dwarf::DW_OP_plus); Ops.push_back(Location.getOffset()); - Ops.push_back(dwarf::DW_OP_deref); } Ops.append(DIExpr->elements_begin(), DIExpr->elements_end()); DIExpressionCursor Cursor(Ops); diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 5ce1113092088..d72656bcc58d4 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -1517,13 +1517,12 @@ static void emitDebugLocValue(const AsmPrinter &AP, const DIBasicType *BT, DwarfExpr.addUnsignedConstant(Value.getInt()); } else if (Value.isLocation()) { MachineLocation Location = Value.getLoc(); - + if (Location.isIndirect()) + DwarfExpr.setMemoryLocationKind(); SmallVector<uint64_t, 8> Ops; - // FIXME: Should this condition be Location.isIndirect() instead? - if (Location.getOffset()) { + if (Location.isIndirect() && Location.getOffset()) { Ops.push_back(dwarf::DW_OP_plus); Ops.push_back(Location.getOffset()); - Ops.push_back(dwarf::DW_OP_deref); } Ops.append(DIExpr->elements_begin(), DIExpr->elements_end()); DIExpressionCursor Cursor(Ops); @@ -1578,7 +1577,7 @@ void DwarfDebug::emitDebugLoc() { // Start the dwarf loc section. 
Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfLocSection()); - unsigned char Size = Asm->getDataLayout().getPointerSize(); + unsigned char Size = Asm->MAI->getCodePointerSize(); for (const auto &List : DebugLocs.getLists()) { Asm->OutStreamer->EmitLabel(List.Label); const DwarfCompileUnit *CU = List.CU; @@ -1708,7 +1707,7 @@ void DwarfDebug::emitDebugARanges() { Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfARangesSection()); - unsigned PtrSize = Asm->getDataLayout().getPointerSize(); + unsigned PtrSize = Asm->MAI->getCodePointerSize(); // Build a list of CUs used. std::vector<DwarfCompileUnit *> CUs; @@ -1791,7 +1790,7 @@ void DwarfDebug::emitDebugRanges() { Asm->getObjFileLowering().getDwarfRangesSection()); // Size for our labels. - unsigned char Size = Asm->getDataLayout().getPointerSize(); + unsigned char Size = Asm->MAI->getCodePointerSize(); // Grab the specific ranges for the compile units in the module. for (const auto &I : CUMap) { diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index debe88f3b1ee1..f65dc151f3019 100644 --- a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -23,9 +23,12 @@ using namespace llvm; void DwarfExpression::addReg(int DwarfReg, const char *Comment) { - assert(DwarfReg >= 0 && "invalid negative dwarf register number"); - if (DwarfReg < 32) { - emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment); + assert(DwarfReg >= 0 && "invalid negative dwarf register number"); + assert((LocationKind == Unknown || LocationKind == Register) && + "location description already locked down"); + LocationKind = Register; + if (DwarfReg < 32) { + emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment); } else { emitOp(dwarf::DW_OP_regx, Comment); emitUnsigned(DwarfReg); @@ -34,6 +37,7 @@ void DwarfExpression::addReg(int DwarfReg, const char *Comment) { void DwarfExpression::addBReg(int DwarfReg, int Offset) { assert(DwarfReg >= 0 
&& "invalid negative dwarf register number"); + assert(LocationKind != Register && "location description already locked down"); if (DwarfReg < 32) { emitOp(dwarf::DW_OP_breg0 + DwarfReg); } else { @@ -156,18 +160,23 @@ void DwarfExpression::addStackValue() { } void DwarfExpression::addSignedConstant(int64_t Value) { + assert(LocationKind == Implicit || LocationKind == Unknown); + LocationKind = Implicit; emitOp(dwarf::DW_OP_consts); emitSigned(Value); - addStackValue(); } void DwarfExpression::addUnsignedConstant(uint64_t Value) { + assert(LocationKind == Implicit || LocationKind == Unknown); + LocationKind = Implicit; emitOp(dwarf::DW_OP_constu); emitUnsigned(Value); - addStackValue(); } void DwarfExpression::addUnsignedConstant(const APInt &Value) { + assert(LocationKind == Implicit || LocationKind == Unknown); + LocationKind = Implicit; + unsigned Size = Value.getBitWidth(); const uint64_t *Data = Value.getRawData(); @@ -178,7 +187,8 @@ void DwarfExpression::addUnsignedConstant(const APInt &Value) { addUnsignedConstant(*Data++); if (Offset == 0 && Size <= 64) break; - addOpPiece(std::min(Size-Offset, 64u), Offset); + addStackValue(); + addOpPiece(std::min(Size - Offset, 64u), Offset); Offset += 64; } } @@ -206,7 +216,7 @@ bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI, } // Handle simple register locations. - if (!HasComplexExpression) { + if (LocationKind != Memory && !HasComplexExpression) { for (auto &Reg : DwarfRegs) { if (Reg.DwarfRegNo >= 0) addReg(Reg.DwarfRegNo, Reg.Comment); @@ -216,62 +226,65 @@ bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI, return true; } + // Don't emit locations that cannot be expressed without DW_OP_stack_value. 
+ if (DwarfVersion < 4) + if (std::any_of(ExprCursor.begin(), ExprCursor.end(), + [](DIExpression::ExprOperand Op) -> bool { + return Op.getOp() == dwarf::DW_OP_stack_value; + })) { + DwarfRegs.clear(); + return false; + } + assert(DwarfRegs.size() == 1); auto Reg = DwarfRegs[0]; - bool FBReg = isFrameRegister(TRI, MachineReg); + bool FBReg = isFrameRegister(TRI, MachineReg); + int SignedOffset = 0; assert(Reg.Size == 0 && "subregister has same size as superregister"); // Pattern-match combinations for which more efficient representations exist. - switch (Op->getOp()) { - default: { - if (FBReg) - addFBReg(0); - else - addReg(Reg.DwarfRegNo, 0); - break; + // [Reg, Offset, DW_OP_plus] --> [DW_OP_breg, Offset]. + // [Reg, Offset, DW_OP_minus] --> [DW_OP_breg, -Offset]. + // If Reg is a subregister we need to mask it out before subtracting. + if (Op && ((Op->getOp() == dwarf::DW_OP_plus) || + (Op->getOp() == dwarf::DW_OP_minus && !SubRegisterSizeInBits))) { + int Offset = Op->getArg(0); + SignedOffset = (Op->getOp() == dwarf::DW_OP_plus) ? Offset : -Offset; + ExprCursor.take(); } - case dwarf::DW_OP_plus: - case dwarf::DW_OP_minus: { - // [DW_OP_reg,Offset,DW_OP_plus, DW_OP_deref] --> [DW_OP_breg, Offset]. - // [DW_OP_reg,Offset,DW_OP_minus,DW_OP_deref] --> [DW_OP_breg,-Offset]. - auto N = ExprCursor.peekNext(); - if (N && N->getOp() == dwarf::DW_OP_deref) { - int Offset = Op->getArg(0); - int SignedOffset = (Op->getOp() == dwarf::DW_OP_plus) ? Offset : -Offset; - if (FBReg) - addFBReg(SignedOffset); - else - addBReg(Reg.DwarfRegNo, SignedOffset); + if (FBReg) + addFBReg(SignedOffset); + else + addBReg(Reg.DwarfRegNo, SignedOffset); + DwarfRegs.clear(); + return true; +} - ExprCursor.consume(2); +/// Assuming a well-formed expression, match "DW_OP_deref* DW_OP_LLVM_fragment?". 
+static bool isMemoryLocation(DIExpressionCursor ExprCursor) { + while (ExprCursor) { + auto Op = ExprCursor.take(); + switch (Op->getOp()) { + case dwarf::DW_OP_deref: + case dwarf::DW_OP_LLVM_fragment: break; + default: + return false; } - addReg(Reg.DwarfRegNo, 0); - break; - } - case dwarf::DW_OP_deref: - // [DW_OP_reg,DW_OP_deref] --> [DW_OP_breg]. - if (FBReg) - addFBReg(0); - else - addBReg(Reg.DwarfRegNo, 0); - ExprCursor.take(); - break; } - DwarfRegs.clear(); return true; } void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor, unsigned FragmentOffsetInBits) { + // If we need to mask out a subregister, do it now, unless the next + // operation would emit an OpPiece anyway. + auto N = ExprCursor.peek(); + if (SubRegisterSizeInBits && N && (N->getOp() != dwarf::DW_OP_LLVM_fragment)) + maskSubRegister(); + while (ExprCursor) { auto Op = ExprCursor.take(); - - // If we need to mask out a subregister, do it now, unless the next - // operation would emit an OpPiece anyway. - if (SubRegisterSizeInBits && Op->getOp() != dwarf::DW_OP_LLVM_fragment) - maskSubRegister(); - switch (Op->getOp()) { case dwarf::DW_OP_LLVM_fragment: { unsigned SizeInBits = Op->getArg(1); @@ -281,50 +294,74 @@ void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor, // location. assert(OffsetInBits >= FragmentOffset && "fragment offset not added?"); - // If \a addMachineReg already emitted DW_OP_piece operations to represent + // If addMachineReg already emitted DW_OP_piece operations to represent // a super-register by splicing together sub-registers, subtract the size // of the pieces that was already emitted. SizeInBits -= OffsetInBits - FragmentOffset; - // If \a addMachineReg requested a DW_OP_bit_piece to stencil out a + // If addMachineReg requested a DW_OP_bit_piece to stencil out a // sub-register that is smaller than the current fragment's size, use it. 
if (SubRegisterSizeInBits) SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits); - + + // Emit a DW_OP_stack_value for implicit location descriptions. + if (LocationKind == Implicit) + addStackValue(); + + // Emit the DW_OP_piece. addOpPiece(SizeInBits, SubRegisterOffsetInBits); setSubRegisterPiece(0, 0); - break; + // Reset the location description kind. + LocationKind = Unknown; + return; } case dwarf::DW_OP_plus: + assert(LocationKind != Register); emitOp(dwarf::DW_OP_plus_uconst); emitUnsigned(Op->getArg(0)); break; case dwarf::DW_OP_minus: - // There is no OP_minus_uconst. + assert(LocationKind != Register); + // There is no DW_OP_minus_uconst. emitOp(dwarf::DW_OP_constu); emitUnsigned(Op->getArg(0)); emitOp(dwarf::DW_OP_minus); break; - case dwarf::DW_OP_deref: - emitOp(dwarf::DW_OP_deref); + case dwarf::DW_OP_deref: { + assert(LocationKind != Register); + if (LocationKind != Memory && isMemoryLocation(ExprCursor)) + // Turning this into a memory location description makes the deref + // implicit. + LocationKind = Memory; + else + emitOp(dwarf::DW_OP_deref); break; + } case dwarf::DW_OP_constu: + assert(LocationKind != Register); emitOp(dwarf::DW_OP_constu); emitUnsigned(Op->getArg(0)); break; case dwarf::DW_OP_stack_value: - addStackValue(); + assert(LocationKind == Unknown || LocationKind == Implicit); + LocationKind = Implicit; break; case dwarf::DW_OP_swap: + assert(LocationKind != Register); emitOp(dwarf::DW_OP_swap); break; case dwarf::DW_OP_xderef: + assert(LocationKind != Register); emitOp(dwarf::DW_OP_xderef); break; default: llvm_unreachable("unhandled opcode found in expression"); } } + + if (LocationKind == Implicit) + // Turn this into an implicit location description. + addStackValue(); } /// add masking operations to stencil out a subregister. 
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.h b/lib/CodeGen/AsmPrinter/DwarfExpression.h index e8dc211eb3c22..de86132000672 100644 --- a/lib/CodeGen/AsmPrinter/DwarfExpression.h +++ b/lib/CodeGen/AsmPrinter/DwarfExpression.h @@ -72,6 +72,8 @@ public: } /// Determine whether there are any operations left in this expression. operator bool() const { return Start != End; } + DIExpression::expr_op_iterator begin() const { return Start; } + DIExpression::expr_op_iterator end() const { return End; } /// Retrieve the fragment information, if any. Optional<DIExpression::FragmentInfo> getFragmentInfo() const { @@ -102,6 +104,9 @@ protected: unsigned SubRegisterSizeInBits = 0; unsigned SubRegisterOffsetInBits = 0; + /// The kind of location description being produced. + enum { Unknown = 0, Register, Memory, Implicit } LocationKind = Unknown; + /// Push a DW_OP_piece / DW_OP_bit_piece for emitting later, if one is needed /// to represent a subregister. void setSubRegisterPiece(unsigned SizeInBits, unsigned OffsetInBits) { @@ -122,7 +127,8 @@ protected: /// current function. virtual bool isFrameRegister(const TargetRegisterInfo &TRI, unsigned MachineReg) = 0; - /// Emit a DW_OP_reg operation. + /// Emit a DW_OP_reg operation. Note that this is only legal inside a DWARF + /// register location description. void addReg(int DwarfReg, const char *Comment = nullptr); /// Emit a DW_OP_breg operation. void addBReg(int DwarfReg, int Offset); @@ -185,11 +191,18 @@ public: /// Emit an unsigned constant. void addUnsignedConstant(const APInt &Value); + /// Lock this down to become a memory location description. + void setMemoryLocationKind() { + assert(LocationKind == Unknown); + LocationKind = Memory; + } + /// Emit a machine register location. As an optimization this may also consume /// the prefix of a DwarfExpression if a more efficient representation for /// combining the register location and the first operation exists. 
/// - /// \param FragmentOffsetInBits If this is one fragment out of a fragmented + /// \param FragmentOffsetInBits If this is one fragment out of a + /// fragmented /// location, this is the offset of the /// fragment inside the entire variable. /// \return false if no DWARF register exists diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index bad5b09553cdc..bac0c204d04fd 100644 --- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -27,6 +27,7 @@ #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Metadata.h" #include "llvm/MC/MachineLocation.h" +#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" @@ -73,8 +74,8 @@ bool DIEDwarfExpression::isFrameRegister(const TargetRegisterInfo &TRI, DwarfUnit::DwarfUnit(dwarf::Tag UnitTag, const DICompileUnit *Node, AsmPrinter *A, DwarfDebug *DW, DwarfFile *DWU) - : DIEUnit(A->getDwarfVersion(), A->getPointerSize(), UnitTag), CUNode(Node), - Asm(A), DD(DW), DU(DWU), IndexTyDie(nullptr) { + : DIEUnit(A->getDwarfVersion(), A->MAI->getCodePointerSize(), UnitTag), + CUNode(Node), Asm(A), DD(DW), DU(DWU), IndexTyDie(nullptr) { } DwarfTypeUnit::DwarfTypeUnit(DwarfCompileUnit &CU, AsmPrinter *A, @@ -471,12 +472,13 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die, // variable's location. DIELoc *Loc = new (DIEValueAllocator) DIELoc; DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc); + if (Location.isIndirect()) + DwarfExpr.setMemoryLocationKind(); SmallVector<uint64_t, 9> Ops; - if (Location.isIndirect()) { + if (Location.isIndirect() && Location.getOffset()) { Ops.push_back(dwarf::DW_OP_plus); Ops.push_back(Location.getOffset()); - Ops.push_back(dwarf::DW_OP_deref); } // If we started with a pointer to the __Block_byref... struct, then // the first thing we need to do is dereference the pointer (DW_OP_deref). 
@@ -1546,7 +1548,7 @@ void DwarfUnit::emitCommonHeader(bool UseOffsets, dwarf::UnitType UT) { Asm->OutStreamer->AddComment("DWARF Unit Type"); Asm->EmitInt8(UT); Asm->OutStreamer->AddComment("Address Size (in bytes)"); - Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); + Asm->EmitInt8(Asm->MAI->getCodePointerSize()); } // We share one abbreviations table across all units so it's always at the @@ -1562,7 +1564,7 @@ void DwarfUnit::emitCommonHeader(bool UseOffsets, dwarf::UnitType UT) { if (Version <= 4) { Asm->OutStreamer->AddComment("Address Size (in bytes)"); - Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); + Asm->EmitInt8(Asm->MAI->getCodePointerSize()); } } diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp index 2bdd189557b40..c862cfd28add3 100644 --- a/lib/CodeGen/CodeGenPrepare.cpp +++ b/lib/CodeGen/CodeGenPrepare.cpp @@ -570,8 +570,14 @@ bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) { ValueToValueMapTy VMap; BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F); - for (BasicBlock *Pred : OtherPreds) - Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc); + for (BasicBlock *Pred : OtherPreds) { + // If the target is a loop to itself, then the terminator of the split + // block needs to be updated. + if (Pred == Target) + BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc); + else + Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc); + } // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that // they are clones, so the number of PHIs are the same. 
@@ -5059,16 +5065,14 @@ bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { if (!ShlC) return false; uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); - auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt); - DemandBits |= ShlDemandBits; + DemandBits.setLowBits(BitWidth - ShiftAmt); break; } case llvm::Instruction::Trunc: { EVT TruncVT = TLI->getValueType(*DL, I->getType()); unsigned TruncBitWidth = TruncVT.getSizeInBits(); - auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth); - DemandBits |= TruncBits; + DemandBits.setLowBits(TruncBitWidth); break; } diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp index 7661873784469..5fb8dfc95d3fd 100644 --- a/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -381,18 +381,19 @@ bool IRTranslator::translateInsertValue(const User &U, uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices); unsigned Res = getOrCreateVReg(U); - const Value &Inserted = *U.getOperand(1); - MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted), - Offset); + unsigned Inserted = getOrCreateVReg(*U.getOperand(1)); + MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset); return true; } bool IRTranslator::translateSelect(const User &U, MachineIRBuilder &MIRBuilder) { - MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)), - getOrCreateVReg(*U.getOperand(1)), - getOrCreateVReg(*U.getOperand(2))); + unsigned Res = getOrCreateVReg(U); + unsigned Tst = getOrCreateVReg(*U.getOperand(0)); + unsigned Op0 = getOrCreateVReg(*U.getOperand(1)); + unsigned Op1 = getOrCreateVReg(*U.getOperand(2)); + MIRBuilder.buildSelect(Res, Tst, Op0, Op1); return true; } @@ -984,9 +985,11 @@ bool IRTranslator::translateInsertElement(const User &U, ValToVReg[&U] = Elt; return true; } - MIRBuilder.buildInsertVectorElement( - getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)), 
- getOrCreateVReg(*U.getOperand(1)), getOrCreateVReg(*U.getOperand(2))); + unsigned Res = getOrCreateVReg(U); + unsigned Val = getOrCreateVReg(*U.getOperand(0)); + unsigned Elt = getOrCreateVReg(*U.getOperand(1)); + unsigned Idx = getOrCreateVReg(*U.getOperand(2)); + MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); return true; } @@ -999,9 +1002,10 @@ bool IRTranslator::translateExtractElement(const User &U, ValToVReg[&U] = Elt; return true; } - MIRBuilder.buildExtractVectorElement(getOrCreateVReg(U), - getOrCreateVReg(*U.getOperand(0)), - getOrCreateVReg(*U.getOperand(1))); + unsigned Res = getOrCreateVReg(U); + unsigned Val = getOrCreateVReg(*U.getOperand(0)); + unsigned Idx = getOrCreateVReg(*U.getOperand(1)); + MIRBuilder.buildExtractVectorElement(Res, Val, Idx); return true; } diff --git a/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/lib/CodeGen/GlobalISel/InstructionSelector.cpp index fb9d01ef8542a..942680b6fff34 100644 --- a/lib/CodeGen/GlobalISel/InstructionSelector.cpp +++ b/lib/CodeGen/GlobalISel/InstructionSelector.cpp @@ -68,23 +68,6 @@ bool InstructionSelector::constrainSelectedInstRegOperands( return true; } -Optional<int64_t> -InstructionSelector::getConstantVRegVal(unsigned VReg, - const MachineRegisterInfo &MRI) const { - MachineInstr *MI = MRI.getVRegDef(VReg); - if (MI->getOpcode() != TargetOpcode::G_CONSTANT) - return None; - - if (MI->getOperand(1).isImm()) - return MI->getOperand(1).getImm(); - - if (MI->getOperand(1).isCImm() && - MI->getOperand(1).getCImm()->getBitWidth() <= 64) - return MI->getOperand(1).getCImm()->getSExtValue(); - - return None; -} - bool InstructionSelector::isOperandImmEqual( const MachineOperand &MO, int64_t Value, const MachineRegisterInfo &MRI) const { diff --git a/lib/CodeGen/GlobalISel/Legalizer.cpp b/lib/CodeGen/GlobalISel/Legalizer.cpp index 657ddb3079195..74ed58e8d0493 100644 --- a/lib/CodeGen/GlobalISel/Legalizer.cpp +++ b/lib/CodeGen/GlobalISel/Legalizer.cpp @@ -24,6 +24,8 @@ #include 
"llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" +#include <iterator> + #define DEBUG_TYPE "legalizer" using namespace llvm; @@ -161,7 +163,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) { // convergence for performance reasons. bool Changed = false; MachineBasicBlock::iterator NextMI; - for (auto &MBB : MF) + for (auto &MBB : MF) { for (auto MI = MBB.begin(); MI != MBB.end(); MI = NextMI) { // Get the next Instruction before we try to legalize, because there's a // good chance MI will be deleted. @@ -171,18 +173,21 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) { // and are assumed to be legal. if (!isPreISelGenericOpcode(MI->getOpcode())) continue; + unsigned NumNewInsns = 0; SmallVector<MachineInstr *, 4> WorkList; - Helper.MIRBuilder.recordInsertions( - [&](MachineInstr *MI) { WorkList.push_back(MI); }); + Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) { + ++NumNewInsns; + WorkList.push_back(MI); + }); WorkList.push_back(&*MI); + bool Changed = false; LegalizerHelper::LegalizeResult Res; unsigned Idx = 0; do { Res = Helper.legalizeInstrStep(*WorkList[Idx]); // Error out if we couldn't legalize this instruction. We may want to - // fall - // back to DAG ISel instead in the future. + // fall back to DAG ISel instead in the future. if (Res == LegalizerHelper::UnableToLegalize) { Helper.MIRBuilder.stopRecordingInsertions(); if (Res == LegalizerHelper::UnableToLegalize) { @@ -194,10 +199,21 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) { } Changed |= Res == LegalizerHelper::Legalized; ++Idx; + +#ifndef NDEBUG + if (NumNewInsns) { + DEBUG(dbgs() << ".. .. Emitted " << NumNewInsns << " insns\n"); + for (auto I = WorkList.end() - NumNewInsns, E = WorkList.end(); + I != E; ++I) + DEBUG(dbgs() << ".. .. 
New MI: "; (*I)->print(dbgs())); + NumNewInsns = 0; + } +#endif } while (Idx < WorkList.size()); Helper.MIRBuilder.stopRecordingInsertions(); } + } MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); @@ -207,7 +223,11 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) { // good chance MI will be deleted. NextMI = std::next(MI); - Changed |= combineExtracts(*MI, MRI, TII); + // combineExtracts erases MI. + if (combineExtracts(*MI, MRI, TII)) { + Changed = true; + continue; + } Changed |= combineMerges(*MI, MRI, TII); } } diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 20358f7ee6c2e..58778077bc0e7 100644 --- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -24,7 +24,7 @@ #include <sstream> -#define DEBUG_TYPE "legalize-mir" +#define DEBUG_TYPE "legalizer" using namespace llvm; @@ -35,24 +35,34 @@ LegalizerHelper::LegalizerHelper(MachineFunction &MF) LegalizerHelper::LegalizeResult LegalizerHelper::legalizeInstrStep(MachineInstr &MI) { + DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs())); + auto Action = LI.getAction(MI, MRI); switch (std::get<0>(Action)) { case LegalizerInfo::Legal: + DEBUG(dbgs() << ".. Already legal\n"); return AlreadyLegal; case LegalizerInfo::Libcall: + DEBUG(dbgs() << ".. Convert to libcall\n"); return libcall(MI); case LegalizerInfo::NarrowScalar: + DEBUG(dbgs() << ".. Narrow scalar\n"); return narrowScalar(MI, std::get<1>(Action), std::get<2>(Action)); case LegalizerInfo::WidenScalar: + DEBUG(dbgs() << ".. Widen scalar\n"); return widenScalar(MI, std::get<1>(Action), std::get<2>(Action)); case LegalizerInfo::Lower: + DEBUG(dbgs() << ".. Lower\n"); return lower(MI, std::get<1>(Action), std::get<2>(Action)); case LegalizerInfo::FewerElements: + DEBUG(dbgs() << ".. 
Reduce number of elements\n"); return fewerElementsVector(MI, std::get<1>(Action), std::get<2>(Action)); case LegalizerInfo::Custom: + DEBUG(dbgs() << ".. Custom legalization\n"); return LI.legalizeCustom(MI, MRI, MIRBuilder) ? Legalized : UnableToLegalize; default: + DEBUG(dbgs() << ".. Unable to legalize\n"); return UnableToLegalize; } } diff --git a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 8d1a263395a0e..54ef7e5c5a1b1 100644 --- a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -592,7 +592,7 @@ MachineInstrBuilder MachineIRBuilder::buildInsertVectorElement(unsigned Res, LLT EltTy = MRI->getType(Elt); LLT IdxTy = MRI->getType(Idx); assert(ResTy.isVector() && ValTy.isVector() && "invalid operand type"); - assert(EltTy.isScalar() && IdxTy.isScalar() && "invalid operand type"); + assert(IdxTy.isScalar() && "invalid operand type"); assert(ResTy.getNumElements() == ValTy.getNumElements() && "type mismatch"); assert(ResTy.getElementType() == EltTy && "type mismatch"); #endif @@ -612,7 +612,8 @@ MachineInstrBuilder MachineIRBuilder::buildExtractVectorElement(unsigned Res, LLT ValTy = MRI->getType(Val); LLT IdxTy = MRI->getType(Idx); assert(ValTy.isVector() && "invalid operand type"); - assert(ResTy.isScalar() && IdxTy.isScalar() && "invalid operand type"); + assert((ResTy.isScalar() || ResTy.isPointer()) && "invalid operand type"); + assert(IdxTy.isScalar() && "invalid operand type"); assert(ValTy.getElementType() == ResTy && "type mismatch"); #endif diff --git a/lib/CodeGen/GlobalISel/Utils.cpp b/lib/CodeGen/GlobalISel/Utils.cpp index 606a59680a3d4..3c93f8123b0d1 100644 --- a/lib/CodeGen/GlobalISel/Utils.cpp +++ b/lib/CodeGen/GlobalISel/Utils.cpp @@ -18,6 +18,7 @@ #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/IR/Constants.h" #include 
"llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" @@ -93,3 +94,19 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, R << Msg << ": " << ore::MNV("Inst", MI); reportGISelFailure(MF, TPC, MORE, R); } + +Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg, + const MachineRegisterInfo &MRI) { + MachineInstr *MI = MRI.getVRegDef(VReg); + if (MI->getOpcode() != TargetOpcode::G_CONSTANT) + return None; + + if (MI->getOperand(1).isImm()) + return MI->getOperand(1).getImm(); + + if (MI->getOperand(1).isCImm() && + MI->getOperand(1).getCImm()->getBitWidth() <= 64) + return MI->getOperand(1).getCImm()->getSExtValue(); + + return None; +} diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp index a1cb0a0695bfa..b7ab404070b1a 100644 --- a/lib/CodeGen/InlineSpiller.cpp +++ b/lib/CodeGen/InlineSpiller.cpp @@ -888,20 +888,10 @@ void InlineSpiller::spillAroundUses(unsigned Reg) { // Debug values are not allowed to affect codegen. if (MI->isDebugValue()) { // Modify DBG_VALUE now that the value is in a spill slot. - bool IsIndirect = MI->isIndirectDebugValue(); - uint64_t Offset = IsIndirect ? 
MI->getOperand(1).getImm() : 0; - const MDNode *Var = MI->getDebugVariable(); - const MDNode *Expr = MI->getDebugExpression(); - DebugLoc DL = MI->getDebugLoc(); - DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI); MachineBasicBlock *MBB = MI->getParent(); - assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && - "Expected inlined-at fields to agree"); - BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE)) - .addFrameIndex(StackSlot) - .addImm(Offset) - .addMetadata(Var) - .addMetadata(Expr); + DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI); + buildDbgValueForSpill(*MBB, MI, *MI, StackSlot); + MBB->erase(MI); continue; } diff --git a/lib/CodeGen/LowLevelType.cpp b/lib/CodeGen/LowLevelType.cpp index c4b9068fa905a..1c682e72fa491 100644 --- a/lib/CodeGen/LowLevelType.cpp +++ b/lib/CodeGen/LowLevelType.cpp @@ -21,10 +21,10 @@ using namespace llvm; LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) { if (auto VTy = dyn_cast<VectorType>(&Ty)) { auto NumElements = VTy->getNumElements(); - auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits(); + LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL); if (NumElements == 1) - return LLT::scalar(ScalarSizeInBits); - return LLT::vector(NumElements, ScalarSizeInBits); + return ScalarTy; + return LLT::vector(NumElements, ScalarTy); } else if (auto PTy = dyn_cast<PointerType>(&Ty)) { return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty)); } else if (Ty.isSized()) { diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp index c0a8b95ed8a06..4bd5fbfe38e6e 100644 --- a/lib/CodeGen/MachineInstr.cpp +++ b/lib/CodeGen/MachineInstr.cpp @@ -2351,3 +2351,31 @@ MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB, BB.insert(I, MI); return MachineInstrBuilder(MF, MI); } + +MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB, + MachineBasicBlock::iterator I, + const MachineInstr &Orig, + int FrameIndex) 
{ + const MDNode *Var = Orig.getDebugVariable(); + auto *Expr = cast_or_null<DIExpression>(Orig.getDebugExpression()); + bool IsIndirect = Orig.isIndirectDebugValue(); + uint64_t Offset = IsIndirect ? Orig.getOperand(1).getImm() : 0; + DebugLoc DL = Orig.getDebugLoc(); + assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && + "Expected inlined-at fields to agree"); + // If the DBG_VALUE already was a memory location, add an extra + // DW_OP_deref. Otherwise just turning this from a register into a + // memory/indirect location is sufficient. + if (IsIndirect) { + SmallVector<uint64_t, 8> Ops; + Ops.push_back(dwarf::DW_OP_deref); + if (Expr) + Ops.append(Expr->elements_begin(), Expr->elements_end()); + Expr = DIExpression::get(Expr->getContext(), Ops); + } + return BuildMI(BB, I, DL, Orig.getDesc()) + .addFrameIndex(FrameIndex) + .addImm(Offset) + .addMetadata(Var) + .addMetadata(Expr); +} diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index d392c044bd716..84bd670105e10 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -2030,6 +2030,8 @@ namespace { void MachineVerifier::verifyStackFrame() { unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode(); + if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u) + return; SmallVector<StackStateOfBB, 8> SPState; SPState.resize(MF->getNumBlockIDs()); diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp index fd759bc372b25..283d84629f8ee 100644 --- a/lib/CodeGen/RegAllocFast.cpp +++ b/lib/CodeGen/RegAllocFast.cpp @@ -304,19 +304,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, LiveDbgValueMap[LRI->VirtReg]; for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) { MachineInstr *DBG = LRIDbgValues[li]; - const MDNode *Var = DBG->getDebugVariable(); - const MDNode *Expr = DBG->getDebugExpression(); - bool IsIndirect = 
DBG->isIndirectDebugValue(); - uint64_t Offset = IsIndirect ? DBG->getOperand(1).getImm() : 0; - DebugLoc DL = DBG->getDebugLoc(); - assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && - "Expected inlined-at fields to agree"); - MachineInstr *NewDV = - BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::DBG_VALUE)) - .addFrameIndex(FI) - .addImm(Offset) - .addMetadata(Var) - .addMetadata(Expr); + MachineInstr *NewDV = buildDbgValueForSpill(*MBB, MI, *DBG, FI); assert(NewDV->getParent() == MBB && "dangling parent pointer"); (void)NewDV; DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV); diff --git a/lib/CodeGen/SafeStack.cpp b/lib/CodeGen/SafeStack.cpp index fa68411284e77..7fa379d80c6c7 100644 --- a/lib/CodeGen/SafeStack.cpp +++ b/lib/CodeGen/SafeStack.cpp @@ -550,7 +550,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack( // Replace alloc with the new location. replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB, - /*Deref=*/true, -Offset); + /*Deref=*/false, -Offset); Arg->replaceAllUsesWith(NewArg); IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode()); IRB.CreateMemCpy(Off, Arg, Size, Arg->getParamAlignment()); @@ -565,7 +565,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack( if (Size == 0) Size = 1; // Don't create zero-sized stack objects. - replaceDbgDeclareForAlloca(AI, BasePointer, DIB, /*Deref=*/true, -Offset); + replaceDbgDeclareForAlloca(AI, BasePointer, DIB, /*Deref=*/false, -Offset); replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset); // Replace uses of the alloca with the new location. 
@@ -655,7 +655,7 @@ void SafeStack::moveDynamicAllocasToUnsafeStack( if (AI->hasName() && isa<Instruction>(NewAI)) NewAI->takeName(AI); - replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true); + replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/false); AI->replaceAllUsesWith(NewAI); AI->eraseFromParent(); } diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 4d468551ae24e..4702d63cb617a 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2146,7 +2146,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) { if (N->getFlags()->hasNoUnsignedWrap()) return N0; - if (DAG.MaskedValueIsZero(N1, ~APInt::getSignBit(BitWidth))) { + if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) { // N1 is either 0 or the minimum signed value. If the sub is NSW, then // N1 must be 0 because negating the minimum signed value is undefined. if (N->getFlags()->hasNoSignedWrap()) @@ -3705,7 +3705,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) { // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) // fold (and (sra)) -> (and (srl)) when possible. - if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) + if (SimplifyDemandedBits(SDValue(N, 0))) return SDValue(N, 0); // fold (zext_inreg (extload x)) -> (zextload x) @@ -4225,8 +4225,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) { return Load; // Simplify the operands using demanded-bits information. - if (!VT.isVector() && - SimplifyDemandedBits(SDValue(N, 0))) + if (SimplifyDemandedBits(SDValue(N, 0))) return SDValue(N, 0); return SDValue(); @@ -5058,8 +5057,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) { return Tmp; // Simplify the expression using non-local knowledge. 
- if (!VT.isVector() && - SimplifyDemandedBits(SDValue(N, 0))) + if (SimplifyDemandedBits(SDValue(N, 0))) return SDValue(N, 0); return SDValue(); @@ -5350,7 +5348,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) { Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), DAG.getConstant(c2 - c1, DL, N1.getValueType())); } else { - Mask = Mask.lshr(c1 - c2); + Mask.lshrInPlace(c1 - c2); SDLoc DL(N); Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), DAG.getConstant(c1 - c2, DL, N1.getValueType())); @@ -5660,7 +5658,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) { DAG.getConstant(ShiftAmt, DL0, getShiftAmountTy(SmallVT))); AddToWorklist(SmallShift.getNode()); - APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt); + APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt); SDLoc DL(N); return DAG.getNode(ISD::AND, DL, VT, DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift), @@ -8300,11 +8298,11 @@ static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG, switch (N0.getOpcode()) { case ISD::AND: FPOpcode = ISD::FABS; - SignMask = ~APInt::getSignBit(SourceVT.getSizeInBits()); + SignMask = ~APInt::getSignMask(SourceVT.getSizeInBits()); break; case ISD::XOR: FPOpcode = ISD::FNEG; - SignMask = APInt::getSignBit(SourceVT.getSizeInBits()); + SignMask = APInt::getSignMask(SourceVT.getSizeInBits()); break; // TODO: ISD::OR --> ISD::FNABS? 
default: @@ -8415,7 +8413,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) { if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { assert(VT.getSizeInBits() == 128); SDValue SignBit = DAG.getConstant( - APInt::getSignBit(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64); + APInt::getSignMask(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64); SDValue FlipBit; if (N0.getOpcode() == ISD::FNEG) { FlipBit = SignBit; @@ -8435,7 +8433,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) { AddToWorklist(FlipBits.getNode()); return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits); } - APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); + APInt SignBit = APInt::getSignMask(VT.getSizeInBits()); if (N0.getOpcode() == ISD::FNEG) return DAG.getNode(ISD::XOR, DL, VT, NewConv, DAG.getConstant(SignBit, DL, VT)); @@ -8483,7 +8481,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) { } if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { - APInt SignBit = APInt::getSignBit(VT.getSizeInBits() / 2); + APInt SignBit = APInt::getSignMask(VT.getSizeInBits() / 2); SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); AddToWorklist(Cst.getNode()); SDValue X = DAG.getBitcast(VT, N0.getOperand(1)); @@ -8504,7 +8502,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) { AddToWorklist(FlipBits.getNode()); return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits); } - APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); + APInt SignBit = APInt::getSignMask(VT.getSizeInBits()); X = DAG.getNode(ISD::AND, SDLoc(X), VT, X, DAG.getConstant(SignBit, SDLoc(X), VT)); AddToWorklist(X.getNode()); @@ -8687,7 +8685,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) { for (unsigned j = 0; j != NumOutputsPerInput; ++j) { APInt ThisVal = OpVal.trunc(DstBitSize); Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT)); - OpVal = OpVal.lshr(DstBitSize); + OpVal.lshrInPlace(DstBitSize); } // For big endian targets, swap the order of the pieces of each element. 
@@ -10315,11 +10313,11 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) { if (N0.getValueType().isVector()) { // For a vector, get a mask such as 0x80... per scalar element // and splat it. - SignMask = APInt::getSignBit(N0.getScalarValueSizeInBits()); + SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits()); SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask); } else { // For a scalar, just generate 0x80... - SignMask = APInt::getSignBit(IntVT.getSizeInBits()); + SignMask = APInt::getSignMask(IntVT.getSizeInBits()); } SDLoc DL0(N0); Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int, @@ -10420,11 +10418,11 @@ SDValue DAGCombiner::visitFABS(SDNode *N) { if (N0.getValueType().isVector()) { // For a vector, get a mask such as 0x7f... per scalar element // and splat it. - SignMask = ~APInt::getSignBit(N0.getScalarValueSizeInBits()); + SignMask = ~APInt::getSignMask(N0.getScalarValueSizeInBits()); SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask); } else { // For a scalar, just generate 0x7f... - SignMask = ~APInt::getSignBit(IntVT.getSizeInBits()); + SignMask = ~APInt::getSignMask(IntVT.getSizeInBits()); } SDLoc DL(N0); Int = DAG.getNode(ISD::AND, DL, IntVT, Int, @@ -12375,6 +12373,27 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) { return LHS.OffsetFromBase < RHS.OffsetFromBase; }); + // Store Merge attempts to merge the lowest stores. This generally + // works out as if successful, as the remaining stores are checked + // after the first collection of stores is merged. However, in the + // case that a non-mergeable store is found first, e.g., {p[-2], + // p[0], p[1], p[2], p[3]}, we would fail and miss the subsequent + // mergeable cases. To prevent this, we prune such stores from the + // front of StoreNodes here. 
+ + unsigned StartIdx = 0; + while ((StartIdx + 1 < StoreNodes.size()) && + StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes != + StoreNodes[StartIdx + 1].OffsetFromBase) + ++StartIdx; + + // Bail if we don't have enough candidates to merge. + if (StartIdx + 1 >= StoreNodes.size()) + return false; + + if (StartIdx) + StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx); + // Scan the memory operations on the chain and find the first non-consecutive // store memory address. unsigned NumConsecutiveStores = 0; @@ -12485,39 +12504,52 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) { // When extracting multiple vector elements, try to store them // in one vector store rather than a sequence of scalar stores. if (IsExtractVecSrc) { - LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; - unsigned FirstStoreAS = FirstInChain->getAddressSpace(); - unsigned FirstStoreAlign = FirstInChain->getAlignment(); - unsigned NumStoresToMerge = 0; - bool IsVec = MemVT.isVector(); - for (unsigned i = 0; i < NumConsecutiveStores; ++i) { - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); - unsigned StoreValOpcode = St->getValue().getOpcode(); - // This restriction could be loosened. - // Bail out if any stored values are not elements extracted from a vector. - // It should be possible to handle mixed sources, but load sources need - // more careful handling (see the block of code below that handles - // consecutive loads). 
- if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT && - StoreValOpcode != ISD::EXTRACT_SUBVECTOR) - return false; + bool RV = false; + while (StoreNodes.size() >= 2) { + LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; + unsigned FirstStoreAS = FirstInChain->getAddressSpace(); + unsigned FirstStoreAlign = FirstInChain->getAlignment(); + unsigned NumStoresToMerge = 0; + bool IsVec = MemVT.isVector(); + for (unsigned i = 0; i < NumConsecutiveStores; ++i) { + StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + unsigned StoreValOpcode = St->getValue().getOpcode(); + // This restriction could be loosened. + // Bail out if any stored values are not elements extracted from a + // vector. It should be possible to handle mixed sources, but load + // sources need more careful handling (see the block of code below that + // handles consecutive loads). + if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT && + StoreValOpcode != ISD::EXTRACT_SUBVECTOR) + return false; - // Find a legal type for the vector store. - unsigned Elts = i + 1; - if (IsVec) { - // When merging vector stores, get the total number of elements. - Elts *= MemVT.getVectorNumElements(); + // Find a legal type for the vector store. + unsigned Elts = i + 1; + if (IsVec) { + // When merging vector stores, get the total number of elements. 
+ Elts *= MemVT.getVectorNumElements(); + } + EVT Ty = + EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts); + bool IsFast; + if (TLI.isTypeLegal(Ty) && + TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS, + FirstStoreAlign, &IsFast) && + IsFast) + NumStoresToMerge = i + 1; } - EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts); - bool IsFast; - if (TLI.isTypeLegal(Ty) && - TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS, - FirstStoreAlign, &IsFast) && IsFast) - NumStoresToMerge = i + 1; - } - return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumStoresToMerge, - false, true); + bool Merged = MergeStoresOfConstantsOrVecElts( + StoreNodes, MemVT, NumStoresToMerge, false, true); + if (!Merged) + break; + // Remove merged stores for next iteration. + StoreNodes.erase(StoreNodes.begin(), + StoreNodes.begin() + NumStoresToMerge); + RV = true; + NumConsecutiveStores -= NumStoresToMerge; + } + return RV; } // Below we handle the case of multiple consecutive stores that @@ -15122,9 +15154,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) { // Extract the sub element from the constant bit mask. if (DAG.getDataLayout().isBigEndian()) { - Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits); + Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits); } else { - Bits = Bits.lshr(SubIdx * NumSubBits); + Bits.lshrInPlace(SubIdx * NumSubBits); } if (Split > 1) @@ -16004,7 +16036,7 @@ SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags) { /// Return true if base is a frame index, which is known not to alias with /// anything but itself. Provides base object and offset as results. -static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset, +static bool findBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset, const GlobalValue *&GV, const void *&CV) { // Assume it is a primitive operation. 
Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr; @@ -16057,53 +16089,56 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const { return false; // Gather base node and offset information. - SDValue Base1, Base2; - int64_t Offset1, Offset2; - const GlobalValue *GV1, *GV2; - const void *CV1, *CV2; - bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(), + SDValue Base0, Base1; + int64_t Offset0, Offset1; + const GlobalValue *GV0, *GV1; + const void *CV0, *CV1; + bool IsFrameIndex0 = findBaseOffset(Op0->getBasePtr(), + Base0, Offset0, GV0, CV0); + bool IsFrameIndex1 = findBaseOffset(Op1->getBasePtr(), Base1, Offset1, GV1, CV1); - bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(), - Base2, Offset2, GV2, CV2); - // If they have a same base address then check to see if they overlap. - if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2))) - return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 || - (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1); + // If they have the same base address, then check to see if they overlap. + unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3; + unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3; + if (Base0 == Base1 || (GV0 && (GV0 == GV1)) || (CV0 && (CV0 == CV1))) + return !((Offset0 + NumBytes0) <= Offset1 || + (Offset1 + NumBytes1) <= Offset0); // It is possible for different frame indices to alias each other, mostly // when tail call optimization reuses return address slots for arguments. // To catch this case, look up the actual index of frame indices to compute // the real alias relationship. 
- if (isFrameIndex1 && isFrameIndex2) { + if (IsFrameIndex0 && IsFrameIndex1) { MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); + Offset0 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base0)->getIndex()); Offset1 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex()); - Offset2 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex()); - return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 || - (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1); + return !((Offset0 + NumBytes0) <= Offset1 || + (Offset1 + NumBytes1) <= Offset0); } // Otherwise, if we know what the bases are, and they aren't identical, then // we know they cannot alias. - if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2)) + if ((IsFrameIndex0 || CV0 || GV0) && (IsFrameIndex1 || CV1 || GV1)) return false; // If we know required SrcValue1 and SrcValue2 have relatively large alignment // compared to the size and offset of the access, we may be able to prove they - // do not alias. This check is conservative for now to catch cases created by + // do not alias. This check is conservative for now to catch cases created by // splitting vector types. 
- if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) && - (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) && - (Op0->getMemoryVT().getSizeInBits() >> 3 == - Op1->getMemoryVT().getSizeInBits() >> 3) && - (Op0->getOriginalAlignment() > (Op0->getMemoryVT().getSizeInBits() >> 3))) { - int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment(); - int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment(); + int64_t SrcValOffset0 = Op0->getSrcValueOffset(); + int64_t SrcValOffset1 = Op1->getSrcValueOffset(); + unsigned OrigAlignment0 = Op0->getOriginalAlignment(); + unsigned OrigAlignment1 = Op1->getOriginalAlignment(); + if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 && + NumBytes0 == NumBytes1 && OrigAlignment0 > NumBytes0) { + int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0; + int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1; // There is no overlap between these relatively aligned accesses of similar - // size, return no alias. - if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 || - (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1) + // size. Return no alias. + if ((OffAlign0 + NumBytes0) <= OffAlign1 || + (OffAlign1 + NumBytes1) <= OffAlign0) return false; } @@ -16115,19 +16150,17 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const { CombinerAAOnlyFunc != DAG.getMachineFunction().getName()) UseAA = false; #endif + if (UseAA && Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) { // Use alias analysis information. 
- int64_t MinOffset = std::min(Op0->getSrcValueOffset(), - Op1->getSrcValueOffset()); - int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) + - Op0->getSrcValueOffset() - MinOffset; - int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) + - Op1->getSrcValueOffset() - MinOffset; + int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1); + int64_t Overlap0 = NumBytes0 + SrcValOffset0 - MinOffset; + int64_t Overlap1 = NumBytes1 + SrcValOffset1 - MinOffset; AliasResult AAResult = - AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1, + AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap0, UseTBAA ? Op0->getAAInfo() : AAMDNodes()), - MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2, + MemoryLocation(Op1->getMemOperand()->getValue(), Overlap1, UseTBAA ? Op1->getAAInfo() : AAMDNodes())); if (AAResult == NoAlias) return false; diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 0584ab9f60d1b..6fb26fc3b73d5 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -1164,9 +1164,11 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) { "Expected inlined-at fields to agree"); if (Op->isReg()) { Op->setIsDebug(true); + // A dbg.declare describes the address of a source variable, so lower it + // into an indirect DBG_VALUE. 
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0, - DI->getVariable(), DI->getExpression()); + TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, + Op->getReg(), 0, DI->getVariable(), DI->getExpression()); } else BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::DBG_VALUE)) diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index fc7cd020fe2e3..3bae3bf9ab7cf 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -1343,7 +1343,7 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State, // Convert to an integer of the same size. if (TLI.isTypeLegal(IVT)) { State.IntValue = DAG.getNode(ISD::BITCAST, DL, IVT, Value); - State.SignMask = APInt::getSignBit(NumBits); + State.SignMask = APInt::getSignMask(NumBits); State.SignBit = NumBits - 1; return; } @@ -2984,7 +2984,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { EVT NVT = Node->getValueType(0); APFloat apf(DAG.EVTToAPFloatSemantics(VT), APInt::getNullValue(VT.getSizeInBits())); - APInt x = APInt::getSignBit(NVT.getSizeInBits()); + APInt x = APInt::getSignMask(NVT.getSizeInBits()); (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); Tmp1 = DAG.getConstantFP(apf, dl, VT); Tmp2 = DAG.getSetCC(dl, getSetCCResultType(VT), diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index 6f2b1b94ce465..c1cb5d9b5235e 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -72,7 +72,7 @@ bool DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) { case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break; case ISD::ConstantFP: R = SoftenFloatRes_ConstantFP(N, ResNo); break; case ISD::EXTRACT_VECTOR_ELT: - R = SoftenFloatRes_EXTRACT_VECTOR_ELT(N); break; + R = 
SoftenFloatRes_EXTRACT_VECTOR_ELT(N, ResNo); break; case ISD::FABS: R = SoftenFloatRes_FABS(N, ResNo); break; case ISD::FMINNUM: R = SoftenFloatRes_FMINNUM(N); break; case ISD::FMAXNUM: R = SoftenFloatRes_FMAXNUM(N); break; @@ -171,7 +171,10 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo) { } } -SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N, unsigned ResNo) { + // When LegalInHWReg, keep the extracted value in register. + if (isLegalInHWReg(N->getValueType(ResNo))) + return SDValue(N, ResNo); SDValue NewOp = BitConvertVectorToIntegerVector(N->getOperand(0)); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NewOp.getValueType().getVectorElementType(), diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index 0a2b680e1c66e..154af46c94464 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -925,9 +925,9 @@ SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) { assert(Op.getValueType().isVector() && "Only applies to vectors!"); unsigned EltWidth = Op.getScalarValueSizeInBits(); EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth); - unsigned NumElts = Op.getValueType().getVectorNumElements(); + auto EltCnt = Op.getValueType().getVectorElementCount(); return DAG.getNode(ISD::BITCAST, SDLoc(Op), - EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op); + EVT::getVectorVT(*DAG.getContext(), EltNVT, EltCnt), Op); } SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op, diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 80c939700518f..af55a22972a61 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -428,7 +428,7 @@ private: SDValue SoftenFloatRes_BITCAST(SDNode *N, unsigned ResNo); SDValue 
SoftenFloatRes_BUILD_PAIR(SDNode *N); SDValue SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo); - SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N); + SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N, unsigned ResNo); SDValue SoftenFloatRes_FABS(SDNode *N, unsigned ResNo); SDValue SoftenFloatRes_FMINNUM(SDNode *N); SDValue SoftenFloatRes_FMAXNUM(SDNode *N); diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 78fddb5ce8f58..1a7d7b7af5fa1 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -1293,12 +1293,9 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo, if ((NumElements & 1) == 0 && SrcVT.getSizeInBits() * 2 < DestVT.getSizeInBits()) { LLVMContext &Ctx = *DAG.getContext(); - EVT NewSrcVT = EVT::getVectorVT( - Ctx, EVT::getIntegerVT( - Ctx, SrcVT.getScalarSizeInBits() * 2), - NumElements); - EVT SplitSrcVT = - EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2); + EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx); + EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx); + EVT SplitLoVT, SplitHiVT; std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT); if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) && @@ -3012,8 +3009,8 @@ SDValue DAGTypeLegalizer::WidenVSELECTAndMask(SDNode *N) { // Don't touch if this will be scalarized. 
EVT FinalVT = VSelVT; while (getTypeAction(FinalVT) == TargetLowering::TypeSplitVector) - FinalVT = EVT::getVectorVT(Ctx, FinalVT.getVectorElementType(), - FinalVT.getVectorNumElements() / 2); + FinalVT = FinalVT.getHalfNumVectorElementsVT(Ctx); + if (FinalVT.getVectorNumElements() == 1) return SDValue(); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 003ea5030bfce..523f409e6b2cb 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -639,12 +639,15 @@ void SelectionDAG::DeallocateNode(SDNode *N) { // If we have operands, deallocate them. removeOperands(N); + NodeAllocator.Deallocate(AllNodes.remove(N)); + // Set the opcode to DELETED_NODE to help catch bugs when node // memory is reallocated. + // FIXME: There are places in SDag that have grown a dependency on the opcode + // value in the released node. + __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); N->NodeType = ISD::DELETED_NODE; - NodeAllocator.Deallocate(AllNodes.remove(N)); - // If any of the SDDbgValue nodes refer to this SDNode, invalidate // them and forget about that node. 
DbgInfo->erase(N); @@ -1826,7 +1829,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); - return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout())); + return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); } SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { @@ -1839,7 +1842,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); - return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout())); + return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); } SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, @@ -1955,7 +1958,7 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, /// use this predicate to simplify operations downstream. bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { unsigned BitWidth = Op.getScalarValueSizeInBits(); - return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth); + return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); } /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use @@ -2330,8 +2333,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, Depth + 1); - KnownZero = KnownZero.lshr(*ShAmt); - KnownOne = KnownOne.lshr(*ShAmt); + KnownZero.lshrInPlace(*ShAmt); + KnownOne.lshrInPlace(*ShAmt); // High bits are known zero. 
KnownZero.setHighBits(ShAmt->getZExtValue()); } @@ -2340,15 +2343,15 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, Depth + 1); - KnownZero = KnownZero.lshr(*ShAmt); - KnownOne = KnownOne.lshr(*ShAmt); + KnownZero.lshrInPlace(*ShAmt); + KnownOne.lshrInPlace(*ShAmt); // If we know the value of the sign bit, then we know it is copied across // the high bits by the shift amount. - APInt SignBit = APInt::getSignBit(BitWidth); - SignBit = SignBit.lshr(*ShAmt); // Adjust to where it is now in the mask. - if (KnownZero.intersects(SignBit)) { + APInt SignMask = APInt::getSignMask(BitWidth); + SignMask.lshrInPlace(*ShAmt); // Adjust to where it is now in the mask. + if (KnownZero.intersects(SignMask)) { KnownZero.setHighBits(ShAmt->getZExtValue());// New bits are known zero. - } else if (KnownOne.intersects(SignBit)) { + } else if (KnownOne.intersects(SignMask)) { KnownOne.setHighBits(ShAmt->getZExtValue()); // New bits are known one. } } @@ -2361,14 +2364,14 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, // present in the input. APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); - APInt InSignBit = APInt::getSignBit(EBits); + APInt InSignMask = APInt::getSignMask(EBits); APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); // If the sign extended bits are demanded, we know that the sign // bit is demanded. - InSignBit = InSignBit.zext(BitWidth); + InSignMask = InSignMask.zext(BitWidth); if (NewBits.getBoolValue()) - InputDemandedBits |= InSignBit; + InputDemandedBits |= InSignMask; computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, Depth + 1); @@ -2377,10 +2380,10 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, // If the sign bit of the input is known set or clear, then we know the // top bits of the result. 
- if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear + if (KnownZero.intersects(InSignMask)) { // Input sign bit known clear KnownZero |= NewBits; KnownOne &= ~NewBits; - } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set + } else if (KnownOne.intersects(InSignMask)) { // Input sign bit known set KnownOne |= NewBits; KnownZero &= ~NewBits; } else { // Input sign bit unknown @@ -2745,7 +2748,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, // a set bit that isn't the sign bit (otherwise it could be INT_MIN). KnownOne2.clearBit(BitWidth - 1); if (KnownOne2.getBoolValue()) { - KnownZero = APInt::getSignBit(BitWidth); + KnownZero = APInt::getSignMask(BitWidth); break; } break; @@ -2833,7 +2836,7 @@ SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, computeKnownBits(N0, N0Zero, N0One); bool overflow; - (~N0Zero).uadd_ov(~N1Zero, overflow); + (void)(~N0Zero).uadd_ov(~N1Zero, overflow); if (!overflow) return OFK_Never; } @@ -2874,7 +2877,7 @@ bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { // one bit set. if (Val.getOpcode() == ISD::SRL) { auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0)); - if (C && C->getAPIntValue().isSignBit()) + if (C && C->getAPIntValue().isSignMask()) return true; } @@ -2967,7 +2970,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, return std::max(Tmp, Tmp2); case ISD::SRA: - Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); + Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); // SRA X, C -> adds C sign bits. if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { APInt ShiftVal = C->getAPIntValue(); @@ -3130,40 +3133,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, // result. 
Otherwise it gives either negative or > bitwidth result return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); } - case ISD::INSERT_VECTOR_ELT: { - SDValue InVec = Op.getOperand(0); - SDValue InVal = Op.getOperand(1); - SDValue EltNo = Op.getOperand(2); - unsigned NumElts = InVec.getValueType().getVectorNumElements(); - - ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); - if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { - // If we know the element index, split the demand between the - // source vector and the inserted element. - unsigned EltIdx = CEltNo->getZExtValue(); - - // If we demand the inserted element then get its sign bits. - Tmp = UINT_MAX; - if (DemandedElts[EltIdx]) - Tmp = ComputeNumSignBits(InVal, Depth + 1); - - // If we demand the source vector then get its sign bits, and determine - // the minimum. - APInt VectorElts = DemandedElts; - VectorElts.clearBit(EltIdx); - if (!!VectorElts) { - Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1); - Tmp = std::min(Tmp, Tmp2); - } - } else { - // Unknown element index, so ignore DemandedElts and demand them all. - Tmp = ComputeNumSignBits(InVec, Depth + 1); - Tmp2 = ComputeNumSignBits(InVal, Depth + 1); - Tmp = std::min(Tmp, Tmp2); - } - assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); - return Tmp; - } case ISD::EXTRACT_VECTOR_ELT: { SDValue InVec = Op.getOperand(0); SDValue EltNo = Op.getOperand(1); @@ -7607,14 +7576,11 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { // Currently all types are split in half. 
EVT LoVT, HiVT; - if (!VT.isVector()) { + if (!VT.isVector()) LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); - } else { - unsigned NumElements = VT.getVectorNumElements(); - assert(!(NumElements & 1) && "Splitting vector, but not in half!"); - LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(), - NumElements/2); - } + else + LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); + return std::make_pair(LoVT, HiVT); } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 8708f58f1e632..2c58953ee9089 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -1151,7 +1151,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { FuncInfo.StaticAllocaMap.find(AI); if (SI != FuncInfo.StaticAllocaMap.end()) return DAG.getFrameIndex(SI->second, - TLI.getPointerTy(DAG.getDataLayout())); + TLI.getFrameIndexTy(DAG.getDataLayout())); } // If this is an instruction which fast-isel has deferred, select it now. @@ -4674,7 +4674,7 @@ static unsigned getUnderlyingArgReg(const SDValue &N) { /// At the end of instruction selection, they will be inserted to the entry BB. bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( const Value *V, DILocalVariable *Variable, DIExpression *Expr, - DILocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) { + DILocation *DL, int64_t Offset, bool IsDbgDeclare, const SDValue &N) { const Argument *Arg = dyn_cast<Argument>(V); if (!Arg) return false; @@ -4688,6 +4688,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction())) return false; + bool IsIndirect = false; Optional<MachineOperand> Op; // Some arguments' frame index is recorded during argument lowering. 
if (int FI = FuncInfo.getArgumentFrameIndex(Arg)) @@ -4701,15 +4702,19 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( if (PR) Reg = PR; } - if (Reg) + if (Reg) { Op = MachineOperand::CreateReg(Reg, false); + IsIndirect = IsDbgDeclare; + } } if (!Op) { // Check if ValueMap has reg number. DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); - if (VMI != FuncInfo.ValueMap.end()) + if (VMI != FuncInfo.ValueMap.end()) { Op = MachineOperand::CreateReg(VMI->second, false); + IsIndirect = IsDbgDeclare; + } } if (!Op && N.getNode()) @@ -4955,8 +4960,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { } else if (isa<Argument>(Address)) { // Address is an argument, so try to emit its dbg value using // virtual register info from the FuncInfo.ValueMap. - EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false, - N); + EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true, N); return nullptr; } else { SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(), @@ -4966,7 +4970,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { } else { // If Address is an argument then try to emit its dbg value using // virtual register info from the FuncInfo.ValueMap. - if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false, + if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true, N)) { // If variable is pinned by a alloca in dominating bb then // use StaticAllocaMap. @@ -5613,7 +5617,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { SDValue Ops[2]; Ops[0] = getRoot(); Ops[1] = - DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()), true); + DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true); unsigned Opcode = (IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END); Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops); @@ -6626,7 +6630,7 @@ static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, unsigned Align = DL.getPrefTypeAlignment(Ty); MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false); - SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy(DL)); + SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL)); Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot, MachinePointerInfo::getFixedStack(MF, SSFI)); OpInfo.CallOperand = StackSlot; @@ -7389,7 +7393,7 @@ static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx, } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) { const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo(); Ops.push_back(Builder.DAG.getTargetFrameIndex( - FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout()))); + FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout()))); } else Ops.push_back(OpVal); } @@ -7657,7 +7661,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false); Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy); - DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy(DL)); + DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL)); ArgListEntry Entry; Entry.Node = DemoteStackSlot; Entry.Ty = StackSlotPtrType; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index c6acc09b66028..9e34590cc39c6 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -928,7 +928,7 @@ private: /// instruction selection, they will be inserted to the entry BB. 
bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable, DIExpression *Expr, DILocation *DL, - int64_t Offset, bool IsIndirect, + int64_t Offset, bool IsDbgDeclare, const SDValue &N); /// Return the next block after MBB, or nullptr if there is none. diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 2756e276c6a91..93c6738f650d4 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -574,7 +574,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, // using the bits from the RHS. Below, we use knowledge about the RHS to // simplify the LHS, here we're using information from the LHS to simplify // the RHS. - if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { + if (ConstantSDNode *RHSC = isConstOrConstSplat(Op.getOperand(1))) { SDValue Op0 = Op.getOperand(0); APInt LHSZero, LHSOne; // Do not increment Depth here; that can cause an infinite loop. @@ -715,7 +715,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, // If the RHS is a constant, see if we can simplify it. // for XOR, we prefer to force bits to 1 if they will make a -1. // If we can't force bits, try to shrink the constant. - if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { + if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { APInt Expanded = C->getAPIntValue() | (~NewMask); // If we can expand it to have all bits set, do it. if (Expanded.isAllOnesValue()) { @@ -778,7 +778,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, // If (1) we only need the sign-bit, (2) the setcc operands are the same // width as the setcc result, and (3) the result of a setcc conforms to 0 or // -1, we may be able to bypass the setcc. 
- if (NewMask.isSignBit() && Op0.getScalarValueSizeInBits() == BitWidth && + if (NewMask.isSignMask() && Op0.getScalarValueSizeInBits() == BitWidth && getBooleanContents(Op.getValueType()) == BooleanContent::ZeroOrNegativeOneBooleanContent) { // If we're testing X < 0, then this compare isn't needed - just use X! @@ -839,7 +839,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, SDValue InnerOp = InOp.getNode()->getOperand(0); EVT InnerVT = InnerOp.getValueType(); unsigned InnerBits = InnerVT.getSizeInBits(); - if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 && + if (ShAmt < InnerBits && NewMask.getActiveBits() <= InnerBits && isTypeDesirableForOp(ISD::SHL, InnerVT)) { EVT ShTy = getShiftAmountTy(InnerVT, DL); if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits())) @@ -861,12 +861,12 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, InnerOp.getOpcode() == ISD::SRL && InnerOp.hasOneUse() && isa<ConstantSDNode>(InnerOp.getOperand(1))) { - uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1)) + unsigned InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1)) ->getZExtValue(); if (InnerShAmt < ShAmt && InnerShAmt < InnerBits && - NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 && - NewMask.trunc(ShAmt) == 0) { + NewMask.getActiveBits() <= (InnerBits - InnerShAmt + ShAmt) && + NewMask.countTrailingZeros() >= ShAmt) { SDValue NewSA = TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, Op.getOperand(1).getValueType()); @@ -929,8 +929,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, KnownZero, KnownOne, TLO, Depth+1)) return true; assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - KnownZero = KnownZero.lshr(ShAmt); - KnownOne = KnownOne.lshr(ShAmt); + KnownZero.lshrInPlace(ShAmt); + KnownOne.lshrInPlace(ShAmt); KnownZero.setHighBits(ShAmt); // High bits known zero. } @@ -964,21 +964,21 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, // demand the input sign bit. 
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); if (HighBits.intersects(NewMask)) - InDemandedMask |= APInt::getSignBit(VT.getScalarSizeInBits()); + InDemandedMask |= APInt::getSignMask(VT.getScalarSizeInBits()); if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne, TLO, Depth+1)) return true; assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - KnownZero = KnownZero.lshr(ShAmt); - KnownOne = KnownOne.lshr(ShAmt); + KnownZero.lshrInPlace(ShAmt); + KnownOne.lshrInPlace(ShAmt); // Handle the sign bit, adjusted to where it is now in the mask. - APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); + APInt SignMask = APInt::getSignMask(BitWidth).lshr(ShAmt); // If the input sign bit is known to be zero, or if none of the top bits // are demanded, turn this into an unsigned shift right. - if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { + if (KnownZero.intersects(SignMask) || (HighBits & ~NewMask) == HighBits) { SDNodeFlags Flags; Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact()); return TLO.CombineTo(Op, @@ -996,7 +996,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, Op.getOperand(0), NewSA)); } - if (KnownOne.intersects(SignBit)) + if (KnownOne.intersects(SignMask)) // New bits are known one. 
KnownOne |= HighBits; } @@ -1040,7 +1040,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, return TLO.CombineTo(Op, Op.getOperand(0)); APInt InSignBit = - APInt::getSignBit(ExVT.getScalarSizeInBits()).zext(BitWidth); + APInt::getSignMask(ExVT.getScalarSizeInBits()).zext(BitWidth); APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, ExVT.getScalarSizeInBits()) & @@ -1205,20 +1205,23 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, getShiftAmountTy(Op.getValueType(), DL)); } - APInt HighBits = APInt::getHighBitsSet(OperandBitWidth, - OperandBitWidth - BitWidth); - HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth); - - if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) { - // None of the shifted in bits are needed. Add a truncate of the - // shift input, then shift it. - SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, - Op.getValueType(), - In.getOperand(0)); - return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, - Op.getValueType(), - NewTrunc, - Shift)); + if (ShAmt->getZExtValue() < BitWidth) { + APInt HighBits = APInt::getHighBitsSet(OperandBitWidth, + OperandBitWidth - BitWidth); + HighBits.lshrInPlace(ShAmt->getZExtValue()); + HighBits = HighBits.trunc(BitWidth); + + if (!(HighBits & NewMask)) { + // None of the shifted in bits are needed. Add a truncate of the + // shift input, then shift it. 
+ SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, + Op.getValueType(), + In.getOperand(0)); + return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, + Op.getValueType(), + NewTrunc, + Shift)); + } } break; } @@ -1247,7 +1250,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, if (!TLO.LegalOperations() && !Op.getValueType().isVector() && !Op.getOperand(0).getValueType().isVector() && - NewMask == APInt::getSignBit(Op.getValueSizeInBits()) && + NewMask == APInt::getSignMask(Op.getValueSizeInBits()) && Op.getOperand(0).getValueType().isFloatingPoint()) { bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType()); bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); @@ -2055,7 +2058,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, } else { ShiftBits = C1.countTrailingZeros(); } - NewC = NewC.lshr(ShiftBits); + NewC.lshrInPlace(ShiftBits); if (ShiftBits && NewC.getMinSignedBits() <= 64 && isLegalICmpImmediate(NewC.getSExtValue())) { auto &DL = DAG.getDataLayout(); @@ -3353,7 +3356,7 @@ bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT); SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT); SDValue Bias = DAG.getConstant(127, dl, IntVT); - SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl, + SDValue SignMask = DAG.getConstant(APInt::getSignMask(VT.getSizeInBits()), dl, IntVT); SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT); SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT); diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp index cbce2dc89debe..bbb19b5e998da 100644 --- a/lib/DebugInfo/DWARF/DWARFContext.cpp +++ b/lib/DebugInfo/DWARF/DWARFContext.cpp @@ -579,7 +579,7 @@ DWARFContext::getInliningInfoForAddress(uint64_t Address, return InliningInfo; } - uint32_t CallFile = 0, CallLine = 0, CallColumn = 0; + uint32_t 
CallFile = 0, CallLine = 0, CallColumn = 0, CallDiscriminator = 0; for (uint32_t i = 0, n = InlinedChain.size(); i != n; i++) { DWARFDie &FunctionDIE = InlinedChain[i]; DILineInfo Frame; @@ -605,10 +605,12 @@ DWARFContext::getInliningInfoForAddress(uint64_t Address, Spec.FLIKind, Frame.FileName); Frame.Line = CallLine; Frame.Column = CallColumn; + Frame.Discriminator = CallDiscriminator; } // Get call file/line/column of a current DIE. if (i + 1 < n) { - FunctionDIE.getCallerFrame(CallFile, CallLine, CallColumn); + FunctionDIE.getCallerFrame(CallFile, CallLine, CallColumn, + CallDiscriminator); } } InliningInfo.addFrame(Frame); diff --git a/lib/DebugInfo/DWARF/DWARFDie.cpp b/lib/DebugInfo/DWARF/DWARFDie.cpp index 4308cc2e26396..24039eb35209a 100644 --- a/lib/DebugInfo/DWARF/DWARFDie.cpp +++ b/lib/DebugInfo/DWARF/DWARFDie.cpp @@ -290,10 +290,12 @@ uint64_t DWARFDie::getDeclLine() const { } void DWARFDie::getCallerFrame(uint32_t &CallFile, uint32_t &CallLine, - uint32_t &CallColumn) const { + uint32_t &CallColumn, + uint32_t &CallDiscriminator) const { CallFile = toUnsigned(find(DW_AT_call_file), 0); CallLine = toUnsigned(find(DW_AT_call_line), 0); CallColumn = toUnsigned(find(DW_AT_call_column), 0); + CallDiscriminator = toUnsigned(find(DW_AT_GNU_discriminator), 0); } void DWARFDie::dump(raw_ostream &OS, unsigned RecurseDepth, @@ -350,32 +352,6 @@ void DWARFDie::dump(raw_ostream &OS, unsigned RecurseDepth, } } -void DWARFDie::getInlinedChainForAddress( - const uint64_t Address, SmallVectorImpl<DWARFDie> &InlinedChain) const { - if (isNULL()) - return; - DWARFDie DIE(*this); - while (DIE) { - // Append current DIE to inlined chain only if it has correct tag - // (e.g. it is not a lexical block). - if (DIE.isSubroutineDIE()) - InlinedChain.push_back(DIE); - - // Try to get child which also contains provided address. - DWARFDie Child = DIE.getFirstChild(); - while (Child) { - if (Child.addressRangeContainsAddress(Address)) { - // Assume there is only one such child. 
- break; - } - Child = Child.getSibling(); - } - DIE = Child; - } - // Reverse the obtained chain to make the root of inlined chain last. - std::reverse(InlinedChain.begin(), InlinedChain.end()); -} - DWARFDie DWARFDie::getParent() const { if (isValid()) return U->getParent(Die); diff --git a/lib/DebugInfo/DWARF/DWARFUnit.cpp b/lib/DebugInfo/DWARF/DWARFUnit.cpp index 4ee8e8f46d2eb..c3f467745402f 100644 --- a/lib/DebugInfo/DWARF/DWARFUnit.cpp +++ b/lib/DebugInfo/DWARF/DWARFUnit.cpp @@ -343,37 +343,63 @@ void DWARFUnit::collectAddressRanges(DWARFAddressRangesVector &CURanges) { clearDIEs(true); } -DWARFDie -DWARFUnit::getSubprogramForAddress(uint64_t Address) { - extractDIEsIfNeeded(false); - for (const DWARFDebugInfoEntry &D : DieArray) { - DWARFDie DIE(this, &D); - if (DIE.isSubprogramDIE() && - DIE.addressRangeContainsAddress(Address)) { - return DIE; +void DWARFUnit::updateAddressDieMap(DWARFDie Die) { + if (Die.isSubroutineDIE()) { + for (const auto &R : Die.getAddressRanges()) { + // Ignore 0-sized ranges. + if (R.first == R.second) + continue; + auto B = AddrDieMap.upper_bound(R.first); + if (B != AddrDieMap.begin() && R.first < (--B)->second.first) { + // The range is a sub-range of existing ranges, we need to split the + // existing range. + if (R.second < B->second.first) + AddrDieMap[R.second] = B->second; + if (R.first > B->first) + AddrDieMap[B->first].first = R.first; + } + AddrDieMap[R.first] = std::make_pair(R.second, Die); } } - return DWARFDie(); + // Parent DIEs are added to the AddrDieMap prior to the Children DIEs to + // simplify the logic to update AddrDieMap. The child's range will always + // be equal or smaller than the parent's range. With this assumption, when + // adding one range into the map, it will at most split a range into 3 + // sub-ranges. 
+ for (DWARFDie Child = Die.getFirstChild(); Child; Child = Child.getSibling()) + updateAddressDieMap(Child); +} + +DWARFDie DWARFUnit::getSubroutineForAddress(uint64_t Address) { + extractDIEsIfNeeded(false); + if (AddrDieMap.empty()) + updateAddressDieMap(getUnitDIE()); + auto R = AddrDieMap.upper_bound(Address); + if (R == AddrDieMap.begin()) + return DWARFDie(); + // upper_bound's previous item contains Address. + --R; + if (Address >= R->second.first) + return DWARFDie(); + return R->second.second; } void DWARFUnit::getInlinedChainForAddress(uint64_t Address, SmallVectorImpl<DWARFDie> &InlinedChain) { - // First, find a subprogram that contains the given address (the root - // of inlined chain). - DWARFDie SubprogramDIE; + assert(InlinedChain.empty()); // Try to look for subprogram DIEs in the DWO file. parseDWO(); - if (DWO) - SubprogramDIE = DWO->getUnit()->getSubprogramForAddress(Address); - else - SubprogramDIE = getSubprogramForAddress(Address); - - // Get inlined chain rooted at this subprogram DIE. - if (SubprogramDIE) - SubprogramDIE.getInlinedChainForAddress(Address, InlinedChain); - else - InlinedChain.clear(); + // First, find the subroutine that contains the given address (the leaf + // of inlined chain). + DWARFDie SubroutineDIE = + (DWO ? 
DWO->getUnit() : this)->getSubroutineForAddress(Address); + + while (SubroutineDIE) { + if (SubroutineDIE.isSubroutineDIE()) + InlinedChain.push_back(SubroutineDIE); + SubroutineDIE = SubroutineDIE.getParent(); + } } const DWARFUnitIndex &llvm::getDWARFUnitIndex(DWARFContext &Context, diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp index e29e9fc2c702e..10b4e98b6079a 100644 --- a/lib/ExecutionEngine/Interpreter/Execution.cpp +++ b/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -1580,7 +1580,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy, GenericValue Elt; Elt.IntVal = Elt.IntVal.zext(SrcBitSize); Elt.IntVal = TempSrc.AggregateVal[i].IntVal; - Elt.IntVal = Elt.IntVal.lshr(ShiftAmt); + Elt.IntVal.lshrInPlace(ShiftAmt); // it could be DstBitSize == SrcBitSize, so check it if (DstBitSize < SrcBitSize) Elt.IntVal = Elt.IntVal.trunc(DstBitSize); diff --git a/lib/Fuzzer/FuzzerDriver.cpp b/lib/Fuzzer/FuzzerDriver.cpp index 0fb83ca64de61..b85ba210afb3b 100644 --- a/lib/Fuzzer/FuzzerDriver.cpp +++ b/lib/Fuzzer/FuzzerDriver.cpp @@ -289,6 +289,66 @@ static std::string GetDedupTokenFromFile(const std::string &Path) { return S.substr(Beg, End - Beg); } +int CleanseCrashInput(const std::vector<std::string> &Args, + const FuzzingOptions &Options) { + if (Inputs->size() != 1 || !Flags.exact_artifact_path) { + Printf("ERROR: -cleanse_crash should be given one input file and" + " -exact_artifact_path\n"); + exit(1); + } + std::string InputFilePath = Inputs->at(0); + std::string OutputFilePath = Flags.exact_artifact_path; + std::string BaseCmd = + CloneArgsWithoutX(Args, "cleanse_crash", "cleanse_crash"); + + auto InputPos = BaseCmd.find(" " + InputFilePath + " "); + assert(InputPos != std::string::npos); + BaseCmd.erase(InputPos, InputFilePath.size() + 1); + + auto LogFilePath = DirPlusFile( + TmpDir(), "libFuzzerTemp." 
+ std::to_string(GetPid()) + ".txt"); + auto TmpFilePath = DirPlusFile( + TmpDir(), "libFuzzerTemp." + std::to_string(GetPid()) + ".repro"); + auto LogFileRedirect = " > " + LogFilePath + " 2>&1 "; + + auto Cmd = BaseCmd + " " + TmpFilePath + LogFileRedirect; + + std::string CurrentFilePath = InputFilePath; + auto U = FileToVector(CurrentFilePath); + size_t Size = U.size(); + + const std::vector<uint8_t> ReplacementBytes = {' ', 0xff}; + for (int NumAttempts = 0; NumAttempts < 5; NumAttempts++) { + bool Changed = false; + for (size_t Idx = 0; Idx < Size; Idx++) { + Printf("CLEANSE[%d]: Trying to replace byte %zd of %zd\n", NumAttempts, + Idx, Size); + uint8_t OriginalByte = U[Idx]; + if (ReplacementBytes.end() != std::find(ReplacementBytes.begin(), + ReplacementBytes.end(), + OriginalByte)) + continue; + for (auto NewByte : ReplacementBytes) { + U[Idx] = NewByte; + WriteToFile(U, TmpFilePath); + auto ExitCode = ExecuteCommand(Cmd); + RemoveFile(TmpFilePath); + if (!ExitCode) { + U[Idx] = OriginalByte; + } else { + Changed = true; + Printf("CLEANSE: Replaced byte %zd with 0x%x\n", Idx, NewByte); + WriteToFile(U, OutputFilePath); + break; + } + } + } + if (!Changed) break; + } + RemoveFile(LogFilePath); + return 0; +} + int MinimizeCrashInput(const std::vector<std::string> &Args, const FuzzingOptions &Options) { if (Inputs->size() != 1) { @@ -516,7 +576,6 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { Options.PreferSmall = Flags.prefer_small; Options.ReloadIntervalSec = Flags.reload; Options.OnlyASCII = Flags.only_ascii; - Options.OutputCSV = Flags.output_csv; Options.DetectLeaks = Flags.detect_leaks; Options.TraceMalloc = Flags.trace_malloc; Options.RssLimitMb = Flags.rss_limit_mb; @@ -583,6 +642,9 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { if (Flags.minimize_crash_internal_step) return MinimizeCrashInputInternalStep(F, Corpus); + if (Flags.cleanse_crash) + return CleanseCrashInput(Args, Options); + if (auto Name = 
Flags.run_equivalence_server) { SMR.Destroy(Name); if (!SMR.Create(Name)) { diff --git a/lib/Fuzzer/FuzzerFlags.def b/lib/Fuzzer/FuzzerFlags.def index 28bf0ca8ce691..0a1ff1b1df6a5 100644 --- a/lib/Fuzzer/FuzzerFlags.def +++ b/lib/Fuzzer/FuzzerFlags.def @@ -48,7 +48,15 @@ FUZZER_FLAG_STRING(load_coverage_summary, "Experimental:" " Used with -merge=1") FUZZER_FLAG_INT(minimize_crash, 0, "If 1, minimizes the provided" " crash input. Use with -runs=N or -max_total_time=N to limit " - "the number attempts") + "the number attempts." + " Use with -exact_artifact_path to specify the output." + " Combine with ASAN_OPTIONS=dedup_token_length=3 (or similar) to ensure that" + " the minimized input triggers the same crash." + ) +FUZZER_FLAG_INT(cleanse_crash, 0, "If 1, tries to cleanse the provided" + " crash input to make it contain fewer original bytes." + " Use with -exact_artifact_path to specify the output." + ) FUZZER_FLAG_INT(minimize_crash_internal_step, 0, "internal flag") FUZZER_FLAG_INT(use_counters, 1, "Use coverage counters") FUZZER_FLAG_INT(use_indir_calls, 1, "Use indirect caller-callee counters") @@ -80,7 +88,6 @@ FUZZER_FLAG_STRING(exact_artifact_path, "as $(exact_artifact_path). This overrides -artifact_prefix " "and will not use checksum in the file name. 
Do not " "use the same path for several parallel processes.") -FUZZER_FLAG_INT(output_csv, 0, "Enable pulse output in CSV format.") FUZZER_FLAG_INT(print_pcs, 0, "If 1, print out newly covered PCs.") FUZZER_FLAG_INT(print_final_stats, 0, "If 1, print statistics at exit.") FUZZER_FLAG_INT(print_corpus_stats, 0, @@ -124,3 +131,4 @@ FUZZER_DEPRECATED_FLAG(sync_timeout) FUZZER_DEPRECATED_FLAG(test_single_input) FUZZER_DEPRECATED_FLAG(drill) FUZZER_DEPRECATED_FLAG(truncate_units) +FUZZER_DEPRECATED_FLAG(output_csv) diff --git a/lib/Fuzzer/FuzzerLoop.cpp b/lib/Fuzzer/FuzzerLoop.cpp index 704092896eb66..4e4def8cb87e9 100644 --- a/lib/Fuzzer/FuzzerLoop.cpp +++ b/lib/Fuzzer/FuzzerLoop.cpp @@ -253,17 +253,6 @@ void Fuzzer::RssLimitCallback() { void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units) { size_t ExecPerSec = execPerSec(); - if (Options.OutputCSV) { - static bool csvHeaderPrinted = false; - if (!csvHeaderPrinted) { - csvHeaderPrinted = true; - Printf("runs,block_cov,bits,cc_cov,corpus,execs_per_sec,tbms,reason\n"); - } - Printf("%zd,%zd,%zd,%zd,%s\n", TotalNumberOfRuns, - TPC.GetTotalPCCoverage(), - Corpus.size(), ExecPerSec, Where); - } - if (!Options.Verbosity) return; Printf("#%zd\t%s", TotalNumberOfRuns, Where); diff --git a/lib/Fuzzer/FuzzerOptions.h b/lib/Fuzzer/FuzzerOptions.h index 872def0326f08..b1366789be007 100644 --- a/lib/Fuzzer/FuzzerOptions.h +++ b/lib/Fuzzer/FuzzerOptions.h @@ -45,7 +45,6 @@ struct FuzzingOptions { std::string ExitOnItem; bool SaveArtifacts = true; bool PrintNEW = true; // Print a status line when new units are found; - bool OutputCSV = false; bool PrintNewCovPcs = false; bool PrintFinalStats = false; bool PrintCorpusStats = false; diff --git a/lib/Fuzzer/test/CMakeLists.txt b/lib/Fuzzer/test/CMakeLists.txt index f72bc3909a3cf..cd049d3f03d84 100644 --- a/lib/Fuzzer/test/CMakeLists.txt +++ b/lib/Fuzzer/test/CMakeLists.txt @@ -80,6 +80,7 @@ set(Tests BogusInitializeTest BufferOverflowOnInput CallerCalleeTest + 
CleanseTest CounterTest CustomCrossOverAndMutateTest CustomCrossOverTest diff --git a/lib/Fuzzer/test/CleanseTest.cpp b/lib/Fuzzer/test/CleanseTest.cpp new file mode 100644 index 0000000000000..faea8dcb3c30c --- /dev/null +++ b/lib/Fuzzer/test/CleanseTest.cpp @@ -0,0 +1,16 @@ +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. + +// Test the the fuzzer is able to 'cleanse' the reproducer +// by replacing all irrelevant bytes with garbage. +#include <cstdint> +#include <cstdlib> +#include <cstddef> + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + if (Size >= 20 && Data[1] == '1' && Data[5] == '5' && Data[10] == 'A' && + Data[19] == 'Z') + abort(); + return 0; +} + diff --git a/lib/Fuzzer/test/cleanse.test b/lib/Fuzzer/test/cleanse.test new file mode 100644 index 0000000000000..ad08591d2fa31 --- /dev/null +++ b/lib/Fuzzer/test/cleanse.test @@ -0,0 +1,3 @@ +RUN: echo -n 0123456789ABCDEFGHIZ > %t-in +RUN: LLVMFuzzer-CleanseTest -cleanse_crash=1 %t-in -exact_artifact_path=%t-out +RUN: echo -n ' 1 5 A Z' | diff - %t-out diff --git a/lib/Fuzzer/test/fuzzer-oom.test b/lib/Fuzzer/test/fuzzer-oom.test index e9d33552723eb..2db91915876e3 100644 --- a/lib/Fuzzer/test/fuzzer-oom.test +++ b/lib/Fuzzer/test/fuzzer-oom.test @@ -1,4 +1,3 @@ -XFAIL: darwin RUN: not LLVMFuzzer-OutOfMemoryTest -rss_limit_mb=300 2>&1 | FileCheck %s CHECK: ERROR: libFuzzer: out-of-memory (used: {{.*}}; limit: 300Mb) diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp index 2b7359dab807d..d690111ef210e 100644 --- a/lib/IR/Attributes.cpp +++ b/lib/IR/Attributes.cpp @@ -984,20 +984,23 @@ AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index, } AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index, - AttributeSet AS) const { - if (!AS.hasAttributes()) + const AttrBuilder &B) const { + if (!B.hasAttributes()) return *this; + if (!pImpl) + return AttributeList::get(C, 
{{Index, AttributeSet::get(C, B)}}); + #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. For now, say // we can't change a known alignment. unsigned OldAlign = getParamAlignment(Index); - unsigned NewAlign = AS.getAlignment(); + unsigned NewAlign = B.getAlignment(); assert((!OldAlign || !NewAlign || OldAlign == NewAlign) && "Attempt to change alignment!"); #endif - SmallVector<std::pair<unsigned, AttributeSet>, 4> AttrSet; + SmallVector<IndexAttrPair, 4> AttrVec; uint64_t NumAttrs = pImpl->getNumSlots(); unsigned I; @@ -1005,31 +1008,25 @@ AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index, for (I = 0; I < NumAttrs; ++I) { if (getSlotIndex(I) >= Index) break; - AttrSet.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I)); + AttrVec.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I)); } + AttrBuilder NewAttrs; if (I < NumAttrs && getSlotIndex(I) == Index) { - // We need to merge two AttributeSets. - AttributeSet Merged = AttributeSet::get( - C, AttrBuilder(pImpl->getSlotNode(I)).merge(AttrBuilder(AS))); - AttrSet.emplace_back(Index, Merged); + // We need to merge the attribute sets. + NewAttrs.merge(pImpl->getSlotNode(I)); ++I; - } else { - // Otherwise, there were no attributes at this position in the original - // list. Add the set as is. - AttrSet.emplace_back(Index, AS); } + NewAttrs.merge(B); + + // Add the new or merged attribute set at this index. + AttrVec.emplace_back(Index, AttributeSet::get(C, NewAttrs)); // Add the remaining entries. 
for (; I < NumAttrs; ++I) - AttrSet.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I)); - - return get(C, AttrSet); -} + AttrVec.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I)); -AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index, - const AttrBuilder &B) const { - return get(C, Index, AttributeSet::get(C, B)); + return get(C, AttrVec); } AttributeList AttributeList::removeAttribute(LLVMContext &C, unsigned Index, @@ -1046,46 +1043,7 @@ AttributeList AttributeList::removeAttribute(LLVMContext &C, unsigned Index, AttributeList AttributeList::removeAttributes(LLVMContext &C, unsigned Index, AttributeList Attrs) const { - if (!pImpl) - return AttributeList(); - if (!Attrs.pImpl) return *this; - - // FIXME it is not obvious how this should work for alignment. - // For now, say we can't pass in alignment, which no current use does. - assert(!Attrs.hasAttribute(Index, Attribute::Alignment) && - "Attempt to change alignment!"); - - // Add the attribute slots before the one we're trying to add. - SmallVector<AttributeList, 4> AttrSet; - uint64_t NumAttrs = pImpl->getNumSlots(); - AttributeList AL; - uint64_t LastIndex = 0; - for (unsigned I = 0, E = NumAttrs; I != E; ++I) { - if (getSlotIndex(I) >= Index) { - if (getSlotIndex(I) == Index) AL = getSlotAttributes(LastIndex++); - break; - } - LastIndex = I + 1; - AttrSet.push_back(getSlotAttributes(I)); - } - - // Now remove the attribute from the correct slot. There may already be an - // AttributeList there. - AttrBuilder B(AL, Index); - - for (unsigned I = 0, E = Attrs.pImpl->getNumSlots(); I != E; ++I) - if (Attrs.getSlotIndex(I) == Index) { - B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Index); - break; - } - - AttrSet.push_back(AttributeList::get(C, Index, B)); - - // Add the remaining attribute slots. 
- for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I) - AttrSet.push_back(getSlotAttributes(I)); - - return get(C, AttrSet); + return removeAttributes(C, Index, AttrBuilder(Attrs.getAttributes(Index))); } AttributeList AttributeList::removeAttributes(LLVMContext &C, unsigned Index, @@ -1098,31 +1056,30 @@ AttributeList AttributeList::removeAttributes(LLVMContext &C, unsigned Index, assert(!Attrs.hasAlignmentAttr() && "Attempt to change alignment!"); // Add the attribute slots before the one we're trying to add. - SmallVector<AttributeList, 4> AttrSet; + SmallVector<IndexAttrPair, 4> AttrSets; uint64_t NumAttrs = pImpl->getNumSlots(); - AttributeList AL; + AttrBuilder B; uint64_t LastIndex = 0; for (unsigned I = 0, E = NumAttrs; I != E; ++I) { if (getSlotIndex(I) >= Index) { - if (getSlotIndex(I) == Index) AL = getSlotAttributes(LastIndex++); + if (getSlotIndex(I) == Index) + B = AttrBuilder(pImpl->getSlotNode(LastIndex++)); break; } LastIndex = I + 1; - AttrSet.push_back(getSlotAttributes(I)); + AttrSets.push_back({getSlotIndex(I), pImpl->getSlotNode(I)}); } - // Now remove the attribute from the correct slot. There may already be an - // AttributeList there. - AttrBuilder B(AL, Index); + // Remove the attributes from the existing set and add them. B.remove(Attrs); - - AttrSet.push_back(AttributeList::get(C, Index, B)); + if (B.hasAttributes()) + AttrSets.push_back({Index, AttributeSet::get(C, B)}); // Add the remaining attribute slots. 
for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I) - AttrSet.push_back(getSlotAttributes(I)); + AttrSets.push_back({getSlotIndex(I), pImpl->getSlotNode(I)}); - return get(C, AttrSet); + return get(C, AttrSets); } AttributeList AttributeList::removeAttributes(LLVMContext &C, @@ -1406,18 +1363,7 @@ AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) { } AttrBuilder &AttrBuilder::removeAttributes(AttributeList A, uint64_t Index) { - unsigned Slot = ~0U; - for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I) - if (A.getSlotIndex(I) == Index) { - Slot = I; - break; - } - - assert(Slot != ~0U && "Couldn't find index in AttributeList!"); - - for (AttributeList::iterator I = A.begin(Slot), E = A.end(Slot); I != E; - ++I) { - Attribute Attr = *I; + for (Attribute Attr : A.getAttributes(Index)) { if (Attr.isEnumAttribute() || Attr.isIntAttribute()) { removeAttribute(Attr.getKindAsEnum()); } else { diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp index bba230677ebf7..80b117015ede8 100644 --- a/lib/IR/ConstantFold.cpp +++ b/lib/IR/ConstantFold.cpp @@ -223,7 +223,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart, if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { APInt V = CI->getValue(); if (ByteStart) - V = V.lshr(ByteStart*8); + V.lshrInPlace(ByteStart*8); V = V.trunc(ByteSize*8); return ConstantInt::get(CI->getContext(), V); } diff --git a/lib/IR/ConstantRange.cpp b/lib/IR/ConstantRange.cpp index 8dfd6c8036c49..0cc38b0252094 100644 --- a/lib/IR/ConstantRange.cpp +++ b/lib/IR/ConstantRange.cpp @@ -29,8 +29,6 @@ #include "llvm/Support/raw_ostream.h" using namespace llvm; -/// Initialize a full (the default) or empty set for the specified type. 
-/// ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) { if (Full) Lower = Upper = APInt::getMaxValue(BitWidth); @@ -38,8 +36,6 @@ ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) { Lower = Upper = APInt::getMinValue(BitWidth); } -/// Initialize a range to hold the single specified value. -/// ConstantRange::ConstantRange(APInt V) : Lower(std::move(V)), Upper(Lower + 1) {} @@ -232,35 +228,23 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, return Result; } -/// isFullSet - Return true if this set contains all of the elements possible -/// for this data-type bool ConstantRange::isFullSet() const { return Lower == Upper && Lower.isMaxValue(); } -/// isEmptySet - Return true if this set contains no members. -/// bool ConstantRange::isEmptySet() const { return Lower == Upper && Lower.isMinValue(); } -/// isWrappedSet - Return true if this set wraps around the top of the range, -/// for example: [100, 8) -/// bool ConstantRange::isWrappedSet() const { return Lower.ugt(Upper); } -/// isSignWrappedSet - Return true if this set wraps around the INT_MIN of -/// its bitwidth, for example: i8 [120, 140). -/// bool ConstantRange::isSignWrappedSet() const { return contains(APInt::getSignedMaxValue(getBitWidth())) && contains(APInt::getSignedMinValue(getBitWidth())); } -/// getSetSize - Return the number of elements in this set. -/// APInt ConstantRange::getSetSize() const { if (isFullSet()) { APInt Size(getBitWidth()+1, 0); @@ -272,12 +256,6 @@ APInt ConstantRange::getSetSize() const { return (Upper - Lower).zext(getBitWidth()+1); } -/// isSizeStrictlySmallerThanOf - Compare set size of this range with the range -/// CR. -/// This function is faster than comparing results of getSetSize for the two -/// ranges, because we don't need to extend bitwidth of APInts we're operating -/// with. 
-/// bool ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const { assert(getBitWidth() == Other.getBitWidth()); @@ -288,58 +266,44 @@ ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const { return (Upper - Lower).ult(Other.Upper - Other.Lower); } -/// getUnsignedMax - Return the largest unsigned value contained in the -/// ConstantRange. -/// APInt ConstantRange::getUnsignedMax() const { if (isFullSet() || isWrappedSet()) return APInt::getMaxValue(getBitWidth()); return getUpper() - 1; } -/// getUnsignedMin - Return the smallest unsigned value contained in the -/// ConstantRange. -/// APInt ConstantRange::getUnsignedMin() const { if (isFullSet() || (isWrappedSet() && getUpper() != 0)) return APInt::getMinValue(getBitWidth()); return getLower(); } -/// getSignedMax - Return the largest signed value contained in the -/// ConstantRange. -/// APInt ConstantRange::getSignedMax() const { APInt SignedMax(APInt::getSignedMaxValue(getBitWidth())); if (!isWrappedSet()) { - if (getLower().sle(getUpper() - 1)) - return getUpper() - 1; - return SignedMax; + APInt UpperMinusOne = getUpper() - 1; + if (getLower().sle(UpperMinusOne)) + return UpperMinusOne; + return APInt::getSignedMaxValue(getBitWidth()); } if (getLower().isNegative() == getUpper().isNegative()) - return SignedMax; + return APInt::getSignedMaxValue(getBitWidth()); return getUpper() - 1; } -/// getSignedMin - Return the smallest signed value contained in the -/// ConstantRange. 
-/// APInt ConstantRange::getSignedMin() const { - APInt SignedMin(APInt::getSignedMinValue(getBitWidth())); if (!isWrappedSet()) { if (getLower().sle(getUpper() - 1)) return getLower(); - return SignedMin; + return APInt::getSignedMinValue(getBitWidth()); } if ((getUpper() - 1).slt(getLower())) { - if (getUpper() != SignedMin) - return SignedMin; + if (!getUpper().isMinSignedValue()) + return APInt::getSignedMinValue(getBitWidth()); } return getLower(); } -/// contains - Return true if the specified value is in the set. -/// bool ConstantRange::contains(const APInt &V) const { if (Lower == Upper) return isFullSet(); @@ -349,10 +313,6 @@ bool ConstantRange::contains(const APInt &V) const { return Lower.ule(V) || V.ult(Upper); } -/// contains - Return true if the argument is a subset of this range. -/// Two equal sets contain each other. The empty set contained by all other -/// sets. -/// bool ConstantRange::contains(const ConstantRange &Other) const { if (isFullSet() || Other.isEmptySet()) return true; if (isEmptySet() || Other.isFullSet()) return false; @@ -371,8 +331,6 @@ bool ConstantRange::contains(const ConstantRange &Other) const { return Other.getUpper().ule(Upper) && Lower.ule(Other.getLower()); } -/// subtract - Subtract the specified constant from the endpoints of this -/// constant range. ConstantRange ConstantRange::subtract(const APInt &Val) const { assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width"); // If the set is empty or full, don't modify the endpoints. @@ -381,17 +339,10 @@ ConstantRange ConstantRange::subtract(const APInt &Val) const { return ConstantRange(Lower - Val, Upper - Val); } -/// \brief Subtract the specified range from this range (aka relative complement -/// of the sets). ConstantRange ConstantRange::difference(const ConstantRange &CR) const { return intersectWith(CR.inverse()); } -/// intersectWith - Return the range that results from the intersection of this -/// range with another range. 
The resultant range is guaranteed to include all -/// elements contained in both input ranges, and to have the smallest possible -/// set size that does so. Because there may be two intersections with the -/// same set size, A.intersectWith(B) might not be equal to B.intersectWith(A). ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const { assert(getBitWidth() == CR.getBitWidth() && "ConstantRange types don't agree!"); @@ -466,13 +417,6 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const { return CR; } - -/// unionWith - Return the range that results from the union of this range with -/// another range. The resultant range is guaranteed to include the elements of -/// both sets, but may contain more. For example, [3, 9) union [12,15) is -/// [3, 15), which includes 9, 10, and 11, which were not included in either -/// set before. -/// ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const { assert(getBitWidth() == CR.getBitWidth() && "ConstantRange types don't agree!"); @@ -593,10 +537,6 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp, }; } -/// zeroExtend - Return a new range in the specified integer type, which must -/// be strictly larger than the current type. The returned range will -/// correspond to the possible range of values as if the source range had been -/// zero extended. ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const { if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false); @@ -613,10 +553,6 @@ ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const { return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize)); } -/// signExtend - Return a new range in the specified integer type, which must -/// be strictly larger than the current type. The returned range will -/// correspond to the possible range of values as if the source range had been -/// sign extended. 
ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const { if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false); @@ -635,10 +571,6 @@ ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const { return ConstantRange(Lower.sext(DstTySize), Upper.sext(DstTySize)); } -/// truncate - Return a new range in the specified integer type, which must be -/// strictly smaller than the current type. The returned range will -/// correspond to the possible range of values as if the source range had been -/// truncated to the specified type. ConstantRange ConstantRange::truncate(uint32_t DstTySize) const { assert(getBitWidth() > DstTySize && "Not a value truncation"); if (isEmptySet()) @@ -690,8 +622,6 @@ ConstantRange ConstantRange::truncate(uint32_t DstTySize) const { return ConstantRange(DstTySize, /*isFullSet=*/true); } -/// zextOrTrunc - make this range have the bit width given by \p DstTySize. The -/// value is zero extended, truncated, or left alone to make it that width. ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const { unsigned SrcTySize = getBitWidth(); if (SrcTySize > DstTySize) @@ -701,8 +631,6 @@ ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const { return *this; } -/// sextOrTrunc - make this range have the bit width given by \p DstTySize. The -/// value is sign extended, truncated, or left alone to make it that width. ConstantRange ConstantRange::sextOrTrunc(uint32_t DstTySize) const { unsigned SrcTySize = getBitWidth(); if (SrcTySize > DstTySize) @@ -999,8 +927,6 @@ ConstantRange ConstantRange::inverse() const { return ConstantRange(Upper, Lower); } -/// print - Print out the bounds to a stream... -/// void ConstantRange::print(raw_ostream &OS) const { if (isFullSet()) OS << "full-set"; @@ -1011,8 +937,6 @@ void ConstantRange::print(raw_ostream &OS) const { } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -/// dump - Allow printing from a debugger easily... 
-/// LLVM_DUMP_METHOD void ConstantRange::dump() const { print(dbgs()); } diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp index c5f93c9f4db01..ffc8f2e4303b7 100644 --- a/lib/IR/Constants.cpp +++ b/lib/IR/Constants.cpp @@ -518,27 +518,19 @@ ConstantInt *ConstantInt::getFalse(LLVMContext &Context) { } Constant *ConstantInt::getTrue(Type *Ty) { - VectorType *VTy = dyn_cast<VectorType>(Ty); - if (!VTy) { - assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1."); - return ConstantInt::getTrue(Ty->getContext()); - } - assert(VTy->getElementType()->isIntegerTy(1) && - "True must be vector of i1 or i1."); - return ConstantVector::getSplat(VTy->getNumElements(), - ConstantInt::getTrue(Ty->getContext())); + assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1."); + ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext()); + if (auto *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), TrueC); + return TrueC; } Constant *ConstantInt::getFalse(Type *Ty) { - VectorType *VTy = dyn_cast<VectorType>(Ty); - if (!VTy) { - assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1."); - return ConstantInt::getFalse(Ty->getContext()); - } - assert(VTy->getElementType()->isIntegerTy(1) && - "False must be vector of i1 or i1."); - return ConstantVector::getSplat(VTy->getNumElements(), - ConstantInt::getFalse(Ty->getContext())); + assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1."); + ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext()); + if (auto *VTy = dyn_cast<VectorType>(Ty)) + return ConstantVector::getSplat(VTy->getNumElements(), FalseC); + return FalseC; } // Get a ConstantInt from an APInt. 
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp index b5ed30b85c8a1..50292b6e20bf1 100644 --- a/lib/IR/Core.cpp +++ b/lib/IR/Core.cpp @@ -863,6 +863,19 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) { return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count); } +LLVMValueRef LLVMMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) { + return wrap(MetadataAsValue::get(*unwrap(C), unwrap(MD))); +} + +LLVMMetadataRef LLVMValueAsMetadata(LLVMValueRef Val) { + auto *V = unwrap(Val); + if (auto *C = dyn_cast<Constant>(V)) + return wrap(ConstantAsMetadata::get(C)); + if (auto *MAV = dyn_cast<MetadataAsValue>(V)) + return wrap(MAV->getMetadata()); + return wrap(ValueAsMetadata::get(V)); +} + const char *LLVMGetMDString(LLVMValueRef V, unsigned *Length) { if (const auto *MD = dyn_cast<MetadataAsValue>(unwrap(V))) if (const MDString *S = dyn_cast<MDString>(MD->getMetadata())) { @@ -1883,13 +1896,8 @@ void LLVMRemoveStringAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx, void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A, const char *V) { Function *Func = unwrap<Function>(Fn); - AttributeList::AttrIndex Idx = - AttributeList::AttrIndex(AttributeList::FunctionIndex); - AttrBuilder B; - - B.addAttribute(A, V); - AttributeList Set = AttributeList::get(Func->getContext(), Idx, B); - Func->addAttributes(Idx, Set); + Attribute Attr = Attribute::get(Func->getContext(), A, V); + Func->addAttribute(AttributeList::FunctionIndex, Attr); } /*--.. Operations on parameters ............................................--*/ @@ -1949,9 +1957,7 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) { void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) { Argument *A = unwrap<Argument>(Arg); - AttrBuilder B; - B.addAlignmentAttr(align); - A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B)); + A->addAttr(Attribute::getWithAlignment(A->getContext(), align)); } /*--.. 
Operations on basic blocks ..........................................--*/ @@ -2158,11 +2164,8 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) { void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index, unsigned align) { CallSite Call = CallSite(unwrap<Instruction>(Instr)); - AttrBuilder B; - B.addAlignmentAttr(align); - Call.setAttributes(Call.getAttributes().addAttributes( - Call->getContext(), index, - AttributeList::get(Call->getContext(), index, B))); + Attribute AlignAttr = Attribute::getWithAlignment(Call->getContext(), align); + Call.addAttribute(index, AlignAttr); } void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx, diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp index 6f90ce5985686..93bacdd2e80fe 100644 --- a/lib/IR/DataLayout.cpp +++ b/lib/IR/DataLayout.cpp @@ -608,11 +608,8 @@ unsigned DataLayout::getPointerSize(unsigned AS) const { unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const { assert(Ty->isPtrOrPtrVectorTy() && "This should only be called with a pointer or pointer vector type"); - - if (Ty->isPointerTy()) - return getTypeSizeInBits(Ty); - - return getTypeSizeInBits(Ty->getScalarType()); + Ty = Ty->getScalarType(); + return getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace()); } /*! @@ -624,7 +621,7 @@ unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const { == false) for the requested type \a Ty. 
*/ unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const { - int AlignType = -1; + AlignTypeEnum AlignType; assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); switch (Ty->getTypeID()) { @@ -673,8 +670,7 @@ unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const { llvm_unreachable("Bad type for getAlignment!!!"); } - return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty), - abi_or_pref, Ty); + return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty), abi_or_pref, Ty); } unsigned DataLayout::getABITypeAlignment(Type *Ty) const { diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp index c4bb9e83acd79..e1f5fdea44e4e 100644 --- a/lib/IR/Function.cpp +++ b/lib/IR/Function.cpp @@ -138,13 +138,18 @@ bool Argument::onlyReadsMemory() const { Attrs.hasParamAttribute(getArgNo(), Attribute::ReadNone); } -void Argument::addAttr(AttributeList AS) { - assert(AS.getNumSlots() <= 1 && - "Trying to add more than one attribute set to an argument!"); - AttrBuilder B(AS, AS.getSlotIndex(0)); - getParent()->addAttributes( - getArgNo() + 1, - AttributeList::get(Parent->getContext(), getArgNo() + 1, B)); +void Argument::addAttrs(AttrBuilder &B) { + AttributeList AL = getParent()->getAttributes(); + AL = AL.addAttributes(Parent->getContext(), getArgNo() + 1, B); + getParent()->setAttributes(AL); +} + +void Argument::addAttr(Attribute::AttrKind Kind) { + getParent()->addAttribute(getArgNo() + 1, Kind); +} + +void Argument::addAttr(Attribute Attr) { + getParent()->addAttribute(getArgNo() + 1, Attr); } void Argument::removeAttr(AttributeList AS) { @@ -156,6 +161,10 @@ void Argument::removeAttr(AttributeList AS) { AttributeList::get(Parent->getContext(), getArgNo() + 1, B)); } +void Argument::removeAttr(Attribute::AttrKind Kind) { + getParent()->removeAttribute(getArgNo() + 1, Kind); +} + bool Argument::hasAttribute(Attribute::AttrKind Kind) const { return getParent()->hasParamAttribute(getArgNo(), Kind); } diff --git 
a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp index c10c144122e23..76582e334d1fa 100644 --- a/lib/IR/Instructions.cpp +++ b/lib/IR/Instructions.cpp @@ -1855,7 +1855,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, return false; // Mask must be vector of i32. - VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType()); + auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) return false; @@ -1863,10 +1863,10 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) return true; - if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) { + if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); for (Value *Op : MV->operands()) { - if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { + if (auto *CI = dyn_cast<ConstantInt>(Op)) { if (CI->uge(V1Size*2)) return false; } else if (!isa<UndefValue>(Op)) { @@ -1876,8 +1876,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, return true; } - if (const ConstantDataSequential *CDS = - dyn_cast<ConstantDataSequential>(Mask)) { + if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) if (CDS->getElementAsInteger(i) >= V1Size*2) @@ -1889,7 +1888,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, // used as the shuffle mask. When this occurs, the shuffle mask will // fall into this case and fail. To avoid this error, do this bit of // ugliness to allow such a mask pass. 
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask)) + if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) if (CE->getOpcode() == Instruction::UserOp1) return true; @@ -1898,7 +1897,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) { assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); - if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask)) + if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) return CDS->getElementAsInteger(i); Constant *C = Mask->getAggregateElement(i); if (isa<UndefValue>(C)) @@ -1910,7 +1909,7 @@ void ShuffleVectorInst::getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result) { unsigned NumElts = Mask->getType()->getVectorNumElements(); - if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) { + if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { for (unsigned i = 0; i != NumElts; ++i) Result.push_back(CDS->getElementAsInteger(i)); return; diff --git a/lib/MC/MCDwarf.cpp b/lib/MC/MCDwarf.cpp index cc32e90ad36ee..1a320b0165faf 100644 --- a/lib/MC/MCDwarf.cpp +++ b/lib/MC/MCDwarf.cpp @@ -168,7 +168,7 @@ EmitDwarfLineTable(MCObjectStreamer *MCOS, MCSection *Section, // and the current Label. const MCAsmInfo *asmInfo = MCOS->getContext().getAsmInfo(); MCOS->EmitDwarfAdvanceLineAddr(LineDelta, LastLabel, Label, - asmInfo->getPointerSize()); + asmInfo->getCodePointerSize()); Discriminator = 0; LastLine = LineEntry.getLine(); @@ -188,7 +188,7 @@ EmitDwarfLineTable(MCObjectStreamer *MCOS, MCSection *Section, const MCAsmInfo *AsmInfo = Ctx.getAsmInfo(); MCOS->EmitDwarfAdvanceLineAddr(INT64_MAX, LastLabel, SectionEnd, - AsmInfo->getPointerSize()); + AsmInfo->getCodePointerSize()); } // @@ -594,7 +594,7 @@ static void EmitGenDwarfAranges(MCStreamer *MCOS, // Figure the padding after the header before the table of address and size // pairs who's values are PointerSize'ed. 
const MCAsmInfo *asmInfo = context.getAsmInfo(); - int AddrSize = asmInfo->getPointerSize(); + int AddrSize = asmInfo->getCodePointerSize(); int Pad = 2 * AddrSize - (Length & (2 * AddrSize - 1)); if (Pad == 2 * AddrSize) Pad = 0; @@ -677,7 +677,7 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS, // The DWARF v5 header has unit type, address size, abbrev offset. // Earlier versions have abbrev offset, address size. const MCAsmInfo &AsmInfo = *context.getAsmInfo(); - int AddrSize = AsmInfo.getPointerSize(); + int AddrSize = AsmInfo.getCodePointerSize(); if (context.getDwarfVersion() >= 5) { MCOS->EmitIntValue(dwarf::DW_UT_compile, 1); MCOS->EmitIntValue(AddrSize, 1); @@ -823,7 +823,7 @@ static void EmitGenDwarfRanges(MCStreamer *MCOS) { auto &Sections = context.getGenDwarfSectionSyms(); const MCAsmInfo *AsmInfo = context.getAsmInfo(); - int AddrSize = AsmInfo->getPointerSize(); + int AddrSize = AsmInfo->getCodePointerSize(); MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfRangesSection()); @@ -981,7 +981,7 @@ static unsigned getSizeForEncoding(MCStreamer &streamer, default: llvm_unreachable("Unknown Encoding"); case dwarf::DW_EH_PE_absptr: case dwarf::DW_EH_PE_signed: - return context.getAsmInfo()->getPointerSize(); + return context.getAsmInfo()->getCodePointerSize(); case dwarf::DW_EH_PE_udata2: case dwarf::DW_EH_PE_sdata2: return 2; @@ -1318,7 +1318,7 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(const MCSymbol *personality, if (CIEVersion >= 4) { // Address Size - Streamer.EmitIntValue(context.getAsmInfo()->getPointerSize(), 1); + Streamer.EmitIntValue(context.getAsmInfo()->getCodePointerSize(), 1); // Segment Descriptor Size Streamer.EmitIntValue(0, 1); @@ -1384,7 +1384,7 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(const MCSymbol *personality, InitialCFAOffset = CFAOffset; // Padding - Streamer.EmitValueToAlignment(IsEH ? 4 : MAI->getPointerSize()); + Streamer.EmitValueToAlignment(IsEH ? 
4 : MAI->getCodePointerSize()); Streamer.EmitLabel(sectionEnd); return *sectionStart; @@ -1453,7 +1453,7 @@ void FrameEmitterImpl::EmitFDE(const MCSymbol &cieStart, // The size of a .eh_frame section has to be a multiple of the alignment // since a null CIE is interpreted as the end. Old systems overaligned // .eh_frame, so we do too and account for it in the last FDE. - unsigned Align = LastInSection ? asmInfo->getPointerSize() : PCSize; + unsigned Align = LastInSection ? asmInfo->getCodePointerSize() : PCSize; Streamer.EmitValueToAlignment(Align); Streamer.EmitLabel(fdeEnd); @@ -1514,6 +1514,7 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB, MCContext &Context = Streamer.getContext(); const MCObjectFileInfo *MOFI = Context.getObjectFileInfo(); + const MCAsmInfo *AsmInfo = Context.getAsmInfo(); FrameEmitterImpl Emitter(IsEH, Streamer); ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getDwarfFrameInfos(); @@ -1525,7 +1526,7 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB, if (Frame.CompactUnwindEncoding == 0) continue; if (!SectionEmitted) { Streamer.SwitchSection(MOFI->getCompactUnwindSection()); - Streamer.EmitValueToAlignment(Context.getAsmInfo()->getPointerSize()); + Streamer.EmitValueToAlignment(AsmInfo->getCodePointerSize()); SectionEmitted = true; } NeedsEHFrameSection |= diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp index e65ce9f0b9361..42e8ad340281a 100644 --- a/lib/MC/MCParser/AsmParser.cpp +++ b/lib/MC/MCParser/AsmParser.cpp @@ -1755,8 +1755,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info, case DK_8BYTE: return parseDirectiveValue(IDVal, 8); case DK_DC_A: - return parseDirectiveValue(IDVal, - getContext().getAsmInfo()->getPointerSize()); + return parseDirectiveValue( + IDVal, getContext().getAsmInfo()->getCodePointerSize()); case DK_OCTA: return parseDirectiveOctaValue(IDVal); case DK_SINGLE: diff --git a/lib/Object/Archive.cpp 
b/lib/Object/Archive.cpp index f2021f796d125..c4924f85a907a 100644 --- a/lib/Object/Archive.cpp +++ b/lib/Object/Archive.cpp @@ -1,4 +1,4 @@ -//===- Archive.cpp - ar File Format implementation --------------*- C++ -*-===// +//===- Archive.cpp - ar File Format implementation ------------------------===// // // The LLVM Compiler Infrastructure // @@ -11,12 +11,29 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Object/Archive.h" +#include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" +#include "llvm/Object/Archive.h" +#include "llvm/Object/Binary.h" +#include "llvm/Object/Error.h" +#include "llvm/Support/Chrono.h" #include "llvm/Support/Endian.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <cstring> +#include <memory> +#include <string> +#include <system_error> using namespace llvm; using namespace object; @@ -25,7 +42,7 @@ using namespace llvm::support::endian; static const char *const Magic = "!<arch>\n"; static const char *const ThinMagic = "!<thin>\n"; -void Archive::anchor() { } +void Archive::anchor() {} static Error malformedError(Twine Msg) { @@ -61,8 +78,8 @@ ArchiveMemberHeader::ArchiveMemberHeader(const Archive *Parent, if (Err) { std::string Buf; raw_string_ostream OS(Buf); - OS.write_escaped(llvm::StringRef(ArMemHdr->Terminator, - sizeof(ArMemHdr->Terminator))); + OS.write_escaped(StringRef(ArMemHdr->Terminator, + sizeof(ArMemHdr->Terminator))); OS.flush(); std::string Msg("terminator characters in archive member \"" + Buf + "\" not the correct \"`\\n\" values for the archive " @@ -97,13 +114,13 @@ Expected<StringRef> ArchiveMemberHeader::getRawName() const { EndCond = ' '; 
else EndCond = '/'; - llvm::StringRef::size_type end = - llvm::StringRef(ArMemHdr->Name, sizeof(ArMemHdr->Name)).find(EndCond); - if (end == llvm::StringRef::npos) + StringRef::size_type end = + StringRef(ArMemHdr->Name, sizeof(ArMemHdr->Name)).find(EndCond); + if (end == StringRef::npos) end = sizeof(ArMemHdr->Name); assert(end <= sizeof(ArMemHdr->Name) && end > 0); // Don't include the EndCond if there is one. - return llvm::StringRef(ArMemHdr->Name, end); + return StringRef(ArMemHdr->Name, end); } // This gets the name looking up long names. Size is the size of the archive @@ -205,12 +222,12 @@ Expected<StringRef> ArchiveMemberHeader::getName(uint64_t Size) const { Expected<uint32_t> ArchiveMemberHeader::getSize() const { uint32_t Ret; - if (llvm::StringRef(ArMemHdr->Size, - sizeof(ArMemHdr->Size)).rtrim(" ").getAsInteger(10, Ret)) { + if (StringRef(ArMemHdr->Size, + sizeof(ArMemHdr->Size)).rtrim(" ").getAsInteger(10, Ret)) { std::string Buf; raw_string_ostream OS(Buf); - OS.write_escaped(llvm::StringRef(ArMemHdr->Size, - sizeof(ArMemHdr->Size)).rtrim(" ")); + OS.write_escaped(StringRef(ArMemHdr->Size, + sizeof(ArMemHdr->Size)).rtrim(" ")); OS.flush(); uint64_t Offset = reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data(); @@ -227,8 +244,8 @@ Expected<sys::fs::perms> ArchiveMemberHeader::getAccessMode() const { sizeof(ArMemHdr->AccessMode)).rtrim(' ').getAsInteger(8, Ret)) { std::string Buf; raw_string_ostream OS(Buf); - OS.write_escaped(llvm::StringRef(ArMemHdr->AccessMode, - sizeof(ArMemHdr->AccessMode)).rtrim(" ")); + OS.write_escaped(StringRef(ArMemHdr->AccessMode, + sizeof(ArMemHdr->AccessMode)).rtrim(" ")); OS.flush(); uint64_t Offset = reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data(); @@ -247,8 +264,8 @@ ArchiveMemberHeader::getLastModified() const { .getAsInteger(10, Seconds)) { std::string Buf; raw_string_ostream OS(Buf); - OS.write_escaped(llvm::StringRef(ArMemHdr->LastModified, - 
sizeof(ArMemHdr->LastModified)).rtrim(" ")); + OS.write_escaped(StringRef(ArMemHdr->LastModified, + sizeof(ArMemHdr->LastModified)).rtrim(" ")); OS.flush(); uint64_t Offset = reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data(); diff --git a/lib/Object/Binary.cpp b/lib/Object/Binary.cpp index 8467d349cd959..2b44c4a82d2ca 100644 --- a/lib/Object/Binary.cpp +++ b/lib/Object/Binary.cpp @@ -1,4 +1,4 @@ -//===- Binary.cpp - A generic binary file -----------------------*- C++ -*-===// +//===- Binary.cpp - A generic binary file ---------------------------------===// // // The LLVM Compiler Infrastructure // @@ -11,21 +11,25 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Object/Binary.h" #include "llvm/ADT/StringRef.h" -#include "llvm/Support/FileSystem.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/Path.h" - -// Include headers for createBinary. #include "llvm/Object/Archive.h" +#include "llvm/Object/Binary.h" +#include "llvm/Object/Error.h" #include "llvm/Object/MachOUniversal.h" #include "llvm/Object/ObjectFile.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/MemoryBuffer.h" +#include <algorithm> +#include <memory> +#include <system_error> using namespace llvm; using namespace object; -Binary::~Binary() {} +Binary::~Binary() = default; Binary::Binary(unsigned int Type, MemoryBufferRef Source) : TypeID(Type), Data(Source) {} diff --git a/lib/Object/COFFObjectFile.cpp b/lib/Object/COFFObjectFile.cpp index a2d8f12449e6f..1866aba9b21a4 100644 --- a/lib/Object/COFFObjectFile.cpp +++ b/lib/Object/COFFObjectFile.cpp @@ -1,4 +1,4 @@ -//===- COFFObjectFile.cpp - COFF object file implementation -----*- C++ -*-===// +//===- COFFObjectFile.cpp - COFF object file implementation ---------------===// // // The LLVM Compiler Infrastructure // @@ -11,16 +11,28 @@ // 
//===----------------------------------------------------------------------===// -#include "llvm/Object/COFF.h" #include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/ADT/iterator_range.h" +#include "llvm/Object/Binary.h" +#include "llvm/Object/COFF.h" +#include "llvm/Object/Error.h" +#include "llvm/Object/ObjectFile.h" #include "llvm/Support/COFF.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" -#include <cctype> +#include "llvm/Support/Endian.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/MemoryBuffer.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <cstring> #include <limits> +#include <memory> +#include <system_error> using namespace llvm; using namespace object; @@ -116,7 +128,7 @@ const coff_symbol_type *COFFObjectFile::toSymb(DataRefImpl Ref) const { const coff_section *COFFObjectFile::toSec(DataRefImpl Ref) const { const coff_section *Addr = reinterpret_cast<const coff_section*>(Ref.p); -# ifndef NDEBUG +#ifndef NDEBUG // Verify that the section points to a valid entry in the section table. 
if (Addr < SectionTable || Addr >= (SectionTable + getNumberOfSections())) report_fatal_error("Section was outside of section table."); @@ -124,7 +136,7 @@ const coff_section *COFFObjectFile::toSec(DataRefImpl Ref) const { uintptr_t Offset = uintptr_t(Addr) - uintptr_t(SectionTable); assert(Offset % sizeof(coff_section) == 0 && "Section did not point to the beginning of a section"); -# endif +#endif return Addr; } @@ -985,7 +997,7 @@ COFFObjectFile::getSymbolAuxData(COFFSymbolRef Symbol) const { if (Symbol.getNumberOfAuxSymbols() > 0) { // AUX data comes immediately after the symbol in COFF Aux = reinterpret_cast<const uint8_t *>(Symbol.getRawPtr()) + SymbolSize; -# ifndef NDEBUG +#ifndef NDEBUG // Verify that the Aux symbol points to a valid entry in the symbol table. uintptr_t Offset = uintptr_t(Aux) - uintptr_t(base()); if (Offset < getPointerToSymbolTable() || @@ -995,7 +1007,7 @@ COFFObjectFile::getSymbolAuxData(COFFSymbolRef Symbol) const { assert((Offset - getPointerToSymbolTable()) % SymbolSize == 0 && "Aux Symbol data did not point to the beginning of a symbol"); -# endif +#endif } return makeArrayRef(Aux, Symbol.getNumberOfAuxSymbols() * SymbolSize); } diff --git a/lib/Object/IRSymtab.cpp b/lib/Object/IRSymtab.cpp index da1ef9946b505..bb3d1b2cf695f 100644 --- a/lib/Object/IRSymtab.cpp +++ b/lib/Object/IRSymtab.cpp @@ -28,14 +28,12 @@ struct Builder { Builder(SmallVector<char, 0> &Symtab, SmallVector<char, 0> &Strtab) : Symtab(Symtab), Strtab(Strtab) {} - StringTableBuilder StrtabBuilder{StringTableBuilder::ELF}; + StringTableBuilder StrtabBuilder{StringTableBuilder::RAW}; BumpPtrAllocator Alloc; StringSaver Saver{Alloc}; DenseMap<const Comdat *, unsigned> ComdatMap; - ModuleSymbolTable Msymtab; - SmallPtrSet<GlobalValue *, 8> Used; Mangler Mang; Triple TT; @@ -49,6 +47,7 @@ struct Builder { void setStr(storage::Str &S, StringRef Value) { S.Offset = StrtabBuilder.add(Value); + S.Size = Value.size(); } template <typename T> void writeRange(storage::Range<T> 
&R, const std::vector<T> &Objs) { @@ -59,18 +58,24 @@ struct Builder { } Error addModule(Module *M); - Error addSymbol(ModuleSymbolTable::Symbol Sym); + Error addSymbol(const ModuleSymbolTable &Msymtab, + const SmallPtrSet<GlobalValue *, 8> &Used, + ModuleSymbolTable::Symbol Sym); Error build(ArrayRef<Module *> Mods); }; Error Builder::addModule(Module *M) { + SmallPtrSet<GlobalValue *, 8> Used; collectUsedGlobalVariables(*M, Used, /*CompilerUsed*/ false); - storage::Module Mod; - Mod.Begin = Msymtab.symbols().size(); + ModuleSymbolTable Msymtab; Msymtab.addModule(M); - Mod.End = Msymtab.symbols().size(); + + storage::Module Mod; + Mod.Begin = Syms.size(); + Mod.End = Syms.size() + Msymtab.symbols().size(); + Mod.UncBegin = Uncommons.size(); Mods.push_back(Mod); if (TT.isOSBinFormatCOFF()) { @@ -84,20 +89,25 @@ Error Builder::addModule(Module *M) { } } + for (ModuleSymbolTable::Symbol Msym : Msymtab.symbols()) + if (Error Err = addSymbol(Msymtab, Used, Msym)) + return Err; + return Error::success(); } -Error Builder::addSymbol(ModuleSymbolTable::Symbol Msym) { +Error Builder::addSymbol(const ModuleSymbolTable &Msymtab, + const SmallPtrSet<GlobalValue *, 8> &Used, + ModuleSymbolTable::Symbol Msym) { Syms.emplace_back(); storage::Symbol &Sym = Syms.back(); Sym = {}; - Sym.UncommonIndex = -1; storage::Uncommon *Unc = nullptr; auto Uncommon = [&]() -> storage::Uncommon & { if (Unc) return *Unc; - Sym.UncommonIndex = Uncommons.size(); + Sym.Flags |= 1 << storage::Symbol::FB_has_uncommon; Uncommons.emplace_back(); Unc = &Uncommons.back(); *Unc = {}; @@ -194,15 +204,10 @@ Error Builder::build(ArrayRef<Module *> IRMods) { setStr(Hdr.SourceFileName, IRMods[0]->getSourceFileName()); TT = Triple(IRMods[0]->getTargetTriple()); - // This adds the symbols for each module to Msymtab. 
for (auto *M : IRMods) if (Error Err = addModule(M)) return Err; - for (ModuleSymbolTable::Symbol Msym : Msymtab.symbols()) - if (Error Err = addSymbol(Msym)) - return Err; - COFFLinkerOptsOS.flush(); setStr(Hdr.COFFLinkerOpts, COFFLinkerOpts); diff --git a/lib/Object/ObjectFile.cpp b/lib/Object/ObjectFile.cpp index f36388b677f36..1f60e7157bd9c 100644 --- a/lib/Object/ObjectFile.cpp +++ b/lib/Object/ObjectFile.cpp @@ -1,4 +1,4 @@ -//===- ObjectFile.cpp - File format independent object file -----*- C++ -*-===// +//===- ObjectFile.cpp - File format independent object file ---------------===// // // The LLVM Compiler Infrastructure // @@ -11,20 +11,28 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Object/ObjectFile.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Object/Binary.h" #include "llvm/Object/COFF.h" +#include "llvm/Object/Error.h" #include "llvm/Object/MachO.h" +#include "llvm/Object/ObjectFile.h" #include "llvm/Object/Wasm.h" +#include "llvm/Support/Error.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ErrorOr.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstdint> +#include <memory> #include <system_error> using namespace llvm; using namespace object; -void ObjectFile::anchor() { } +void ObjectFile::anchor() {} ObjectFile::ObjectFile(unsigned int Type, MemoryBufferRef Source) : SymbolicFile(Type, Source) {} diff --git a/lib/Object/SymbolicFile.cpp b/lib/Object/SymbolicFile.cpp index 4b51a49cf342d..16cff5c228bdd 100644 --- a/lib/Object/SymbolicFile.cpp +++ b/lib/Object/SymbolicFile.cpp @@ -1,4 +1,4 @@ -//===- SymbolicFile.cpp - Interface that only provides symbols --*- C++ -*-===// +//===- SymbolicFile.cpp - Interface that only provides symbols ------------===// // // The LLVM Compiler Infrastructure // @@ -11,12 +11,20 @@ // 
//===----------------------------------------------------------------------===// -#include "llvm/Object/COFF.h" +#include "llvm/ADT/StringRef.h" #include "llvm/Object/COFFImportFile.h" +#include "llvm/Object/Error.h" #include "llvm/Object/IRObjectFile.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Object/SymbolicFile.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" +#include <algorithm> +#include <memory> using namespace llvm; using namespace object; @@ -24,7 +32,7 @@ using namespace object; SymbolicFile::SymbolicFile(unsigned int Type, MemoryBufferRef Source) : Binary(Type, Source) {} -SymbolicFile::~SymbolicFile() {} +SymbolicFile::~SymbolicFile() = default; Expected<std::unique_ptr<SymbolicFile>> SymbolicFile::createSymbolicFile( MemoryBufferRef Object, sys::fs::file_magic Type, LLVMContext *Context) { diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index 9778628911cd0..c4c892f0352a0 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -3442,7 +3442,7 @@ void IEEEFloat::toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision, // Ignore trailing binary zeros. int trailingZeros = significand.countTrailingZeros(); exp += trailingZeros; - significand = significand.lshr(trailingZeros); + significand.lshrInPlace(trailingZeros); // Change the exponent from 2^e to 10^e. 
if (exp == 0) { diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp index 0c7da1dad0d21..2d049a1cff853 100644 --- a/lib/Support/APInt.cpp +++ b/lib/Support/APInt.cpp @@ -125,16 +125,16 @@ APInt::APInt(unsigned numbits, StringRef Str, uint8_t radix) fromString(numbits, Str, radix); } -APInt& APInt::AssignSlowCase(const APInt& RHS) { +void APInt::AssignSlowCase(const APInt& RHS) { // Don't do anything for X = X if (this == &RHS) - return *this; + return; if (BitWidth == RHS.getBitWidth()) { // assume same bit-width single-word case is already handled assert(!isSingleWord()); memcpy(pVal, RHS.pVal, getNumWords() * APINT_WORD_SIZE); - return *this; + return; } if (isSingleWord()) { @@ -154,7 +154,7 @@ APInt& APInt::AssignSlowCase(const APInt& RHS) { memcpy(pVal, RHS.pVal, RHS.getNumWords() * APINT_WORD_SIZE); } BitWidth = RHS.BitWidth; - return clearUnusedBits(); + clearUnusedBits(); } /// This method 'profiles' an APInt for use with FoldingSet. @@ -339,19 +339,16 @@ APInt& APInt::operator*=(const APInt& RHS) { return *this; } -APInt& APInt::AndAssignSlowCase(const APInt& RHS) { +void APInt::AndAssignSlowCase(const APInt& RHS) { tcAnd(pVal, RHS.pVal, getNumWords()); - return *this; } -APInt& APInt::OrAssignSlowCase(const APInt& RHS) { +void APInt::OrAssignSlowCase(const APInt& RHS) { tcOr(pVal, RHS.pVal, getNumWords()); - return *this; } -APInt& APInt::XorAssignSlowCase(const APInt& RHS) { +void APInt::XorAssignSlowCase(const APInt& RHS) { tcXor(pVal, RHS.pVal, getNumWords()); - return *this; } APInt APInt::operator*(const APInt& RHS) const { @@ -367,14 +364,6 @@ bool APInt::EqualSlowCase(const APInt& RHS) const { return std::equal(pVal, pVal + getNumWords(), RHS.pVal); } -bool APInt::EqualSlowCase(uint64_t Val) const { - unsigned n = getActiveBits(); - if (n <= APINT_BITS_PER_WORD) - return pVal[0] == Val; - else - return false; -} - bool APInt::ult(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be same for comparison"); if 
(isSingleWord()) @@ -733,6 +722,22 @@ unsigned APInt::countPopulationSlowCase() const { return Count; } +bool APInt::intersectsSlowCase(const APInt &RHS) const { + for (unsigned i = 0, e = getNumWords(); i != e; ++i) + if ((pVal[i] & RHS.pVal[i]) != 0) + return true; + + return false; +} + +bool APInt::isSubsetOfSlowCase(const APInt &RHS) const { + for (unsigned i = 0, e = getNumWords(); i != e; ++i) + if ((pVal[i] & ~RHS.pVal[i]) != 0) + return false; + + return true; +} + APInt APInt::byteSwap() const { assert(BitWidth >= 16 && BitWidth % 16 == 0 && "Cannot byteswap!"); if (BitWidth == 16) @@ -774,14 +779,12 @@ APInt APInt::reverseBits() const { } APInt Val(*this); - APInt Reversed(*this); - int S = BitWidth - 1; - - const APInt One(BitWidth, 1); + APInt Reversed(BitWidth, 0); + unsigned S = BitWidth; - for ((Val = Val.lshr(1)); Val != 0; (Val = Val.lshr(1))) { + for (; Val != 0; Val.lshrInPlace(1)) { Reversed <<= 1; - Reversed |= (Val & One); + Reversed |= Val[0]; --S; } @@ -1136,63 +1139,14 @@ APInt APInt::ashr(unsigned shiftAmt) const { /// Logical right-shift this APInt by shiftAmt. /// @brief Logical right-shift function. -APInt APInt::lshr(const APInt &shiftAmt) const { - return lshr((unsigned)shiftAmt.getLimitedValue(BitWidth)); -} - -/// Perform a logical right-shift from Src to Dst of Words words, by Shift, -/// which must be less than 64. If the source and destination ranges overlap, -/// we require that Src >= Dst (put another way, we require that the overall -/// operation is a right shift on the combined range). 
-static void lshrWords(APInt::WordType *Dst, APInt::WordType *Src, - unsigned Words, unsigned Shift) { - assert(Shift < APInt::APINT_BITS_PER_WORD); - - if (!Words) - return; - - if (Shift == 0) { - std::memmove(Dst, Src, Words * APInt::APINT_WORD_SIZE); - return; - } - - uint64_t Low = Src[0]; - for (unsigned I = 1; I != Words; ++I) { - uint64_t High = Src[I]; - Dst[I - 1] = - (Low >> Shift) | (High << (APInt::APINT_BITS_PER_WORD - Shift)); - Low = High; - } - Dst[Words - 1] = Low >> Shift; +void APInt::lshrInPlace(const APInt &shiftAmt) { + lshrInPlace((unsigned)shiftAmt.getLimitedValue(BitWidth)); } /// Logical right-shift this APInt by shiftAmt. /// @brief Logical right-shift function. -void APInt::lshrInPlace(unsigned shiftAmt) { - if (isSingleWord()) { - if (shiftAmt >= BitWidth) - VAL = 0; - else - VAL >>= shiftAmt; - return; - } - - // Don't bother performing a no-op shift. - if (!shiftAmt) - return; - - // Find number of complete words being shifted out and zeroed. - const unsigned Words = getNumWords(); - const unsigned ShiftFullWords = - std::min(shiftAmt / APINT_BITS_PER_WORD, Words); - - // Fill in first Words - ShiftFullWords by shifting. - lshrWords(pVal, pVal + ShiftFullWords, Words - ShiftFullWords, - shiftAmt % APINT_BITS_PER_WORD); - - // The remaining high words are all zero. - for (unsigned I = Words - ShiftFullWords; I != Words; ++I) - pVal[I] = 0; +void APInt::lshrSlowCase(unsigned ShiftAmt) { + tcShiftRight(pVal, getNumWords(), ShiftAmt); } /// Left-shift this APInt by shiftAmt. @@ -1202,60 +1156,9 @@ APInt APInt::shl(const APInt &shiftAmt) const { return shl((unsigned)shiftAmt.getLimitedValue(BitWidth)); } -APInt APInt::shlSlowCase(unsigned shiftAmt) const { - // If all the bits were shifted out, the result is 0. This avoids issues - // with shifting by the size of the integer type, which produces undefined - // results. We define these "undefined results" to always be 0. 
- if (shiftAmt == BitWidth) - return APInt(BitWidth, 0); - - // If none of the bits are shifted out, the result is *this. This avoids a - // lshr by the words size in the loop below which can produce incorrect - // results. It also avoids the expensive computation below for a common case. - if (shiftAmt == 0) - return *this; - - // Create some space for the result. - uint64_t * val = new uint64_t[getNumWords()]; - - // If we are shifting less than a word, do it the easy way - if (shiftAmt < APINT_BITS_PER_WORD) { - uint64_t carry = 0; - for (unsigned i = 0; i < getNumWords(); i++) { - val[i] = pVal[i] << shiftAmt | carry; - carry = pVal[i] >> (APINT_BITS_PER_WORD - shiftAmt); - } - APInt Result(val, BitWidth); - Result.clearUnusedBits(); - return Result; - } - - // Compute some values needed by the remaining shift algorithms - unsigned wordShift = shiftAmt % APINT_BITS_PER_WORD; - unsigned offset = shiftAmt / APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - for (unsigned i = 0; i < offset; i++) - val[i] = 0; - for (unsigned i = offset; i < getNumWords(); i++) - val[i] = pVal[i-offset]; - APInt Result(val, BitWidth); - Result.clearUnusedBits(); - return Result; - } - - // Copy whole words from this to Result. - unsigned i = getNumWords() - 1; - for (; i > offset; --i) - val[i] = pVal[i-offset] << wordShift | - pVal[i-offset-1] >> (APINT_BITS_PER_WORD - wordShift); - val[offset] = pVal[0] << wordShift; - for (i = 0; i < offset; ++i) - val[i] = 0; - APInt Result(val, BitWidth); - Result.clearUnusedBits(); - return Result; +void APInt::shlSlowCase(unsigned ShiftAmt) { + tcShiftLeft(pVal, getNumWords(), ShiftAmt); + clearUnusedBits(); } // Calculate the rotate amount modulo the bit width. 
@@ -2239,7 +2142,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix, while (Tmp != 0) { unsigned Digit = unsigned(Tmp.getRawData()[0]) & MaskAmt; Str.push_back(Digits[Digit]); - Tmp = Tmp.lshr(ShiftAmt); + Tmp.lshrInPlace(ShiftAmt); } } else { APInt divisor(Radix == 10? 4 : 8, Radix); @@ -2698,63 +2601,58 @@ int APInt::tcDivide(WordType *lhs, const WordType *rhs, return false; } -/* Shift a bignum left COUNT bits in-place. Shifted in bits are zero. - There are no restrictions on COUNT. */ -void APInt::tcShiftLeft(WordType *dst, unsigned parts, unsigned count) { - if (count) { - /* Jump is the inter-part jump; shift is is intra-part shift. */ - unsigned jump = count / APINT_BITS_PER_WORD; - unsigned shift = count % APINT_BITS_PER_WORD; - - while (parts > jump) { - WordType part; +/// Shift a bignum left Cound bits in-place. Shifted in bits are zero. There are +/// no restrictions on Count. +void APInt::tcShiftLeft(WordType *Dst, unsigned Words, unsigned Count) { + // Don't bother performing a no-op shift. + if (!Count) + return; - parts--; + /* WordShift is the inter-part shift; BitShift is is intra-part shift. */ + unsigned WordShift = std::min(Count / APINT_BITS_PER_WORD, Words); + unsigned BitShift = Count % APINT_BITS_PER_WORD; - /* dst[i] comes from the two parts src[i - jump] and, if we have - an intra-part shift, src[i - jump - 1]. */ - part = dst[parts - jump]; - if (shift) { - part <<= shift; - if (parts >= jump + 1) - part |= dst[parts - jump - 1] >> (APINT_BITS_PER_WORD - shift); - } - - dst[parts] = part; + // Fastpath for moving by whole words. + if (BitShift == 0) { + std::memmove(Dst + WordShift, Dst, (Words - WordShift) * APINT_WORD_SIZE); + } else { + while (Words-- > WordShift) { + Dst[Words] = Dst[Words - WordShift] << BitShift; + if (Words > WordShift) + Dst[Words] |= + Dst[Words - WordShift - 1] >> (APINT_BITS_PER_WORD - BitShift); } - - while (parts > 0) - dst[--parts] = 0; } + + // Fill in the remainder with 0s. 
+ std::memset(Dst, 0, WordShift * APINT_WORD_SIZE); } -/* Shift a bignum right COUNT bits in-place. Shifted in bits are - zero. There are no restrictions on COUNT. */ -void APInt::tcShiftRight(WordType *dst, unsigned parts, unsigned count) { - if (count) { - /* Jump is the inter-part jump; shift is is intra-part shift. */ - unsigned jump = count / APINT_BITS_PER_WORD; - unsigned shift = count % APINT_BITS_PER_WORD; +/// Shift a bignum right Count bits in-place. Shifted in bits are zero. There +/// are no restrictions on Count. +void APInt::tcShiftRight(WordType *Dst, unsigned Words, unsigned Count) { + // Don't bother performing a no-op shift. + if (!Count) + return; - /* Perform the shift. This leaves the most significant COUNT bits - of the result at zero. */ - for (unsigned i = 0; i < parts; i++) { - WordType part; + // WordShift is the inter-part shift; BitShift is is intra-part shift. + unsigned WordShift = std::min(Count / APINT_BITS_PER_WORD, Words); + unsigned BitShift = Count % APINT_BITS_PER_WORD; - if (i + jump >= parts) { - part = 0; - } else { - part = dst[i + jump]; - if (shift) { - part >>= shift; - if (i + jump + 1 < parts) - part |= dst[i + jump + 1] << (APINT_BITS_PER_WORD - shift); - } - } - - dst[i] = part; + unsigned WordsToMove = Words - WordShift; + // Fastpath for moving by whole words. + if (BitShift == 0) { + std::memmove(Dst, Dst + WordShift, WordsToMove * APINT_WORD_SIZE); + } else { + for (unsigned i = 0; i != WordsToMove; ++i) { + Dst[i] = Dst[i + WordShift] >> BitShift; + if (i + 1 != WordsToMove) + Dst[i] |= Dst[i + WordShift + 1] << (APINT_BITS_PER_WORD - BitShift); } } + + // Fill in the remainder with 0s. + std::memset(Dst + WordsToMove, 0, WordShift * APINT_WORD_SIZE); } /* Bitwise and of two bignums. 
*/ diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp index f4a9108b8544e..34345901eab1f 100644 --- a/lib/Support/CommandLine.cpp +++ b/lib/Support/CommandLine.cpp @@ -2069,12 +2069,15 @@ public: #ifndef NDEBUG OS << " with assertions"; #endif +#if LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO std::string CPU = sys::getHostCPUName(); if (CPU == "generic") CPU = "(unknown)"; OS << ".\n" << " Default target: " << sys::getDefaultTargetTriple() << '\n' - << " Host CPU: " << CPU << '\n'; + << " Host CPU: " << CPU; +#endif + OS << '\n'; } void operator=(bool OptionWasSpecified) { if (!OptionWasSpecified) diff --git a/lib/Support/Dwarf.cpp b/lib/Support/Dwarf.cpp index f13da62e4a87c..200546857de7f 100644 --- a/lib/Support/Dwarf.cpp +++ b/lib/Support/Dwarf.cpp @@ -22,7 +22,7 @@ StringRef llvm::dwarf::TagString(unsigned Tag) { switch (Tag) { default: return StringRef(); -#define HANDLE_DW_TAG(ID, NAME) \ +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \ case DW_TAG_##NAME: \ return "DW_TAG_" #NAME; #include "llvm/Support/Dwarf.def" @@ -31,11 +31,34 @@ StringRef llvm::dwarf::TagString(unsigned Tag) { unsigned llvm::dwarf::getTag(StringRef TagString) { return StringSwitch<unsigned>(TagString) -#define HANDLE_DW_TAG(ID, NAME) .Case("DW_TAG_" #NAME, DW_TAG_##NAME) +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \ + .Case("DW_TAG_" #NAME, DW_TAG_##NAME) #include "llvm/Support/Dwarf.def" .Default(DW_TAG_invalid); } +unsigned llvm::dwarf::TagVersion(dwarf::Tag Tag) { + switch (Tag) { + default: + return 0; +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \ + case DW_TAG_##NAME: \ + return VERSION; +#include "llvm/Support/Dwarf.def" + } +} + +unsigned llvm::dwarf::TagVendor(dwarf::Tag Tag) { + switch (Tag) { + default: + return 0; +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \ + case DW_TAG_##NAME: \ + return DWARF_VENDOR_##VENDOR; +#include "llvm/Support/Dwarf.def" + } +} + StringRef llvm::dwarf::ChildrenString(unsigned Children) { switch (Children) { 
case DW_CHILDREN_no: return "DW_CHILDREN_no"; @@ -48,29 +71,73 @@ StringRef llvm::dwarf::AttributeString(unsigned Attribute) { switch (Attribute) { default: return StringRef(); -#define HANDLE_DW_AT(ID, NAME) \ - case DW_AT_##NAME: \ +#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) \ + case DW_AT_##NAME: \ return "DW_AT_" #NAME; #include "llvm/Support/Dwarf.def" } } +unsigned llvm::dwarf::AttributeVersion(dwarf::Attribute Attribute) { + switch (Attribute) { + default: + return 0; +#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) \ + case DW_AT_##NAME: \ + return VERSION; +#include "llvm/Support/Dwarf.def" + } +} + +unsigned llvm::dwarf::AttributeVendor(dwarf::Attribute Attribute) { + switch (Attribute) { + default: + return 0; +#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) \ + case DW_AT_##NAME: \ + return DWARF_VENDOR_##VENDOR; +#include "llvm/Support/Dwarf.def" + } +} + StringRef llvm::dwarf::FormEncodingString(unsigned Encoding) { switch (Encoding) { default: return StringRef(); -#define HANDLE_DW_FORM(ID, NAME) \ - case DW_FORM_##NAME: \ +#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) \ + case DW_FORM_##NAME: \ return "DW_FORM_" #NAME; #include "llvm/Support/Dwarf.def" } } +unsigned llvm::dwarf::FormVersion(dwarf::Form Form) { + switch (Form) { + default: + return 0; +#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) \ + case DW_FORM_##NAME: \ + return VERSION; +#include "llvm/Support/Dwarf.def" + } +} + +unsigned llvm::dwarf::FormVendor(dwarf::Form Form) { + switch (Form) { + default: + return 0; +#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) \ + case DW_FORM_##NAME: \ + return DWARF_VENDOR_##VENDOR; +#include "llvm/Support/Dwarf.def" + } +} + StringRef llvm::dwarf::OperationEncodingString(unsigned Encoding) { switch (Encoding) { default: return StringRef(); -#define HANDLE_DW_OP(ID, NAME) \ +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \ case DW_OP_##NAME: \ return "DW_OP_" #NAME; #include "llvm/Support/Dwarf.def" @@ -81,17 +148,40 @@ StringRef 
llvm::dwarf::OperationEncodingString(unsigned Encoding) { unsigned llvm::dwarf::getOperationEncoding(StringRef OperationEncodingString) { return StringSwitch<unsigned>(OperationEncodingString) -#define HANDLE_DW_OP(ID, NAME) .Case("DW_OP_" #NAME, DW_OP_##NAME) +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \ + .Case("DW_OP_" #NAME, DW_OP_##NAME) #include "llvm/Support/Dwarf.def" .Case("DW_OP_LLVM_fragment", DW_OP_LLVM_fragment) .Default(0); } +unsigned llvm::dwarf::OperationVersion(dwarf::LocationAtom Op) { + switch (Op) { + default: + return 0; +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \ + case DW_OP_##NAME: \ + return VERSION; +#include "llvm/Support/Dwarf.def" + } +} + +unsigned llvm::dwarf::OperationVendor(dwarf::LocationAtom Op) { + switch (Op) { + default: + return 0; +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \ + case DW_OP_##NAME: \ + return DWARF_VENDOR_##VENDOR; +#include "llvm/Support/Dwarf.def" + } +} + StringRef llvm::dwarf::AttributeEncodingString(unsigned Encoding) { switch (Encoding) { default: return StringRef(); -#define HANDLE_DW_ATE(ID, NAME) \ +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \ case DW_ATE_##NAME: \ return "DW_ATE_" #NAME; #include "llvm/Support/Dwarf.def" @@ -100,11 +190,34 @@ StringRef llvm::dwarf::AttributeEncodingString(unsigned Encoding) { unsigned llvm::dwarf::getAttributeEncoding(StringRef EncodingString) { return StringSwitch<unsigned>(EncodingString) -#define HANDLE_DW_ATE(ID, NAME) .Case("DW_ATE_" #NAME, DW_ATE_##NAME) +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \ + .Case("DW_ATE_" #NAME, DW_ATE_##NAME) #include "llvm/Support/Dwarf.def" .Default(0); } +unsigned llvm::dwarf::AttributeEncodingVersion(dwarf::TypeKind ATE) { + switch (ATE) { + default: + return 0; +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \ + case DW_ATE_##NAME: \ + return VERSION; +#include "llvm/Support/Dwarf.def" + } +} + +unsigned llvm::dwarf::AttributeEncodingVendor(dwarf::TypeKind ATE) { + switch (ATE) { + default: + 
return 0; +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \ + case DW_ATE_##NAME: \ + return DWARF_VENDOR_##VENDOR; +#include "llvm/Support/Dwarf.def" + } +} + StringRef llvm::dwarf::DecimalSignString(unsigned Sign) { switch (Sign) { case DW_DS_unsigned: return "DW_DS_unsigned"; @@ -169,7 +282,7 @@ StringRef llvm::dwarf::LanguageString(unsigned Language) { switch (Language) { default: return StringRef(); -#define HANDLE_DW_LANG(ID, NAME) \ +#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \ case DW_LANG_##NAME: \ return "DW_LANG_" #NAME; #include "llvm/Support/Dwarf.def" @@ -178,11 +291,34 @@ StringRef llvm::dwarf::LanguageString(unsigned Language) { unsigned llvm::dwarf::getLanguage(StringRef LanguageString) { return StringSwitch<unsigned>(LanguageString) -#define HANDLE_DW_LANG(ID, NAME) .Case("DW_LANG_" #NAME, DW_LANG_##NAME) +#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \ + .Case("DW_LANG_" #NAME, DW_LANG_##NAME) #include "llvm/Support/Dwarf.def" .Default(0); } +unsigned llvm::dwarf::LanguageVersion(dwarf::SourceLanguage Lang) { + switch (Lang) { + default: + return 0; +#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \ + case DW_LANG_##NAME: \ + return VERSION; +#include "llvm/Support/Dwarf.def" + } +} + +unsigned llvm::dwarf::LanguageVendor(dwarf::SourceLanguage Lang) { + switch (Lang) { + default: + return 0; +#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \ + case DW_LANG_##NAME: \ + return DWARF_VENDOR_##VENDOR; +#include "llvm/Support/Dwarf.def" + } +} + StringRef llvm::dwarf::CaseString(unsigned Case) { switch (Case) { case DW_ID_case_sensitive: return "DW_ID_case_sensitive"; @@ -394,3 +530,12 @@ StringRef llvm::dwarf::AttributeValueString(uint16_t Attr, unsigned Val) { return StringRef(); } + +bool llvm::dwarf::isValidFormForVersion(Form F, unsigned Version, + bool ExtensionsOk) { + if (FormVendor(F) == DWARF_VENDOR_DWARF) { + unsigned FV = FormVersion(F); + return FV > 0 && FV <= Version; + } + return ExtensionsOk; +} diff --git 
a/lib/Support/LowLevelType.cpp b/lib/Support/LowLevelType.cpp index 4290d69cd197d..0ee3f1d0119e3 100644 --- a/lib/Support/LowLevelType.cpp +++ b/lib/Support/LowLevelType.cpp @@ -18,25 +18,25 @@ using namespace llvm; LLT::LLT(MVT VT) { if (VT.isVector()) { - SizeInBits = VT.getVectorElementType().getSizeInBits(); - ElementsOrAddrSpace = VT.getVectorNumElements(); - Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector; + init(/*isPointer=*/false, VT.getVectorNumElements() > 1, + VT.getVectorNumElements(), VT.getVectorElementType().getSizeInBits(), + /*AddressSpace=*/0); } else if (VT.isValid()) { // Aggregates are no different from real scalars as far as GlobalISel is // concerned. - Kind = Scalar; - SizeInBits = VT.getSizeInBits(); - ElementsOrAddrSpace = 1; - assert(SizeInBits != 0 && "invalid zero-sized type"); + assert(VT.getSizeInBits() != 0 && "invalid zero-sized type"); + init(/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0, + VT.getSizeInBits(), /*AddressSpace=*/0); } else { - Kind = Invalid; - SizeInBits = ElementsOrAddrSpace = 0; + IsPointer = false; + IsVector = false; + RawData = 0; } } void LLT::print(raw_ostream &OS) const { if (isVector()) - OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">"; + OS << "<" << getNumElements() << " x " << getElementType() << ">"; else if (isPointer()) OS << "p" << getAddressSpace(); else if (isValid()) { @@ -45,3 +45,12 @@ void LLT::print(raw_ostream &OS) const { } else llvm_unreachable("trying to print an invalid type"); } + +const constexpr LLT::BitFieldInfo LLT::ScalarSizeFieldInfo; +const constexpr LLT::BitFieldInfo LLT::PointerSizeFieldInfo; +const constexpr LLT::BitFieldInfo LLT::PointerAddressSpaceFieldInfo; +const constexpr LLT::BitFieldInfo LLT::VectorElementsFieldInfo; +const constexpr LLT::BitFieldInfo LLT::VectorSizeFieldInfo; +const constexpr LLT::BitFieldInfo LLT::PointerVectorElementsFieldInfo; +const constexpr LLT::BitFieldInfo LLT::PointerVectorSizeFieldInfo; +const constexpr 
LLT::BitFieldInfo LLT::PointerVectorAddressSpaceFieldInfo; diff --git a/lib/Support/Regex.cpp b/lib/Support/Regex.cpp index 68ba79e11766c..b1087fd8853cb 100644 --- a/lib/Support/Regex.cpp +++ b/lib/Support/Regex.cpp @@ -48,7 +48,7 @@ Regex::~Regex() { } } -bool Regex::isValid(std::string &Error) { +bool Regex::isValid(std::string &Error) const { if (!error) return true; diff --git a/lib/Support/TargetParser.cpp b/lib/Support/TargetParser.cpp index 639d2ece263a1..bba7c6d0d6042 100644 --- a/lib/Support/TargetParser.cpp +++ b/lib/Support/TargetParser.cpp @@ -210,7 +210,7 @@ bool llvm::ARM::getHWDivFeatures(unsigned HWDivKind, else Features.push_back("-hwdiv-arm"); - if (HWDivKind & ARM::AEK_HWDIV) + if (HWDivKind & ARM::AEK_HWDIVTHUMB) Features.push_back("+hwdiv"); else Features.push_back("-hwdiv"); diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index ae01ea477bb9a..7141e77fcd253 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -1865,7 +1865,7 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits, OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm); getUsefulBits(Op, OpUsefulBits, Depth + 1); // The interesting part was at zero in the argument - OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm); + OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm); } UsefulBits &= OpUsefulBits; @@ -1894,13 +1894,13 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits, uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue); Mask = Mask.shl(ShiftAmt); getUsefulBits(Op, Mask, Depth + 1); - Mask = Mask.lshr(ShiftAmt); + Mask.lshrInPlace(ShiftAmt); } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) { // Shift Right // We do not handle AArch64_AM::ASR, because the sign will change the // number of useful bits uint64_t ShiftAmt = 
AArch64_AM::getShiftValue(ShiftTypeAndValue); - Mask = Mask.lshr(ShiftAmt); + Mask.lshrInPlace(ShiftAmt); getUsefulBits(Op, Mask, Depth + 1); Mask = Mask.shl(ShiftAmt); } else @@ -1954,7 +1954,7 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits, if (Op.getOperand(1) == Orig) { // Copy the bits from the result to the zero bits. Mask = ResultUsefulBits & OpUsefulBits; - Mask = Mask.lshr(LSB); + Mask.lshrInPlace(LSB); } if (Op.getOperand(0) == Orig) diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 0d3289ac84c30..4ddc95199d4c6 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3239,30 +3239,26 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol // node so that legalize doesn't hack it. - if (getTargetMachine().getCodeModel() == CodeModel::Large && - Subtarget->isTargetMachO()) { - if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { + if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { + auto GV = G->getGlobal(); + if (Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine()) == + AArch64II::MO_GOT) { + Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT); + Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); + } else { const GlobalValue *GV = G->getGlobal(); - bool InternalLinkage = GV->hasInternalLinkage(); - if (InternalLinkage) - Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0); - else { - Callee = - DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT); - Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); - } - } else if (ExternalSymbolSDNode *S = - dyn_cast<ExternalSymbolSDNode>(Callee)) { + Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0); + } + } else if (auto *S = 
dyn_cast<ExternalSymbolSDNode>(Callee)) { + if (getTargetMachine().getCodeModel() == CodeModel::Large && + Subtarget->isTargetMachO()) { const char *Sym = S->getSymbol(); Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT); Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); + } else { + const char *Sym = S->getSymbol(); + Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0); } - } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { - const GlobalValue *GV = G->getGlobal(); - Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0); - } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { - const char *Sym = S->getSymbol(); - Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0); } // We don't usually want to end the call-sequence here because we would tidy @@ -7130,7 +7126,7 @@ bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const { if (I->getOpcode() != Instruction::FMul) return true; - if (I->getNumUses() != 1) + if (!I->hasOneUse()) return true; Instruction *User = I->user_back(); @@ -10395,7 +10391,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, // call. This will cause the optimizers to attempt to move, or duplicate, // return instructions to help enable tail call optimizations for this // instruction. 
-bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { +bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { return CI->isTailCall(); } diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index 2ad6c8b23df8c..a023b4373835c 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -593,7 +593,7 @@ private: } bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override; - bool mayBeEmittedAsTailCall(CallInst *CI) const override; + bool mayBeEmittedAsTailCall(const CallInst *CI) const override; bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, bool &IsInc, SelectionDAG &DAG) const; diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td index 4449412532f30..82e9c5a88e3b8 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.td +++ b/lib/Target/AArch64/AArch64InstrInfo.td @@ -2586,6 +2586,11 @@ def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>, def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>, Sched<[WriteF]>; } +// Similarly add aliases +def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>, + Requires<[HasFullFP16]>; +def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>; +def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>; //===----------------------------------------------------------------------===// // Floating point conversion instruction. 
diff --git a/lib/Target/AArch64/AArch64InstructionSelector.cpp b/lib/Target/AArch64/AArch64InstructionSelector.cpp index 878dac6bff1e3..5e01b6cd2b46f 100644 --- a/lib/Target/AArch64/AArch64InstructionSelector.cpp +++ b/lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -20,6 +20,7 @@ #include "AArch64TargetMachine.h" #include "MCTargetDesc/AArch64AddressingModes.h" #include "llvm/CodeGen/GlobalISel/InstructionSelector.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstr.h" diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp index 20a5979f9b4b7..6f9021c4a030c 100644 --- a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp +++ b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp @@ -482,7 +482,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands); for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { auto &MO = MI.getOperand(Idx); - if (!MO.isReg()) + if (!MO.isReg() || !MO.getReg()) continue; LLT Ty = MRI.getType(MO.getReg()); @@ -537,7 +537,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands}; SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands); for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { - if (MI.getOperand(Idx).isReg()) { + if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) { auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]); if (!Mapping->isValid()) return InstructionMapping(); diff --git a/lib/Target/AArch64/AArch64SchedFalkorDetails.td b/lib/Target/AArch64/AArch64SchedFalkorDetails.td index 6bce4ef6b652b..4bd77d3444887 100644 --- a/lib/Target/AArch64/AArch64SchedFalkorDetails.td +++ b/lib/Target/AArch64/AArch64SchedFalkorDetails.td @@ -265,6 +265,12 @@ def : 
InstRW<[FalkorWr_2LD_2VXVY_2LD_1XYZ_2VXVY_4cyc, WriteAdr],(instregex "^LD4 // Arithmetic and Logical Instructions // ----------------------------------------------------------------------------- def : InstRW<[FalkorWr_ADD], (instregex "^ADD(S)?(W|X)r(s|x)$")>; +def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^AND(S)?(W|X)r(i|r|s)$")>; +def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^BIC(S)?(W|X)r(r|s)$")>; +def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^EON(W|X)r(r|s)$")>; +def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^EOR(W|X)r(i|r|s)$")>; +def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^ORN(W|X)r(r|s)$")>; +def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^ORR(W|X)r(i|r|s)$")>; def : InstRW<[FalkorWr_2XYZ_2cyc], (instregex "^SUB(S)?(W|X)r(s|x)$")>; // SIMD Miscellaneous Instructions diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp index b3aba4781db89..042755bd36d0a 100644 --- a/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/lib/Target/AArch64/AArch64Subtarget.cpp @@ -35,6 +35,11 @@ static cl::opt<bool> UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of " "an address is ignored"), cl::init(false), cl::Hidden); +static cl::opt<bool> + UseNonLazyBind("aarch64-enable-nonlazybind", + cl::desc("Call nonlazybind functions via direct GOT load"), + cl::init(false), cl::Hidden); + AArch64Subtarget & AArch64Subtarget::initializeSubtargetDependencies(StringRef FS, StringRef CPUString) { @@ -155,6 +160,23 @@ AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV, return AArch64II::MO_NO_FLAG; } +unsigned char AArch64Subtarget::classifyGlobalFunctionReference( + const GlobalValue *GV, const TargetMachine &TM) const { + // MachO large model always goes via a GOT, because we don't have the + // relocations available to do anything else.. 
+ if (TM.getCodeModel() == CodeModel::Large && isTargetMachO() && + !GV->hasInternalLinkage()) + return AArch64II::MO_GOT; + + // NonLazyBind goes via GOT unless we know it's available locally. + auto *F = dyn_cast<Function>(GV); + if (UseNonLazyBind && F && F->hasFnAttribute(Attribute::NonLazyBind) && + !TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) + return AArch64II::MO_GOT; + + return AArch64II::MO_NO_FLAG; +} + /// This function returns the name of a function which has an interface /// like the non-standard bzero function, if such a function exists on /// the current subtarget and it is considered prefereable over diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h index 40ad9185012cb..3d66a9ea8ce66 100644 --- a/lib/Target/AArch64/AArch64Subtarget.h +++ b/lib/Target/AArch64/AArch64Subtarget.h @@ -271,6 +271,9 @@ public: unsigned char ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const; + unsigned char classifyGlobalFunctionReference(const GlobalValue *GV, + const TargetMachine &TM) const; + /// This function returns the name of a function which has an interface /// like the non-standard bzero function, if such a function exists on /// the current subtarget and it is considered prefereable over diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp index cbab68979c567..d7bbc2bcd22cf 100644 --- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -2100,27 +2100,9 @@ AArch64AsmParser::tryParseFPImm(OperandVector &Operands) { bool isNegative = parseOptionalToken(AsmToken::Minus); const AsmToken &Tok = Parser.getTok(); - if (Tok.is(AsmToken::Real)) { - APFloat RealVal(APFloat::IEEEdouble(), Tok.getString()); - if (isNegative) - RealVal.changeSign(); - - uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); - int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal)); - Parser.Lex(); // 
Eat the token. - // Check for out of range values. As an exception, we let Zero through, - // as we handle that special case in post-processing before matching in - // order to use the zero register for it. - if (Val == -1 && !RealVal.isPosZero()) { - TokError("expected compatible register or floating-point constant"); - return MatchOperand_ParseFail; - } - Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext())); - return MatchOperand_Success; - } - if (Tok.is(AsmToken::Integer)) { + if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) { int64_t Val; - if (!isNegative && Tok.getString().startswith("0x")) { + if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) { Val = Tok.getIntVal(); if (Val > 255 || Val < 0) { TokError("encoded floating point value out of range"); @@ -2128,10 +2110,24 @@ AArch64AsmParser::tryParseFPImm(OperandVector &Operands) { } } else { APFloat RealVal(APFloat::IEEEdouble(), Tok.getString()); + if (isNegative) + RealVal.changeSign(); + uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); - // If we had a '-' in front, toggle the sign bit. - IntVal ^= (uint64_t)isNegative << 63; Val = AArch64_AM::getFP64Imm(APInt(64, IntVal)); + + // Check for out of range values. As an exception we let Zero through, + // but as tokens instead of an FPImm so that it can be matched by the + // appropriate alias if one exists. + if (RealVal.isPosZero()) { + Parser.Lex(); // Eat the token. + Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext())); + Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext())); + return MatchOperand_Success; + } else if (Val == -1) { + TokError("expected compatible register or floating-point constant"); + return MatchOperand_ParseFail; + } } Parser.Lex(); // Eat the token. 
Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext())); @@ -3655,21 +3651,6 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, } } - // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR. - if (NumOperands == 3 && Tok == "fmov") { - AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]); - AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]); - if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) { - unsigned zreg = - !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains( - RegOp.getReg()) - ? AArch64::WZR - : AArch64::XZR; - Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(), - Op.getEndLoc(), getContext()); - } - } - MCInst Inst; // First try to match against the secondary set of tables containing the // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2"). diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp index 8fc8223295958..94112849f84ea 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp +++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp @@ -39,7 +39,7 @@ AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() { PrivateLabelPrefix = "L"; SeparatorString = "%%"; CommentString = ";"; - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; AlignmentIsInBytes = false; UsesELFSectionDirectiveForBSS = true; @@ -71,7 +71,7 @@ AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(const Triple &T) { // We prefer NEON instructions to be printed in the short form. AssemblerDialect = AsmWriterVariant == Default ? 0 : AsmWriterVariant; - PointerSize = 8; + CodePointerSize = 8; // ".comm align is in bytes but .align is pow-2." 
AlignmentIsInBytes = false; diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp index 0446655830d1f..a81bcb56dfdcd 100644 --- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -144,6 +144,10 @@ bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough( } void AMDGPUAsmPrinter::EmitFunctionBodyStart() { + const AMDGPUMachineFunction *MFI = MF->getInfo<AMDGPUMachineFunction>(); + if (!MFI->isEntryFunction()) + return; + const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>(); SIProgramInfo KernelInfo; amd_kernel_code_t KernelCode; @@ -184,9 +188,11 @@ void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { } bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { + const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>(); // The starting address of all shader programs must be 256 bytes aligned. - MF.setAlignment(8); + // Regular functions just need the basic required instruction alignment. + MF.setAlignment(MFI->isEntryFunction() ? 
8 : 2); SetupMachineFunction(MF); @@ -220,13 +226,19 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { OutStreamer->SwitchSection(CommentSection); if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) { - OutStreamer->emitRawComment(" Kernel info:", false); - OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen), - false); + if (MFI->isEntryFunction()) { + OutStreamer->emitRawComment(" Kernel info:", false); + } else { + OutStreamer->emitRawComment(" Function info:", false); + } + + OutStreamer->emitRawComment(" codeLenInByte = " + + Twine(getFunctionCodeSize(MF)), false); OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR), false); OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR), false); + OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode), false); OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode), @@ -236,6 +248,9 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) + " bytes/workgroup (compile time only)", false); + if (!MFI->isEntryFunction()) + return false; + OutStreamer->emitRawComment(" SGPRBlocks: " + Twine(KernelInfo.SGPRBlocks), false); OutStreamer->emitRawComment(" VGPRBlocks: " + @@ -317,7 +332,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { const MachineOperand &MO = MI.getOperand(op_idx); if (!MO.isReg()) continue; - unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff; + unsigned HWReg = RI->getHWRegIndex(MO.getReg()); // Register with value > 127 aren't GPR if (HWReg > 127) @@ -360,18 +375,12 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { } } -void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, - const MachineFunction &MF) const { +uint64_t AMDGPUAsmPrinter::getFunctionCodeSize(const MachineFunction &MF) const { const SISubtarget &STM = 
MF.getSubtarget<SISubtarget>(); - const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); - uint64_t CodeSize = 0; - unsigned MaxSGPR = 0; - unsigned MaxVGPR = 0; - bool VCCUsed = false; - bool FlatUsed = false; - const SIRegisterInfo *RI = STM.getRegisterInfo(); const SIInstrInfo *TII = STM.getInstrInfo(); + uint64_t CodeSize = 0; + for (const MachineBasicBlock &MBB : MF) { for (const MachineInstr &MI : MBB) { // TODO: CodeSize should account for multiple functions. @@ -380,122 +389,86 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, if (MI.isDebugValue()) continue; - if (isVerbose()) - CodeSize += TII->getInstSizeInBytes(MI); + CodeSize += TII->getInstSizeInBytes(MI); + } + } - unsigned numOperands = MI.getNumOperands(); - for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) { - const MachineOperand &MO = MI.getOperand(op_idx); - unsigned width = 0; - bool isSGPR = false; + return CodeSize; +} - if (!MO.isReg()) - continue; +static bool hasAnyNonFlatUseOfReg(const MachineRegisterInfo &MRI, + const SIInstrInfo &TII, + unsigned Reg) { + for (const MachineOperand &UseOp : MRI.reg_operands(Reg)) { + if (!UseOp.isImplicit() || !TII.isFLAT(*UseOp.getParent())) + return true; + } - unsigned reg = MO.getReg(); - switch (reg) { - case AMDGPU::EXEC: - case AMDGPU::EXEC_LO: - case AMDGPU::EXEC_HI: - case AMDGPU::SCC: - case AMDGPU::M0: - case AMDGPU::SRC_SHARED_BASE: - case AMDGPU::SRC_SHARED_LIMIT: - case AMDGPU::SRC_PRIVATE_BASE: - case AMDGPU::SRC_PRIVATE_LIMIT: - continue; + return false; +} - case AMDGPU::VCC: - case AMDGPU::VCC_LO: - case AMDGPU::VCC_HI: - VCCUsed = true; - continue; +void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, + const MachineFunction &MF) const { + const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); + const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + const SIInstrInfo *TII = STM.getInstrInfo(); + const 
SIRegisterInfo *RI = &TII->getRegisterInfo(); - case AMDGPU::FLAT_SCR: - case AMDGPU::FLAT_SCR_LO: - case AMDGPU::FLAT_SCR_HI: - // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat - // instructions aren't used to access the scratch buffer. - if (MFI->hasFlatScratchInit()) - FlatUsed = true; - continue; - case AMDGPU::TBA: - case AMDGPU::TBA_LO: - case AMDGPU::TBA_HI: - case AMDGPU::TMA: - case AMDGPU::TMA_LO: - case AMDGPU::TMA_HI: - llvm_unreachable("trap handler registers should not be used"); - - default: - break; - } - - if (AMDGPU::SReg_32RegClass.contains(reg)) { - assert(!AMDGPU::TTMP_32RegClass.contains(reg) && - "trap handler registers should not be used"); - isSGPR = true; - width = 1; - } else if (AMDGPU::VGPR_32RegClass.contains(reg)) { - isSGPR = false; - width = 1; - } else if (AMDGPU::SReg_64RegClass.contains(reg)) { - assert(!AMDGPU::TTMP_64RegClass.contains(reg) && - "trap handler registers should not be used"); - isSGPR = true; - width = 2; - } else if (AMDGPU::VReg_64RegClass.contains(reg)) { - isSGPR = false; - width = 2; - } else if (AMDGPU::VReg_96RegClass.contains(reg)) { - isSGPR = false; - width = 3; - } else if (AMDGPU::SReg_128RegClass.contains(reg)) { - isSGPR = true; - width = 4; - } else if (AMDGPU::VReg_128RegClass.contains(reg)) { - isSGPR = false; - width = 4; - } else if (AMDGPU::SReg_256RegClass.contains(reg)) { - isSGPR = true; - width = 8; - } else if (AMDGPU::VReg_256RegClass.contains(reg)) { - isSGPR = false; - width = 8; - } else if (AMDGPU::SReg_512RegClass.contains(reg)) { - isSGPR = true; - width = 16; - } else if (AMDGPU::VReg_512RegClass.contains(reg)) { - isSGPR = false; - width = 16; - } else { - llvm_unreachable("Unknown register class"); - } - unsigned hwReg = RI->getEncodingValue(reg) & 0xff; - unsigned maxUsed = hwReg + width - 1; - if (isSGPR) { - MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR; - } else { - MaxVGPR = maxUsed > MaxVGPR ? 
maxUsed : MaxVGPR; - } - } + MCPhysReg NumVGPRReg = AMDGPU::NoRegister; + for (MCPhysReg Reg : reverse(AMDGPU::VGPR_32RegClass.getRegisters())) { + if (MRI.isPhysRegUsed(Reg)) { + NumVGPRReg = Reg; + break; + } + } + + MCPhysReg NumSGPRReg = AMDGPU::NoRegister; + for (MCPhysReg Reg : reverse(AMDGPU::SGPR_32RegClass.getRegisters())) { + if (MRI.isPhysRegUsed(Reg)) { + NumSGPRReg = Reg; + break; } } + // We found the maximum register index. They start at 0, so add one to get the + // number of registers. + ProgInfo.NumVGPR = NumVGPRReg == AMDGPU::NoRegister ? 0 : + RI->getHWRegIndex(NumVGPRReg) + 1; + ProgInfo.NumSGPR = NumSGPRReg == AMDGPU::NoRegister ? 0 : + RI->getHWRegIndex(NumSGPRReg) + 1; unsigned ExtraSGPRs = 0; - if (VCCUsed) + ProgInfo.VCCUsed = MRI.isPhysRegUsed(AMDGPU::VCC_LO) || + MRI.isPhysRegUsed(AMDGPU::VCC_HI); + if (ProgInfo.VCCUsed) ExtraSGPRs = 2; + ProgInfo.FlatUsed = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) || + MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI); + + // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat + // instructions aren't used to access the scratch buffer. Inline assembly + // may need it though. + // + // If we only have implicit uses of flat_scr on flat instructions, it is not + // really needed. 
+ if (ProgInfo.FlatUsed && !MFI->hasFlatScratchInit() && + (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) && + !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) && + !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) { + ProgInfo.FlatUsed = false; + } + if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) { - if (FlatUsed) + if (ProgInfo.FlatUsed) ExtraSGPRs = 4; } else { if (STM.isXNACKEnabled()) ExtraSGPRs = 4; - if (FlatUsed) + if (ProgInfo.FlatUsed) ExtraSGPRs = 6; } @@ -505,34 +478,29 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && !STM.hasSGPRInitBug()) { unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs(); - if (MaxSGPR + 1 > MaxAddressableNumSGPRs) { + if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) { // This can happen due to a compiler bug or when using inline asm. LLVMContext &Ctx = MF.getFunction()->getContext(); DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "addressable scalar registers", - MaxSGPR + 1, DS_Error, + ProgInfo.NumSGPR, DS_Error, DK_ResourceLimit, MaxAddressableNumSGPRs); Ctx.diagnose(Diag); - MaxSGPR = MaxAddressableNumSGPRs - 1; + ProgInfo.NumSGPR = MaxAddressableNumSGPRs - 1; } } // Account for extra SGPRs and VGPRs reserved for debugger use. - MaxSGPR += ExtraSGPRs; - MaxVGPR += ExtraVGPRs; - - // We found the maximum register index. They start at 0, so add one to get the - // number of registers. - ProgInfo.NumSGPR = MaxSGPR + 1; - ProgInfo.NumVGPR = MaxVGPR + 1; + ProgInfo.NumSGPR += ExtraSGPRs; + ProgInfo.NumVGPR += ExtraVGPRs; // Adjust number of registers used to meet default/requested minimum/maximum // number of waves per execution unit request. 
ProgInfo.NumSGPRsForWavesPerEU = std::max( - ProgInfo.NumSGPR, STM.getMinNumSGPRs(MFI->getMaxWavesPerEU())); + std::max(ProgInfo.NumSGPR, 1u), STM.getMinNumSGPRs(MFI->getMaxWavesPerEU())); ProgInfo.NumVGPRsForWavesPerEU = std::max( - ProgInfo.NumVGPR, STM.getMinNumVGPRs(MFI->getMaxWavesPerEU())); + std::max(ProgInfo.NumVGPR, 1u), STM.getMinNumVGPRs(MFI->getMaxWavesPerEU())); if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS || STM.hasSGPRInitBug()) { @@ -559,10 +527,10 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG; } - if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) { + if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) { LLVMContext &Ctx = MF.getFunction()->getContext(); DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs", - MFI->NumUserSGPRs, DS_Error); + MFI->getNumUserSGPRs(), DS_Error); Ctx.diagnose(Diag); } @@ -584,7 +552,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, ProgInfo.VGPRBlocks = ProgInfo.VGPRBlocks / STM.getVGPREncodingGranule() - 1; // Record first reserved VGPR and number of reserved VGPRs. - ProgInfo.ReservedVGPRFirst = STM.debuggerReserveRegs() ? MaxVGPR + 1 : 0; + ProgInfo.ReservedVGPRFirst = STM.debuggerReserveRegs() ? ProgInfo.NumVGPR : 0; ProgInfo.ReservedVGPRCount = STM.getReservedNumVGPRs(MF); // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and @@ -609,10 +577,6 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, const MachineFrameInfo &FrameInfo = MF.getFrameInfo(); ProgInfo.ScratchSize = FrameInfo.getStackSize(); - ProgInfo.FlatUsed = FlatUsed; - ProgInfo.VCCUsed = VCCUsed; - ProgInfo.CodeLen = CodeSize; - unsigned LDSAlignShift; if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) { // LDS is allocated in 64 dword blocks. 
@@ -623,7 +587,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, } unsigned LDSSpillSize = - MFI->LDSWaveSpillSize * MFI->getMaxFlatWorkGroupSize(); + MFI->getLDSWaveSpillSize() * MFI->getMaxFlatWorkGroupSize(); ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize; ProgInfo.LDSBlocks = diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/lib/Target/AMDGPU/AMDGPUAsmPrinter.h index 13425c8b2a0f5..8c86dea4b885c 100644 --- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.h +++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.h @@ -55,7 +55,7 @@ private: uint32_t NumVGPR = 0; uint32_t NumSGPR = 0; - uint32_t LDSSize; + uint32_t LDSSize = 0; bool FlatUsed = false; // Number of SGPRs that meets number of waves per execution unit request. @@ -85,11 +85,11 @@ private: // Bonus information for debugging. bool VCCUsed = false; - uint64_t CodeLen = 0; SIProgramInfo() = default; }; + uint64_t getFunctionCodeSize(const MachineFunction &MF) const; void getSIProgramInfo(SIProgramInfo &Out, const MachineFunction &MF) const; void getAmdKernelCode(amd_kernel_code_t &Out, const SIProgramInfo &KernelInfo, const MachineFunction &MF) const; diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h index 36bc2498781fe..a5cda817ac11c 100644 --- a/lib/Target/AMDGPU/AMDGPUSubtarget.h +++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h @@ -415,9 +415,11 @@ public: return 0; } + // Scratch is allocated in 256 dword per wave blocks for the entire + // wavefront. When viewed from the perspecive of an arbitrary workitem, this + // is 4-byte aligned. unsigned getStackAlignment() const { - // Scratch is allocated in 256 dword per wave blocks. 
- return 4 * 256 / getWavefrontSize(); + return 4; } bool enableMachineScheduler() const override { diff --git a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp index 01ac9968181ac..6edd3e923ba11 100644 --- a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -426,16 +426,23 @@ static bool isArgPassedInSGPR(const Argument *A) { const Function *F = A->getParent(); // Arguments to compute shaders are never a source of divergence. - if (!AMDGPU::isShader(F->getCallingConv())) + CallingConv::ID CC = F->getCallingConv(); + switch (CC) { + case CallingConv::AMDGPU_KERNEL: + case CallingConv::SPIR_KERNEL: return true; - - // For non-compute shaders, SGPR inputs are marked with either inreg or byval. - if (F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) || - F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal)) - return true; - - // Everything else is in VGPRs. - return false; + case CallingConv::AMDGPU_VS: + case CallingConv::AMDGPU_GS: + case CallingConv::AMDGPU_PS: + case CallingConv::AMDGPU_CS: + // For non-compute shaders, SGPR inputs are marked with either inreg or byval. + // Everything else is in VGPRs. + return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) || + F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal); + default: + // TODO: Should calls support inreg for SGPR inputs? 
+ return false; + } } /// diff --git a/lib/Target/AMDGPU/DSInstructions.td b/lib/Target/AMDGPU/DSInstructions.td index a9f64589fa5ee..357e18108e7e8 100644 --- a/lib/Target/AMDGPU/DSInstructions.td +++ b/lib/Target/AMDGPU/DSInstructions.td @@ -255,8 +255,6 @@ class DS_1A1D_PERMUTE <string opName, SDPatternOperator node = null_frag> [(set i32:$vdst, (node (DS1Addr1Offset i32:$addr, i16:$offset), i32:$data0))] > { - let LGKM_CNT = 0; - let mayLoad = 0; let mayStore = 0; let isConvergent = 1; diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp index 1655591abf390..6c61fb1f2d6b0 100644 --- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp +++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp @@ -14,6 +14,7 @@ using namespace llvm; AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Triple &TT) : MCAsmInfoELF() { + CodePointerSize = (TT.getArch() == Triple::amdgcn) ? 8 : 4; HasSingleParameterDotFile = false; //===------------------------------------------------------------------===// MinInstAlignment = 4; diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp index 7268131396dc8..dd867b15b4c7f 100644 --- a/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/lib/Target/AMDGPU/SIISelLowering.cpp @@ -461,6 +461,13 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); + } else { + setOperationAction(ISD::SELECT, MVT::v2i16, Custom); + setOperationAction(ISD::SELECT, MVT::v2f16, Custom); + } + + for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) { + setOperationAction(ISD::SELECT, VT, Custom); } setTargetDAGCombine(ISD::FADD); @@ -2191,6 +2198,28 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N, break; } } + case ISD::SELECT: { + SDLoc SL(N); + EVT VT = N->getValueType(0); + EVT NewVT = 
getEquivalentMemType(*DAG.getContext(), VT); + SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); + SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); + + EVT SelectVT = NewVT; + if (NewVT.bitsLT(MVT::i32)) { + LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); + RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); + SelectVT = MVT::i32; + } + + SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, + N->getOperand(0), LHS, RHS); + + if (NewVT != SelectVT) + NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); + Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); + return; + } default: break; } diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/lib/Target/AMDGPU/SIMachineFunctionInfo.h index a84f3e274f82a..810fb05984c4f 100644 --- a/lib/Target/AMDGPU/SIMachineFunctionInfo.h +++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.h @@ -133,14 +133,12 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction { AMDGPUBufferPseudoSourceValue BufferPSV; AMDGPUImagePseudoSourceValue ImagePSV; -public: - // FIXME: Make private +private: unsigned LDSWaveSpillSize; unsigned ScratchOffsetReg; unsigned NumUserSGPRs; unsigned NumSystemSGPRs; -private: bool HasSpilledSGPRs; bool HasSpilledVGPRs; bool HasNonSpillStackObjects; @@ -535,6 +533,10 @@ public: llvm_unreachable("unexpected dimension"); } + unsigned getLDSWaveSpillSize() const { + return LDSWaveSpillSize; + } + const AMDGPUBufferPseudoSourceValue *getBufferPSV() const { return &BufferPSV; } diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp index 36d4df52ff0e3..098c67252dd8d 100644 --- a/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -124,7 +124,7 @@ unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg( unsigned RegCount = ST.getMaxNumSGPRs(MF); unsigned Reg; - // Try to place it in a hole after PrivateSegmentbufferReg. 
+ // Try to place it in a hole after PrivateSegmentBufferReg. if (RegCount & 3) { // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to // alignment constraints, so we have a hole where can put the wave offset. diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td index 57f9d1c6b6109..005b74a68af3e 100644 --- a/lib/Target/ARM/ARM.td +++ b/lib/Target/ARM/ARM.td @@ -67,8 +67,9 @@ def FeatureFullFP16 : SubtargetFeature<"fullfp16", "HasFullFP16", "true", [FeatureFPARMv8]>; def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true", "Restrict FP to 16 double registers">; -def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true", - "Enable divide instructions">; +def FeatureHWDivThumb : SubtargetFeature<"hwdiv", "HasHardwareDivideInThumb", + "true", + "Enable divide instructions in Thumb">; def FeatureHWDivARM : SubtargetFeature<"hwdiv-arm", "HasHardwareDivideInARM", "true", "Enable divide instructions in ARM mode">; @@ -225,7 +226,7 @@ def FeatureMP : SubtargetFeature<"mp", "HasMPExtension", "true", def FeatureVirtualization : SubtargetFeature<"virtualization", "HasVirtualization", "true", "Supports Virtualization extension", - [FeatureHWDiv, FeatureHWDivARM]>; + [FeatureHWDivThumb, FeatureHWDivARM]>; // M-series ISA def FeatureMClass : SubtargetFeature<"mclass", "ARMProcClass", "MClass", @@ -433,21 +434,21 @@ def ARMv7ve : Architecture<"armv7ve", "ARMv7ve", [HasV7Ops, def ARMv7r : Architecture<"armv7-r", "ARMv7r", [HasV7Ops, FeatureDB, FeatureDSP, - FeatureHWDiv, + FeatureHWDivThumb, FeatureRClass]>; def ARMv7m : Architecture<"armv7-m", "ARMv7m", [HasV7Ops, FeatureThumb2, FeatureNoARM, FeatureDB, - FeatureHWDiv, + FeatureHWDivThumb, FeatureMClass]>; def ARMv7em : Architecture<"armv7e-m", "ARMv7em", [HasV7Ops, FeatureThumb2, FeatureNoARM, FeatureDB, - FeatureHWDiv, + FeatureHWDivThumb, FeatureMClass, FeatureDSP]>; @@ -502,7 +503,7 @@ def ARMv8mBaseline : Architecture<"armv8-m.base", "ARMv8mBaseline", [HasV8MBaselineOps, FeatureNoARM, 
FeatureDB, - FeatureHWDiv, + FeatureHWDivThumb, FeatureV7Clrex, Feature8MSecExt, FeatureAcquireRelease, @@ -512,7 +513,7 @@ def ARMv8mMainline : Architecture<"armv8-m.main", "ARMv8mMainline", [HasV8MMainlineOps, FeatureNoARM, FeatureDB, - FeatureHWDiv, + FeatureHWDivThumb, Feature8MSecExt, FeatureAcquireRelease, FeatureMClass]>; @@ -678,7 +679,7 @@ def : ProcessorModel<"krait", CortexA9Model, [ARMv7a, ProcKrait, FeatureFP16, FeatureAvoidPartialCPSR, FeatureVFP4, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM]>; def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift, @@ -686,7 +687,7 @@ def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift, FeatureNEONForFP, FeatureVFP4, FeatureMP, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureAvoidPartialCPSR, FeatureAvoidMOVsShOp, @@ -768,39 +769,39 @@ def : ProcNoItin<"cortex-m33", [ARMv8mMainline, FeatureVFPOnlySP]>; def : ProcNoItin<"cortex-a32", [ARMv8a, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; def : ProcNoItin<"cortex-a35", [ARMv8a, ProcA35, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; def : ProcNoItin<"cortex-a53", [ARMv8a, ProcA53, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC, FeatureFPAO]>; def : ProcNoItin<"cortex-a57", [ARMv8a, ProcA57, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC, FeatureFPAO]>; def : ProcNoItin<"cortex-a72", [ARMv8a, ProcA72, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; def : ProcNoItin<"cortex-a73", [ARMv8a, ProcA73, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; @@ -811,7 +812,7 @@ def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift, FeatureNEONForFP, FeatureVFP4, FeatureMP, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureAvoidPartialCPSR, FeatureAvoidMOVsShOp, @@ -820,25 +821,25 @@ def : ProcessorModel<"cyclone", 
SwiftModel, [ARMv8a, ProcSwift, FeatureZCZeroing]>; def : ProcNoItin<"exynos-m1", [ARMv8a, ProcExynosM1, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; def : ProcNoItin<"exynos-m2", [ARMv8a, ProcExynosM1, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; def : ProcNoItin<"exynos-m3", [ARMv8a, ProcExynosM1, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; def : ProcNoItin<"kryo", [ARMv8a, ProcKryo, - FeatureHWDiv, + FeatureHWDivThumb, FeatureHWDivARM, FeatureCrypto, FeatureCRC]>; diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index eb0d410b596be..14e197f477f1d 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -589,12 +589,6 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) { ATS.finishAttributeSection(); } -static bool isV8M(const ARMSubtarget *Subtarget) { - // Note that v8M Baseline is a subset of v6T2! - return (Subtarget->hasV8MBaselineOps() && !Subtarget->hasV6T2Ops()) || - Subtarget->hasV8MMainlineOps(); -} - //===----------------------------------------------------------------------===// // Helper routines for EmitStartOfAsmFile() and EmitEndOfAsmFile() // FIXME: @@ -602,39 +596,6 @@ static bool isV8M(const ARMSubtarget *Subtarget) { // to appear in the .ARM.attributes section in ELF. // Instead of subclassing the MCELFStreamer, we do the work here. 
-static ARMBuildAttrs::CPUArch getArchForCPU(StringRef CPU, - const ARMSubtarget *Subtarget) { - if (CPU == "xscale") - return ARMBuildAttrs::v5TEJ; - - if (Subtarget->hasV8Ops()) { - if (Subtarget->isRClass()) - return ARMBuildAttrs::v8_R; - return ARMBuildAttrs::v8_A; - } else if (Subtarget->hasV8MMainlineOps()) - return ARMBuildAttrs::v8_M_Main; - else if (Subtarget->hasV7Ops()) { - if (Subtarget->isMClass() && Subtarget->hasDSP()) - return ARMBuildAttrs::v7E_M; - return ARMBuildAttrs::v7; - } else if (Subtarget->hasV6T2Ops()) - return ARMBuildAttrs::v6T2; - else if (Subtarget->hasV8MBaselineOps()) - return ARMBuildAttrs::v8_M_Base; - else if (Subtarget->hasV6MOps()) - return ARMBuildAttrs::v6S_M; - else if (Subtarget->hasV6Ops()) - return ARMBuildAttrs::v6; - else if (Subtarget->hasV5TEOps()) - return ARMBuildAttrs::v5TE; - else if (Subtarget->hasV5TOps()) - return ARMBuildAttrs::v5T; - else if (Subtarget->hasV4TOps()) - return ARMBuildAttrs::v4T; - else - return ARMBuildAttrs::v4; -} - // Returns true if all functions have the same function attribute value. // It also returns true when the module has no functions. 
static bool checkFunctionsAttributeConsistency(const Module &M, StringRef Attr, @@ -671,89 +632,8 @@ void ARMAsmPrinter::emitAttributes() { static_cast<const ARMBaseTargetMachine &>(TM); const ARMSubtarget STI(TT, CPU, ArchFS, ATM, ATM.isLittleEndian()); - const std::string &CPUString = STI.getCPUString(); - - if (!StringRef(CPUString).startswith("generic")) { - // FIXME: remove krait check when GNU tools support krait cpu - if (STI.isKrait()) { - ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9"); - // We consider krait as a "cortex-a9" + hwdiv CPU - // Enable hwdiv through ".arch_extension idiv" - if (STI.hasDivide() || STI.hasDivideInARMMode()) - ATS.emitArchExtension(ARM::AEK_HWDIV | ARM::AEK_HWDIVARM); - } else - ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString); - } - - ATS.emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(CPUString, &STI)); - - // Tag_CPU_arch_profile must have the default value of 0 when "Architecture - // profile is not applicable (e.g. pre v7, or cross-profile code)". - if (STI.hasV7Ops() || isV8M(&STI)) { - if (STI.isAClass()) { - ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile, - ARMBuildAttrs::ApplicationProfile); - } else if (STI.isRClass()) { - ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile, - ARMBuildAttrs::RealTimeProfile); - } else if (STI.isMClass()) { - ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile, - ARMBuildAttrs::MicroControllerProfile); - } - } - - ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use, - STI.hasARMOps() ? 
ARMBuildAttrs::Allowed - : ARMBuildAttrs::Not_Allowed); - if (isV8M(&STI)) { - ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use, - ARMBuildAttrs::AllowThumbDerived); - } else if (STI.isThumb1Only()) { - ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed); - } else if (STI.hasThumb2()) { - ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use, - ARMBuildAttrs::AllowThumb32); - } - - if (STI.hasNEON()) { - /* NEON is not exactly a VFP architecture, but GAS emit one of - * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */ - if (STI.hasFPARMv8()) { - if (STI.hasCrypto()) - ATS.emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8); - else - ATS.emitFPU(ARM::FK_NEON_FP_ARMV8); - } else if (STI.hasVFP4()) - ATS.emitFPU(ARM::FK_NEON_VFPV4); - else - ATS.emitFPU(STI.hasFP16() ? ARM::FK_NEON_FP16 : ARM::FK_NEON); - // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture - if (STI.hasV8Ops()) - ATS.emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch, - STI.hasV8_1aOps() ? ARMBuildAttrs::AllowNeonARMv8_1a: - ARMBuildAttrs::AllowNeonARMv8); - } else { - if (STI.hasFPARMv8()) - // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one - // FPU, but there are two different names for it depending on the CPU. - ATS.emitFPU(STI.hasD16() - ? (STI.isFPOnlySP() ? ARM::FK_FPV5_SP_D16 : ARM::FK_FPV5_D16) - : ARM::FK_FP_ARMV8); - else if (STI.hasVFP4()) - ATS.emitFPU(STI.hasD16() - ? (STI.isFPOnlySP() ? ARM::FK_FPV4_SP_D16 : ARM::FK_VFPV4_D16) - : ARM::FK_VFPV4); - else if (STI.hasVFP3()) - ATS.emitFPU(STI.hasD16() - // +d16 - ? (STI.isFPOnlySP() - ? (STI.hasFP16() ? ARM::FK_VFPV3XD_FP16 : ARM::FK_VFPV3XD) - : (STI.hasFP16() ? ARM::FK_VFPV3_D16_FP16 : ARM::FK_VFPV3_D16)) - // -d16 - : (STI.hasFP16() ? ARM::FK_VFPV3_FP16 : ARM::FK_VFPV3)); - else if (STI.hasVFP2()) - ATS.emitFPU(ARM::FK_VFPV2); - } + // Emit build attributes for the available hardware. + ATS.emitTargetAttributes(STI); // RW data addressing. 
if (isPositionIndependent()) { @@ -846,32 +726,15 @@ void ARMAsmPrinter::emitAttributes() { ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model, ARMBuildAttrs::AllowIEEE754); - if (STI.allowsUnalignedMem()) - ATS.emitAttribute(ARMBuildAttrs::CPU_unaligned_access, - ARMBuildAttrs::Allowed); - else - ATS.emitAttribute(ARMBuildAttrs::CPU_unaligned_access, - ARMBuildAttrs::Not_Allowed); - // FIXME: add more flags to ARMBuildAttributes.h // 8-bytes alignment stuff. ATS.emitAttribute(ARMBuildAttrs::ABI_align_needed, 1); ATS.emitAttribute(ARMBuildAttrs::ABI_align_preserved, 1); - // ABI_HardFP_use attribute to indicate single precision FP. - if (STI.isFPOnlySP()) - ATS.emitAttribute(ARMBuildAttrs::ABI_HardFP_use, - ARMBuildAttrs::HardFPSinglePrecision); - // Hard float. Use both S and D registers and conform to AAPCS-VFP. if (STI.isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard) ATS.emitAttribute(ARMBuildAttrs::ABI_VFP_args, ARMBuildAttrs::HardFPAAPCS); - // FIXME: Should we signal R9 usage? - - if (STI.hasFP16()) - ATS.emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP); - // FIXME: To support emitting this build attribute as GCC does, the // -mfp16-format option and associated plumbing must be // supported. For now the __fp16 type is exposed by default, so this @@ -879,21 +742,6 @@ void ARMAsmPrinter::emitAttributes() { ATS.emitAttribute(ARMBuildAttrs::ABI_FP_16bit_format, ARMBuildAttrs::FP16FormatIEEE); - if (STI.hasMPExtension()) - ATS.emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP); - - // Hardware divide in ARM mode is part of base arch, starting from ARMv8. - // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M). - // It is not possible to produce DisallowDIV: if hwdiv is present in the base - // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits. 
- // AllowDIVExt is only emitted if hwdiv isn't available in the base arch; - // otherwise, the default value (AllowDIVIfExists) applies. - if (STI.hasDivideInARMMode() && !STI.hasV8Ops()) - ATS.emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt); - - if (STI.hasDSP() && isV8M(&STI)) - ATS.emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed); - if (MMI) { if (const Module *SourceModule = MMI->getModule()) { // ABI_PCS_wchar_t to indicate wchar_t width @@ -930,16 +778,6 @@ void ARMAsmPrinter::emitAttributes() { else ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_R9_use, ARMBuildAttrs::R9IsGPR); - - if (STI.hasTrustZone() && STI.hasVirtualization()) - ATS.emitAttribute(ARMBuildAttrs::Virtualization_use, - ARMBuildAttrs::AllowTZVirtualization); - else if (STI.hasTrustZone()) - ATS.emitAttribute(ARMBuildAttrs::Virtualization_use, - ARMBuildAttrs::AllowTZ); - else if (STI.hasVirtualization()) - ATS.emitAttribute(ARMBuildAttrs::Virtualization_use, - ARMBuildAttrs::AllowVirtualization); } //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 23777b821f9f3..faf1c631a3a77 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -404,6 +404,29 @@ public: /// Returns true if the instruction has a shift by immediate that can be /// executed in one cycle less. bool isSwiftFastImmShift(const MachineInstr *MI) const; + + /// Returns predicate register associated with the given frame instruction. 
+ unsigned getFramePred(const MachineInstr &MI) const { + assert(isFrameInstr(MI)); + if (isFrameSetup(MI)) + // Operands of ADJCALLSTACKDOWN: + // - argument declared in ADJCALLSTACKDOWN pattern: + // 0 - frame size + // 1 - predicate code (like ARMCC::AL) + // - added by predOps: + // 2 - predicate reg + return MI.getOperand(2).getReg(); + assert(MI.getOpcode() == ARM::ADJCALLSTACKUP || + MI.getOpcode() == ARM::tADJCALLSTACKUP); + // Operands of ADJCALLSTACKUP: + // - argument declared in ADJCALLSTACKUP pattern: + // 0 - frame size + // 1 - arg of CALLSEQ_END + // 2 - predicate code + // - added by predOps: + // 3 - predicate reg + return MI.getOperand(3).getReg(); + } }; /// Get the operands corresponding to the given \p Pred value. By default, the diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td index 7a7b7fede7c83..bc7afdb7f1c9f 100644 --- a/lib/Target/ARM/ARMCallingConv.td +++ b/lib/Target/ARM/ARMCallingConv.td @@ -273,9 +273,9 @@ def CSR_iOS_SwiftError : CalleeSavedRegs<(sub CSR_iOS, R8)>; def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS_ThisReturn, R9))>; -def CSR_iOS_TLSCall : CalleeSavedRegs<(add LR, SP, - (sequence "R%u", 12, 1), - (sequence "D%u", 31, 0))>; +def CSR_iOS_TLSCall + : CalleeSavedRegs<(add LR, SP, (sub(sequence "R%u", 12, 1), R9, R12), + (sequence "D%u", 31, 0))>; // C++ TLS access function saves all registers except SP. Try to match // the order of CSRs in CSR_iOS. 
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index 23722f1b7f3ff..6434df317aa8d 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -1741,10 +1741,9 @@ bool ARMConstantIslands::undoLRSpillRestore() { .add(MI->getOperand(1)); MI->eraseFromParent(); MadeChange = true; - } - if (MI->getOpcode() == ARM::tPUSH && - MI->getOperand(2).getReg() == ARM::LR && - MI->getNumExplicitOperands() == 3) { + } else if (MI->getOpcode() == ARM::tPUSH && + MI->getOperand(2).getReg() == ARM::LR && + MI->getNumExplicitOperands() == 3) { // Just remove the push. MI->eraseFromParent(); MadeChange = true; @@ -2158,6 +2157,15 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() { // If we're in PIC mode, there should be another ADD following. auto *TRI = STI->getRegisterInfo(); + + // %base cannot be redefined after the load as it will appear before + // TBB/TBH like: + // %base = + // %base = + // tBB %base, %idx + if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI)) + continue; + if (isPositionIndependentOrROPI) { MachineInstr *Add = Load->getNextNode(); if (Add->getOpcode() != ARM::tADDrr || diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 01e062bd185ce..e9bc7db66fa40 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -1702,7 +1702,8 @@ bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { // If we have integer div support we should have selected this automagically. // In case we have a real miss go ahead and return false and we'll pick // it up later. - if (Subtarget->hasDivide()) return false; + if (Subtarget->hasDivideInThumbMode()) + return false; // Otherwise emit a libcall. 
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index 37be22bed5408..70dbe1bc5b95e 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -322,6 +322,18 @@ static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI, } } +/// We need the offset of the frame pointer relative to other MachineFrameInfo +/// offsets which are encoded relative to SP at function begin. +/// See also emitPrologue() for how the FP is set up. +/// Unfortunately we cannot determine this value in determineCalleeSaves() yet +/// as assignCalleeSavedSpillSlots() hasn't run at this point. Instead we use +/// this to produce a conservative estimate that we check in an assert() later. +static int getMaxFPOffset(const Function &F, const ARMFunctionInfo &AFI) { + // This is a conservative estimation: Assume the frame pointer being r7 and + // pc("r15") up to r8 getting spilled before (= 8 registers). + return -AFI.getArgRegsSaveSize() - (8 * 4); +} + void ARMFrameLowering::emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const { MachineBasicBlock::iterator MBBI = MBB.begin(); @@ -432,8 +444,10 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF, unsigned DPRCSOffset = GPRCS2Offset - DPRGapSize - DPRCSSize; int FramePtrOffsetInPush = 0; if (HasFP) { - FramePtrOffsetInPush = - MFI.getObjectOffset(FramePtrSpillFI) + ArgRegsSaveSize; + int FPOffset = MFI.getObjectOffset(FramePtrSpillFI); + assert(getMaxFPOffset(*MF.getFunction(), *AFI) <= FPOffset && + "Max FP estimation is wrong"); + FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize; AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) + NumBytes); } @@ -1700,6 +1714,14 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF, // worth the effort and added fragility? 
unsigned EstimatedStackSize = MFI.estimateStackSize(MF) + 4 * (NumGPRSpills + NumFPRSpills); + + // Determine biggest (positive) SP offset in MachineFrameInfo. + int MaxFixedOffset = 0; + for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) { + int MaxObjectOffset = MFI.getObjectOffset(I) + MFI.getObjectSize(I); + MaxFixedOffset = std::max(MaxFixedOffset, MaxObjectOffset); + } + bool HasFP = hasFP(MF); if (HasFP) { if (AFI->hasStackFrame()) @@ -1707,15 +1729,20 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF, } else { // If FP is not used, SP will be used to access arguments, so count the // size of arguments into the estimation. - EstimatedStackSize += AFI->getArgumentStackSize(); + EstimatedStackSize += MaxFixedOffset; } EstimatedStackSize += 16; // For possible paddings. - bool BigStack = EstimatedStackSize >= estimateRSStackSizeLimit(MF, this) || - MFI.hasVarSizedObjects() || - (MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)); + unsigned EstimatedRSStackSizeLimit = estimateRSStackSizeLimit(MF, this); + int MaxFPOffset = getMaxFPOffset(*MF.getFunction(), *AFI); + bool BigFrameOffsets = EstimatedStackSize >= EstimatedRSStackSizeLimit || + MFI.hasVarSizedObjects() || + (MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)) || + // For large argument stacks fp relative addressed may overflow. + (HasFP && (MaxFixedOffset - MaxFPOffset) >= (int)EstimatedRSStackSizeLimit); bool ExtraCSSpill = false; - if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) { + if (BigFrameOffsets || + !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) { AFI->setHasStackFrame(true); if (HasFP) { @@ -1899,7 +1926,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF, // callee-saved register or reserve a special spill slot to facilitate // register scavenging. Thumb1 needs a spill slot for stack pointer // adjustments also, even when the frame itself is small. 
- if (BigStack && !ExtraCSSpill) { + if (BigFrameOffsets && !ExtraCSSpill) { // If any non-reserved CS register isn't spilled, just spill one or two // extra. That should take care of it! unsigned NumExtras = TargetAlign / 4; @@ -1958,7 +1985,7 @@ MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr( // ADJCALLSTACKUP -> add, sp, sp, amount MachineInstr &Old = *I; DebugLoc dl = Old.getDebugLoc(); - unsigned Amount = Old.getOperand(0).getImm(); + unsigned Amount = TII.getFrameSize(Old); if (Amount != 0) { // We need to keep the stack aligned properly. To do this, we round the // amount of space needed for the outgoing arguments up to the next @@ -1976,14 +2003,11 @@ MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr( ARMCC::CondCodes Pred = (PIdx == -1) ? ARMCC::AL : (ARMCC::CondCodes)Old.getOperand(PIdx).getImm(); + unsigned PredReg = TII.getFramePred(Old); if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) { - // Note: PredReg is operand 2 for ADJCALLSTACKDOWN. - unsigned PredReg = Old.getOperand(2).getReg(); emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags, Pred, PredReg); } else { - // Note: PredReg is operand 3 for ADJCALLSTACKUP. - unsigned PredReg = Old.getOperand(3).getReg(); assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP); emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags, Pred, PredReg); diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index b07b4e1f5cfbd..e9df9449103c1 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -228,11 +228,6 @@ private: const uint16_t *DOpcodes, const uint16_t *QOpcodes = nullptr); - /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2, - /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be - /// generated to force the table registers to be consecutive. 
- void SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc); - /// Try to select SBFX/UBFX instructions for ARM. bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned); @@ -544,11 +539,11 @@ bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N, SDValue NewMulConst; if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) { HandleSDNode Handle(N); + SDLoc Loc(N); replaceDAGValue(N.getOperand(1), NewMulConst); BaseReg = Handle.getValue(); - Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ARM_AM::lsl, - PowerOfTwo), - SDLoc(N), MVT::i32); + Opc = CurDAG->getTargetConstant( + ARM_AM::getSORegOpc(ARM_AM::lsl, PowerOfTwo), Loc, MVT::i32); return true; } } @@ -1859,6 +1854,14 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) { return Opc; // If not one we handle, return it unchanged. } +/// Returns true if the given increment is a Constant known to be equal to the +/// access size performed by a NEON load/store. This means the "[rN]!" form can +/// be used. +static bool isPerfectIncrement(SDValue Inc, EVT VecTy, unsigned NumVecs) { + auto C = dyn_cast<ConstantSDNode>(Inc); + return C && C->getZExtValue() == VecTy.getSizeInBits() / 8 * NumVecs; +} + void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, const uint16_t *DOpcodes, const uint16_t *QOpcodes0, @@ -1926,13 +1929,13 @@ void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, SDValue Inc = N->getOperand(AddrOpIdx + 1); // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0 // case entirely when the rest are updated to that form, too. - if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode())) + bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs); + if ((NumVecs <= 2) && !IsImmUpdate) Opc = getVLDSTRegisterUpdateOpcode(Opc); // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so // check for that explicitly too. Horribly hacky, but temporary. 
- if ((NumVecs > 2 && !isVLDfixed(Opc)) || - !isa<ConstantSDNode>(Inc.getNode())) - Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); + if ((NumVecs > 2 && !isVLDfixed(Opc)) || !IsImmUpdate) + Ops.push_back(IsImmUpdate ? Reg0 : Inc); } Ops.push_back(Pred); Ops.push_back(Reg0); @@ -2080,11 +2083,12 @@ void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, SDValue Inc = N->getOperand(AddrOpIdx + 1); // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0 // case entirely when the rest are updated to that form, too. - if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode())) + bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs); + if (NumVecs <= 2 && !IsImmUpdate) Opc = getVLDSTRegisterUpdateOpcode(Opc); // FIXME: We use a VST1 for v1i64 even if the pseudo says vld2/3/4, so // check for that explicitly too. Horribly hacky, but temporary. - if (!isa<ConstantSDNode>(Inc.getNode())) + if (!IsImmUpdate) Ops.push_back(Inc); else if (NumVecs > 2 && !isVSTfixed(Opc)) Ops.push_back(Reg0); @@ -2214,7 +2218,9 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating, Ops.push_back(Align); if (isUpdating) { SDValue Inc = N->getOperand(AddrOpIdx + 1); - Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); + bool IsImmUpdate = + isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs); + Ops.push_back(IsImmUpdate ? Reg0 : Inc); } SDValue SuperReg; @@ -2318,9 +2324,11 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs, // fixed-stride update instructions don't have an explicit writeback // operand. It's implicit in the opcode itself. 
SDValue Inc = N->getOperand(2); - if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode())) + bool IsImmUpdate = + isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs); + if (NumVecs <= 2 && !IsImmUpdate) Opc = getVLDSTRegisterUpdateOpcode(Opc); - if (!isa<ConstantSDNode>(Inc.getNode())) + if (!IsImmUpdate) Ops.push_back(Inc); // FIXME: VLD3 and VLD4 haven't been updated to that form yet. else if (NumVecs > 2) @@ -2356,39 +2364,6 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs, CurDAG->RemoveDeadNode(N); } -void ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, - unsigned Opc) { - assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range"); - SDLoc dl(N); - EVT VT = N->getValueType(0); - unsigned FirstTblReg = IsExt ? 2 : 1; - - // Form a REG_SEQUENCE to force register allocation. - SDValue RegSeq; - SDValue V0 = N->getOperand(FirstTblReg + 0); - SDValue V1 = N->getOperand(FirstTblReg + 1); - if (NumVecs == 2) - RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0); - else { - SDValue V2 = N->getOperand(FirstTblReg + 2); - // If it's a vtbl3, form a quad D-register and leave the last part as - // an undef. - SDValue V3 = (NumVecs == 3) - ? 
SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) - : N->getOperand(FirstTblReg + 3); - RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0); - } - - SmallVector<SDValue, 6> Ops; - if (IsExt) - Ops.push_back(N->getOperand(1)); - Ops.push_back(RegSeq); - Ops.push_back(N->getOperand(FirstTblReg + NumVecs)); - Ops.push_back(getAL(CurDAG, dl)); // predicate - Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register - ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops)); -} - bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) { if (!Subtarget->hasV6T2Ops()) return false; @@ -3730,59 +3705,6 @@ void ARMDAGToDAGISel::Select(SDNode *N) { break; } - case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); - switch (IntNo) { - default: - break; - - case Intrinsic::arm_neon_vtbl2: - SelectVTBL(N, false, 2, ARM::VTBL2); - return; - case Intrinsic::arm_neon_vtbl3: - SelectVTBL(N, false, 3, ARM::VTBL3Pseudo); - return; - case Intrinsic::arm_neon_vtbl4: - SelectVTBL(N, false, 4, ARM::VTBL4Pseudo); - return; - - case Intrinsic::arm_neon_vtbx2: - SelectVTBL(N, true, 2, ARM::VTBX2); - return; - case Intrinsic::arm_neon_vtbx3: - SelectVTBL(N, true, 3, ARM::VTBX3Pseudo); - return; - case Intrinsic::arm_neon_vtbx4: - SelectVTBL(N, true, 4, ARM::VTBX4Pseudo); - return; - } - break; - } - - case ARMISD::VTBL1: { - SDLoc dl(N); - EVT VT = N->getValueType(0); - SDValue Ops[] = {N->getOperand(0), N->getOperand(1), - getAL(CurDAG, dl), // Predicate - CurDAG->getRegister(0, MVT::i32)}; // Predicate Register - ReplaceNode(N, CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops)); - return; - } - case ARMISD::VTBL2: { - SDLoc dl(N); - EVT VT = N->getValueType(0); - - // Form a REG_SEQUENCE to force register allocation. 
- SDValue V0 = N->getOperand(0); - SDValue V1 = N->getOperand(1); - SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0); - - SDValue Ops[] = {RegSeq, N->getOperand(2), getAL(CurDAG, dl), // Predicate - CurDAG->getRegister(0, MVT::i32)}; // Predicate Register - ReplaceNode(N, CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops)); - return; - } - case ISD::ATOMIC_CMP_SWAP: SelectCMP_SWAP(N); return; diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index e697c8ca5339e..165e9b7378c7c 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -852,7 +852,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, if (!Subtarget->hasV6Ops()) setOperationAction(ISD::BSWAP, MVT::i32, Expand); - bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide() + bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() : Subtarget->hasDivideInARMMode(); if (!hasDivide) { // These are expanded into libcalls if the cpu doesn't have HW divider. 
@@ -860,7 +860,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setOperationAction(ISD::UDIV, MVT::i32, LibCall); } - if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) { + if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { setOperationAction(ISD::SDIV, MVT::i32, Custom); setOperationAction(ISD::UDIV, MVT::i32, Custom); @@ -2633,7 +2633,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { return true; } -bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { +bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { if (!Subtarget->supportsTailCall()) return false; @@ -3347,6 +3347,12 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); } + case Intrinsic::arm_neon_vtbl1: + return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::arm_neon_vtbl2: + return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(), + Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); } } @@ -10867,11 +10873,8 @@ static SDValue CombineBaseUpdate(SDNode *N, // If the increment is a constant, it must match the memory ref size. SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); - if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { - uint64_t IncVal = CInc->getZExtValue(); - if (IncVal != NumBytes) - continue; - } else if (NumBytes >= 3 * 16) { + ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); + if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) { // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two // separate instructions that make it harder to use a non-constant update. 
continue; @@ -11688,34 +11691,6 @@ static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } -static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero, - APInt &KnownOne) { - if (Op.getOpcode() == ARMISD::BFI) { - // Conservatively, we can recurse down the first operand - // and just mask out all affected bits. - computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne); - - // The operand to BFI is already a mask suitable for removing the bits it - // sets. - ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); - const APInt &Mask = CI->getAPIntValue(); - KnownZero &= Mask; - KnownOne &= Mask; - return; - } - if (Op.getOpcode() == ARMISD::CMOV) { - APInt KZ2(KnownZero.getBitWidth(), 0); - APInt KO2(KnownOne.getBitWidth(), 0); - computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne); - computeKnownBits(DAG, Op.getOperand(1), KZ2, KO2); - - KnownZero &= KZ2; - KnownOne &= KO2; - return; - } - return DAG.computeKnownBits(Op, KnownZero, KnownOne); -} - SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { // If we have a CMOV, OR and AND combination such as: // if (x & CN) @@ -11777,7 +11752,7 @@ SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &D // Lastly, can we determine that the bits defined by OrCI // are zero in Y? APInt KnownZero, KnownOne; - computeKnownBits(DAG, Y, KnownZero, KnownOne); + DAG.computeKnownBits(Y, KnownZero, KnownOne); if ((OrCI & KnownZero) != OrCI) return SDValue(); @@ -12657,6 +12632,19 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, } } } + case ARMISD::BFI: { + // Conservatively, we can recurse down the first operand + // and just mask out all affected bits. + DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1); + + // The operand to BFI is already a mask suitable for removing the bits it + // sets. 
+ ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); + const APInt &Mask = CI->getAPIntValue(); + KnownZero &= Mask; + KnownOne &= Mask; + return; + } } } @@ -13052,7 +13040,9 @@ SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { // rem = a - b * div // return {div, rem} // This should be lowered into UDIV/SDIV + MLS later on. - if (Subtarget->hasDivide() && Op->getValueType(0).isSimple() && + bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() + : Subtarget->hasDivideInARMMode(); + if (hasDivide && Op->getValueType(0).isSimple() && Op->getSimpleValueType(0) == MVT::i32) { unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; const SDValue Dividend = Op->getOperand(0); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 70a0b1380ec98..8b54ce430ed2c 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -717,7 +717,7 @@ class InstrItineraryData; bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override; - bool mayBeEmittedAsTailCall(CallInst *CI) const override; + bool mayBeEmittedAsTailCall(const CallInst *CI) const override; SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal, SDValue ARMcc, SDValue CCR, SDValue Cmp, diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index cc0e7d4d9c359..703e8071b1777 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -259,8 +259,8 @@ def HasFP16 : Predicate<"Subtarget->hasFP16()">, AssemblerPredicate<"FeatureFP16","half-float conversions">; def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">, AssemblerPredicate<"FeatureFullFP16","full half-float">; -def HasDivide : Predicate<"Subtarget->hasDivide()">, - AssemblerPredicate<"FeatureHWDiv", "divide in THUMB">; +def HasDivideInThumb : Predicate<"Subtarget->hasDivideInThumbMode()">, + AssemblerPredicate<"FeatureHWDivThumb", "divide in THUMB">; def HasDivideInARM : 
Predicate<"Subtarget->hasDivideInARMMode()">, AssemblerPredicate<"FeatureHWDivARM", "divide in ARM">; def HasDSP : Predicate<"Subtarget->hasDSP()">, diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td index 681e235d78f08..9b08c612e16bf 100644 --- a/lib/Target/ARM/ARMInstrNEON.td +++ b/lib/Target/ARM/ARMInstrNEON.td @@ -587,6 +587,14 @@ def SDTARMVMULL : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, def NEONvmulls : SDNode<"ARMISD::VMULLs", SDTARMVMULL>; def NEONvmullu : SDNode<"ARMISD::VMULLu", SDTARMVMULL>; +def SDTARMVTBL1 : SDTypeProfile<1, 2, [SDTCisVT<0, v8i8>, SDTCisVT<1, v8i8>, + SDTCisVT<2, v8i8>]>; +def SDTARMVTBL2 : SDTypeProfile<1, 3, [SDTCisVT<0, v8i8>, SDTCisVT<1, v8i8>, + SDTCisVT<2, v8i8>, SDTCisVT<3, v8i8>]>; +def NEONvtbl1 : SDNode<"ARMISD::VTBL1", SDTARMVTBL1>; +def NEONvtbl2 : SDNode<"ARMISD::VTBL2", SDTARMVTBL2>; + + def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{ ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0)); unsigned EltBits = 0; @@ -6443,7 +6451,8 @@ def VTBL1 : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd), (ins VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1, "vtbl", "8", "$Vd, $Vn, $Vm", "", - [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 VecListOneD:$Vn, DPR:$Vm)))]>; + [(set DPR:$Vd, (v8i8 (NEONvtbl1 VecListOneD:$Vn, DPR:$Vm)))]>; + let hasExtraSrcRegAllocReq = 1 in { def VTBL2 : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd), @@ -6498,6 +6507,49 @@ def VTBX4Pseudo IIC_VTBX4, "$orig = $dst", []>; } // DecoderMethod = "DecodeTBLInstruction" +def : Pat<(v8i8 (NEONvtbl2 v8i8:$Vn0, v8i8:$Vn1, v8i8:$Vm)), + (v8i8 (VTBL2 (REG_SEQUENCE DPair, v8i8:$Vn0, dsub_0, + v8i8:$Vn1, dsub_1), + v8i8:$Vm))>; +def : Pat<(v8i8 (int_arm_neon_vtbx2 v8i8:$orig, v8i8:$Vn0, v8i8:$Vn1, + v8i8:$Vm)), + (v8i8 (VTBX2 v8i8:$orig, + (REG_SEQUENCE DPair, v8i8:$Vn0, dsub_0, + v8i8:$Vn1, dsub_1), + v8i8:$Vm))>; + +def : Pat<(v8i8 (int_arm_neon_vtbl3 v8i8:$Vn0, v8i8:$Vn1, + v8i8:$Vn2, v8i8:$Vm)), + (v8i8 (VTBL3Pseudo (REG_SEQUENCE 
QQPR, v8i8:$Vn0, dsub_0, + v8i8:$Vn1, dsub_1, + v8i8:$Vn2, dsub_2, + (v8i8 (IMPLICIT_DEF)), dsub_3), + v8i8:$Vm))>; +def : Pat<(v8i8 (int_arm_neon_vtbx3 v8i8:$orig, v8i8:$Vn0, v8i8:$Vn1, + v8i8:$Vn2, v8i8:$Vm)), + (v8i8 (VTBX3Pseudo v8i8:$orig, + (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0, + v8i8:$Vn1, dsub_1, + v8i8:$Vn2, dsub_2, + (v8i8 (IMPLICIT_DEF)), dsub_3), + v8i8:$Vm))>; + +def : Pat<(v8i8 (int_arm_neon_vtbl4 v8i8:$Vn0, v8i8:$Vn1, + v8i8:$Vn2, v8i8:$Vn3, v8i8:$Vm)), + (v8i8 (VTBL4Pseudo (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0, + v8i8:$Vn1, dsub_1, + v8i8:$Vn2, dsub_2, + v8i8:$Vn3, dsub_3), + v8i8:$Vm))>; +def : Pat<(v8i8 (int_arm_neon_vtbx4 v8i8:$orig, v8i8:$Vn0, v8i8:$Vn1, + v8i8:$Vn2, v8i8:$Vn3, v8i8:$Vm)), + (v8i8 (VTBX4Pseudo v8i8:$orig, + (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0, + v8i8:$Vn1, dsub_1, + v8i8:$Vn2, dsub_2, + v8i8:$Vn3, dsub_3), + v8i8:$Vm))>; + // VRINT : Vector Rounding multiclass VRINT_FPI<string op, bits<3> op9_7, SDPatternOperator Int> { let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in { diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index f5b673b78ad71..f710ee6a7e774 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -2797,7 +2797,7 @@ def t2SMLSLDX : T2DualHalfMulAddLong<0b101, 0b1101, "smlsldx">; def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV, "sdiv", "\t$Rd, $Rn, $Rm", [(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>, - Requires<[HasDivide, IsThumb, HasV8MBaseline]>, + Requires<[HasDivideInThumb, IsThumb, HasV8MBaseline]>, Sched<[WriteDIV]> { let Inst{31-27} = 0b11111; let Inst{26-21} = 0b011100; @@ -2809,7 +2809,7 @@ def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV, def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV, "udiv", "\t$Rd, $Rn, $Rm", [(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>, - Requires<[HasDivide, IsThumb, HasV8MBaseline]>, + 
Requires<[HasDivideInThumb, IsThumb, HasV8MBaseline]>, Sched<[WriteDIV]> { let Inst{31-27} = 0b11111; let Inst{26-21} = 0b011101; diff --git a/lib/Target/ARM/ARMInstructionSelector.cpp b/lib/Target/ARM/ARMInstructionSelector.cpp index 8d224d6a70fa8..816596b857214 100644 --- a/lib/Target/ARM/ARMInstructionSelector.cpp +++ b/lib/Target/ARM/ARMInstructionSelector.cpp @@ -299,6 +299,20 @@ bool ARMInstructionSelector::select(MachineInstr &I) const { I.setDesc(TII.get(ARM::ADDrr)); MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); break; + case G_SUB: + I.setDesc(TII.get(ARM::SUBrr)); + MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); + break; + case G_MUL: + if (TII.getSubtarget().hasV6Ops()) { + I.setDesc(TII.get(ARM::MUL)); + } else { + assert(TII.getSubtarget().useMulOps() && "Unsupported target"); + I.setDesc(TII.get(ARM::MULv5)); + MIB->getOperand(0).setIsEarlyClobber(true); + } + MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); + break; case G_FADD: if (!selectFAdd(MIB, TII, MRI)) return false; diff --git a/lib/Target/ARM/ARMLegalizerInfo.cpp b/lib/Target/ARM/ARMLegalizerInfo.cpp index 994bbd673dd87..fe9681439e6b5 100644 --- a/lib/Target/ARM/ARMLegalizerInfo.cpp +++ b/lib/Target/ARM/ARMLegalizerInfo.cpp @@ -43,8 +43,9 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) { setAction({Op, 1, p0}, Legal); } - for (auto Ty : {s1, s8, s16, s32}) - setAction({G_ADD, Ty}, Legal); + for (unsigned Op : {G_ADD, G_SUB, G_MUL}) + for (auto Ty : {s1, s8, s16, s32}) + setAction({Op, Ty}, Legal); for (unsigned Op : {G_SEXT, G_ZEXT}) { setAction({Op, s32}, Legal); diff --git a/lib/Target/ARM/ARMRegisterBankInfo.cpp b/lib/Target/ARM/ARMRegisterBankInfo.cpp index 08f3da7388684..e47bd3a8963ec 100644 --- a/lib/Target/ARM/ARMRegisterBankInfo.cpp +++ b/lib/Target/ARM/ARMRegisterBankInfo.cpp @@ -219,6 +219,8 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { switch (Opc) { case G_ADD: + case G_SUB: + case G_MUL: case G_SEXT: case G_ZEXT: case G_GEP: diff --git 
a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index 40993fc0aa8ac..d2630685d91b7 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -208,8 +208,8 @@ protected: /// FP registers for VFPv3. bool HasD16 = false; - /// HasHardwareDivide - True if subtarget supports [su]div - bool HasHardwareDivide = false; + /// HasHardwareDivide - True if subtarget supports [su]div in Thumb mode + bool HasHardwareDivideInThumb = false; /// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode bool HasHardwareDivideInARM = false; @@ -507,7 +507,7 @@ public: return hasNEON() && UseNEONForSinglePrecisionFP; } - bool hasDivide() const { return HasHardwareDivide; } + bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; } bool hasDivideInARMMode() const { return HasHardwareDivideInARM; } bool hasDataBarrier() const { return HasDataBarrier; } bool hasV7Clrex() const { return HasV7Clrex; } diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index f421d3ac1693b..ada816c163897 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -67,6 +67,9 @@ static cl::opt<ImplicitItModeTy> ImplicitItMode( clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb", "Warn in ARM, emit implicit ITs in Thumb"))); +static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes", + cl::init(false)); + class ARMOperand; enum VectorLaneTy { NoLanes, AllLanes, IndexedLane }; @@ -540,6 +543,10 @@ public: // Initialize the set of available features. setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); + // Add build attributes based on the selected target. + if (AddBuildAttributes) + getTargetStreamer().emitTargetAttributes(STI); + // Not in an ITBlock to start with. 
ITState.CurPosition = ~0U; @@ -10189,8 +10196,8 @@ static const struct { { ARM::AEK_CRYPTO, Feature_HasV8, {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} }, { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} }, - { (ARM::AEK_HWDIV | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass, - {ARM::FeatureHWDiv, ARM::FeatureHWDivARM} }, + { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass, + {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} }, { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} }, { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} }, { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} }, diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index 6fa890ba1cd5e..4d6c52f3cd492 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -464,7 +464,7 @@ public: void emitUnwindRaw(int64_t Offset, const SmallVectorImpl<uint8_t> &Opcodes); void ChangeSection(MCSection *Section, const MCExpr *Subsection) override { - LastMappingSymbols[getPreviousSection().first] = std::move(LastEMSInfo); + LastMappingSymbols[getCurrentSection().first] = std::move(LastEMSInfo); MCELFStreamer::ChangeSection(Section, Subsection); auto LastMappingSymbol = LastMappingSymbols.find(Section); if (LastMappingSymbol != LastMappingSymbols.end()) { diff --git a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp index 73e563890dd9f..2b0cd461df7af 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp @@ -11,9 +11,13 @@ // //===----------------------------------------------------------------------===// +#include "ARMTargetMachine.h" #include "llvm/MC/ConstantPools.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include 
"llvm/Support/ARMBuildAttributes.h" +#include "llvm/Support/TargetParser.h" using namespace llvm; @@ -75,3 +79,179 @@ void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {} void ARMTargetStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) {} void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {} + +static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) { + if (STI.getCPU() == "xscale") + return ARMBuildAttrs::v5TEJ; + + if (STI.hasFeature(ARM::HasV8Ops)) { + if (STI.hasFeature(ARM::FeatureRClass)) + return ARMBuildAttrs::v8_R; + return ARMBuildAttrs::v8_A; + } else if (STI.hasFeature(ARM::HasV8MMainlineOps)) + return ARMBuildAttrs::v8_M_Main; + else if (STI.hasFeature(ARM::HasV7Ops)) { + if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP)) + return ARMBuildAttrs::v7E_M; + return ARMBuildAttrs::v7; + } else if (STI.hasFeature(ARM::HasV6T2Ops)) + return ARMBuildAttrs::v6T2; + else if (STI.hasFeature(ARM::HasV8MBaselineOps)) + return ARMBuildAttrs::v8_M_Base; + else if (STI.hasFeature(ARM::HasV6MOps)) + return ARMBuildAttrs::v6S_M; + else if (STI.hasFeature(ARM::HasV6Ops)) + return ARMBuildAttrs::v6; + else if (STI.hasFeature(ARM::HasV5TEOps)) + return ARMBuildAttrs::v5TE; + else if (STI.hasFeature(ARM::HasV5TOps)) + return ARMBuildAttrs::v5T; + else if (STI.hasFeature(ARM::HasV4TOps)) + return ARMBuildAttrs::v4T; + else + return ARMBuildAttrs::v4; +} + +static bool isV8M(const MCSubtargetInfo &STI) { + // Note that v8M Baseline is a subset of v6T2! + return (STI.hasFeature(ARM::HasV8MBaselineOps) && + !STI.hasFeature(ARM::HasV6T2Ops)) || + STI.hasFeature(ARM::HasV8MMainlineOps); +} + +/// Emit the build attributes that only depend on the hardware that we expect +/// to be available, and not on the ABI, or any source-language choices.
+void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) { + switchVendor("aeabi"); + + const StringRef CPUString = STI.getCPU(); + if (!CPUString.empty() && !CPUString.startswith("generic")) { + // FIXME: remove krait check when GNU tools support krait cpu + if (STI.hasFeature(ARM::ProcKrait)) { + emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9"); + // We consider krait as a "cortex-a9" + hwdiv CPU + // Enable hwdiv through ".arch_extension idiv" + if (STI.hasFeature(ARM::FeatureHWDivThumb) || + STI.hasFeature(ARM::FeatureHWDivARM)) + emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM); + } else { + emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString); + } + } + + emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI)); + + if (STI.hasFeature(ARM::FeatureAClass)) { + emitAttribute(ARMBuildAttrs::CPU_arch_profile, + ARMBuildAttrs::ApplicationProfile); + } else if (STI.hasFeature(ARM::FeatureRClass)) { + emitAttribute(ARMBuildAttrs::CPU_arch_profile, + ARMBuildAttrs::RealTimeProfile); + } else if (STI.hasFeature(ARM::FeatureMClass)) { + emitAttribute(ARMBuildAttrs::CPU_arch_profile, + ARMBuildAttrs::MicroControllerProfile); + } + + emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM) + ? 
ARMBuildAttrs::Not_Allowed + : ARMBuildAttrs::Allowed); + + if (isV8M(STI)) { + emitAttribute(ARMBuildAttrs::THUMB_ISA_use, + ARMBuildAttrs::AllowThumbDerived); + } else if (STI.hasFeature(ARM::FeatureThumb2)) { + emitAttribute(ARMBuildAttrs::THUMB_ISA_use, + ARMBuildAttrs::AllowThumb32); + } else if (STI.hasFeature(ARM::HasV4TOps)) { + emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed); + } + + if (STI.hasFeature(ARM::FeatureNEON)) { + /* NEON is not exactly a VFP architecture, but GAS emit one of + * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */ + if (STI.hasFeature(ARM::FeatureFPARMv8)) { + if (STI.hasFeature(ARM::FeatureCrypto)) + emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8); + else + emitFPU(ARM::FK_NEON_FP_ARMV8); + } else if (STI.hasFeature(ARM::FeatureVFP4)) + emitFPU(ARM::FK_NEON_VFPV4); + else + emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16 + : ARM::FK_NEON); + // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture + if (STI.hasFeature(ARM::HasV8Ops)) + emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch, + STI.hasFeature(ARM::HasV8_1aOps) + ? ARMBuildAttrs::AllowNeonARMv8_1a + : ARMBuildAttrs::AllowNeonARMv8); + } else { + if (STI.hasFeature(ARM::FeatureFPARMv8)) + // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one + // FPU, but there are two different names for it depending on the CPU. + emitFPU(STI.hasFeature(ARM::FeatureD16) + ? (STI.hasFeature(ARM::FeatureVFPOnlySP) ? ARM::FK_FPV5_SP_D16 + : ARM::FK_FPV5_D16) + : ARM::FK_FP_ARMV8); + else if (STI.hasFeature(ARM::FeatureVFP4)) + emitFPU(STI.hasFeature(ARM::FeatureD16) + ? (STI.hasFeature(ARM::FeatureVFPOnlySP) ? ARM::FK_FPV4_SP_D16 + : ARM::FK_VFPV4_D16) + : ARM::FK_VFPV4); + else if (STI.hasFeature(ARM::FeatureVFP3)) + emitFPU( + STI.hasFeature(ARM::FeatureD16) + // +d16 + ? (STI.hasFeature(ARM::FeatureVFPOnlySP) + ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16 + : ARM::FK_VFPV3XD) + : (STI.hasFeature(ARM::FeatureFP16) + ? 
ARM::FK_VFPV3_D16_FP16 + : ARM::FK_VFPV3_D16)) + // -d16 + : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16 + : ARM::FK_VFPV3)); + else if (STI.hasFeature(ARM::FeatureVFP2)) + emitFPU(ARM::FK_VFPV2); + } + + // ABI_HardFP_use attribute to indicate single precision FP. + if (STI.hasFeature(ARM::FeatureVFPOnlySP)) + emitAttribute(ARMBuildAttrs::ABI_HardFP_use, + ARMBuildAttrs::HardFPSinglePrecision); + + if (STI.hasFeature(ARM::FeatureFP16)) + emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP); + + if (STI.hasFeature(ARM::FeatureMP)) + emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP); + + // Hardware divide in ARM mode is part of base arch, starting from ARMv8. + // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M). + // It is not possible to produce DisallowDIV: if hwdiv is present in the base + // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits. + // AllowDIVExt is only emitted if hwdiv isn't available in the base arch; + // otherwise, the default value (AllowDIVIfExists) applies. 
+ if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops)) + emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt); + + if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI)) + emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed); + + if (STI.hasFeature(ARM::FeatureStrictAlign)) + emitAttribute(ARMBuildAttrs::CPU_unaligned_access, + ARMBuildAttrs::Not_Allowed); + else + emitAttribute(ARMBuildAttrs::CPU_unaligned_access, + ARMBuildAttrs::Allowed); + + if (STI.hasFeature(ARM::FeatureTrustZone) && + STI.hasFeature(ARM::FeatureVirtualization)) + emitAttribute(ARMBuildAttrs::Virtualization_use, + ARMBuildAttrs::AllowTZVirtualization); + else if (STI.hasFeature(ARM::FeatureTrustZone)) + emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ); + else if (STI.hasFeature(ARM::FeatureVirtualization)) + emitAttribute(ARMBuildAttrs::Virtualization_use, + ARMBuildAttrs::AllowVirtualization); +} diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp index fc083b98395b0..d0fd366ab9ed5 100644 --- a/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -83,13 +83,12 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, // ADJCALLSTACKUP -> add, sp, sp, amount MachineInstr &Old = *I; DebugLoc dl = Old.getDebugLoc(); - unsigned Amount = Old.getOperand(0).getImm(); + unsigned Amount = TII.getFrameSize(Old); if (Amount != 0) { // We need to keep the stack aligned properly. To do this, we round the // amount of space needed for the outgoing arguments up to the next // alignment boundary. - unsigned Align = getStackAlignment(); - Amount = (Amount+Align-1)/Align*Align; + Amount = alignTo(Amount, getStackAlignment()); // Replace the pseudo instruction with a new instruction... 
unsigned Opc = Old.getOpcode(); diff --git a/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp b/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp index 9f2ee8cf80356..535bb012eb07c 100644 --- a/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp +++ b/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp @@ -18,7 +18,7 @@ namespace llvm { AVRMCAsmInfo::AVRMCAsmInfo(const Triple &TT) { - PointerSize = 2; + CodePointerSize = 2; CalleeSaveStackSlotSize = 2; CommentString = ";"; PrivateGlobalPrefix = ".L"; diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h index 559ac291a79e1..fd7c97bf1f0a4 100644 --- a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h +++ b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h @@ -42,7 +42,7 @@ public: // messed up in random places by 4 bytes. .debug_line // section will be parsable, but with odd offsets and // line numbers, etc. - PointerSize = 8; + CodePointerSize = 8; } }; } diff --git a/lib/Target/Hexagon/BitTracker.cpp b/lib/Target/Hexagon/BitTracker.cpp index 61d3630ac095a..cb3049bf15007 100644 --- a/lib/Target/Hexagon/BitTracker.cpp +++ b/lib/Target/Hexagon/BitTracker.cpp @@ -1011,12 +1011,7 @@ void BT::subst(RegisterRef OldRR, RegisterRef NewRR) { bool BT::reached(const MachineBasicBlock *B) const { int BN = B->getNumber(); assert(BN >= 0); - for (EdgeSetType::iterator I = EdgeExec.begin(), E = EdgeExec.end(); - I != E; ++I) { - if (I->second == BN) - return true; - } - return false; + return ReachedBB.count(BN); } // Visit an individual instruction. 
This could be a newly added instruction, @@ -1036,6 +1031,8 @@ void BT::reset() { EdgeExec.clear(); InstrExec.clear(); Map.clear(); + ReachedBB.clear(); + ReachedBB.reserve(MF.size()); } void BT::run() { @@ -1068,6 +1065,7 @@ void BT::run() { if (EdgeExec.count(Edge)) continue; EdgeExec.insert(Edge); + ReachedBB.insert(Edge.second); const MachineBasicBlock &B = *MF.getBlockNumbered(Edge.second); MachineBasicBlock::const_iterator It = B.begin(), End = B.end(); diff --git a/lib/Target/Hexagon/BitTracker.h b/lib/Target/Hexagon/BitTracker.h index a547b34e852f6..7f49f430382d8 100644 --- a/lib/Target/Hexagon/BitTracker.h +++ b/lib/Target/Hexagon/BitTracker.h @@ -10,6 +10,7 @@ #ifndef LLVM_LIB_TARGET_HEXAGON_BITTRACKER_H #define LLVM_LIB_TARGET_HEXAGON_BITTRACKER_H +#include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/MachineFunction.h" @@ -68,10 +69,11 @@ private: typedef std::set<const MachineInstr *> InstrSetType; typedef std::queue<CFGEdge> EdgeQueueType; - EdgeSetType EdgeExec; // Executable flow graph edges. - InstrSetType InstrExec; // Executable instructions. - EdgeQueueType FlowQ; // Work queue of CFG edges. - bool Trace; // Enable tracing for debugging. + EdgeSetType EdgeExec; // Executable flow graph edges. + InstrSetType InstrExec; // Executable instructions. + EdgeQueueType FlowQ; // Work queue of CFG edges. + DenseSet<unsigned> ReachedBB; // Cache of reached blocks. + bool Trace; // Enable tracing for debugging. 
const MachineEvaluator &ME; MachineFunction &MF; diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 418dd71aeb4bf..e5eb059b566f4 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -635,7 +635,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps); } -bool HexagonTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { +bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { // If either no tail call or told not to tail call at all, don't. auto Attr = CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index fb8f0ba6b0579..1415156487c07 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -195,7 +195,7 @@ namespace HexagonISD { const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override; - bool mayBeEmittedAsTailCall(CallInst *CI) const override; + bool mayBeEmittedAsTailCall(const CallInst *CI) const override; /// If a physical register, this returns the register that receives the /// exception address on entry to an EH pad. 
diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp index b243de317dc54..27b40f134b1f4 100644 --- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -35,7 +35,6 @@ #include "llvm/Support/raw_ostream.h" #include <cassert> #include <cstdint> -#include <map> static cl::opt<int> CodeGrowthLimit("hexagon-amode-growth-limit", cl::Hidden, cl::init(0), cl::desc("Code growth limit for address mode " @@ -45,10 +44,8 @@ using namespace llvm; using namespace rdf; namespace llvm { - FunctionPass *createHexagonOptAddrMode(); - void initializeHexagonOptAddrModePass(PassRegistry &); - + void initializeHexagonOptAddrModePass(PassRegistry&); } // end namespace llvm namespace { @@ -59,10 +56,7 @@ public: HexagonOptAddrMode() : MachineFunctionPass(ID), HII(nullptr), MDT(nullptr), DFG(nullptr), - LV(nullptr) { - PassRegistry &R = *PassRegistry::getPassRegistry(); - initializeHexagonOptAddrModePass(R); - } + LV(nullptr) {} StringRef getPassName() const override { return "Optimize addressing mode of load/store"; @@ -84,7 +78,6 @@ private: MachineDominatorTree *MDT; DataFlowGraph *DFG; DataFlowGraph::DefStackMap DefM; - std::map<RegisterRef, std::map<NodeId, NodeId>> RDefMap; Liveness *LV; MISetType Deleted; @@ -99,8 +92,6 @@ private: void getAllRealUses(NodeAddr<StmtNode *> SN, NodeList &UNodeList); bool allValidCandidates(NodeAddr<StmtNode *> SA, NodeList &UNodeList); short getBaseWithLongOffset(const MachineInstr &MI) const; - void updateMap(NodeAddr<InstrNode *> IA); - bool constructDefMap(MachineBasicBlock *B); bool changeStore(MachineInstr *OldMI, MachineOperand ImmOp, unsigned ImmOpNum); bool changeLoad(MachineInstr *OldMI, MachineOperand ImmOp, unsigned ImmOpNum); @@ -112,11 +103,11 @@ private: char HexagonOptAddrMode::ID = 0; -INITIALIZE_PASS_BEGIN(HexagonOptAddrMode, "opt-amode", +INITIALIZE_PASS_BEGIN(HexagonOptAddrMode, "amode-opt", "Optimize addressing mode", false, false) 
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier) -INITIALIZE_PASS_END(HexagonOptAddrMode, "opt-amode", "Optimize addressing mode", +INITIALIZE_PASS_END(HexagonOptAddrMode, "amode-opt", "Optimize addressing mode", false, false) bool HexagonOptAddrMode::hasRepForm(MachineInstr &MI, unsigned TfrDefR) { @@ -173,8 +164,11 @@ bool HexagonOptAddrMode::canRemoveAddasl(NodeAddr<StmtNode *> AddAslSN, for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) { NodeAddr<UseNode *> UA = *I; NodeAddr<InstrNode *> IA = UA.Addr->getOwner(*DFG); - if ((UA.Addr->getFlags() & NodeAttrs::PhiRef) || - RDefMap[OffsetRR][IA.Id] != OffsetRegRD) + if (UA.Addr->getFlags() & NodeAttrs::PhiRef) + return false; + NodeAddr<RefNode*> AA = LV->getNearestAliasedRef(OffsetRR, IA); + if ((DFG->IsDef(AA) && AA.Id != OffsetRegRD) || + AA.Addr->getReachingDef() != OffsetRegRD) return false; MachineInstr &UseMI = *NodeAddr<StmtNode *>(IA).Addr->getCode(); @@ -486,14 +480,14 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN, MIB.add(AddAslMI->getOperand(2)); MIB.add(AddAslMI->getOperand(3)); const GlobalValue *GV = ImmOp.getGlobal(); - MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm(), + MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm()+ImmOp.getOffset(), ImmOp.getTargetFlags()); OpStart = 3; } else if (UseMID.mayStore()) { MIB.add(AddAslMI->getOperand(2)); MIB.add(AddAslMI->getOperand(3)); const GlobalValue *GV = ImmOp.getGlobal(); - MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm(), + MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm()+ImmOp.getOffset(), ImmOp.getTargetFlags()); MIB.add(UseMI->getOperand(2)); OpStart = 3; @@ -597,46 +591,6 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) { return Changed; } -void HexagonOptAddrMode::updateMap(NodeAddr<InstrNode *> IA) { - RegisterSet RRs; - for (NodeAddr<RefNode *> RA : IA.Addr->members(*DFG)) - RRs.insert(RA.Addr->getRegRef(*DFG)); - 
bool Common = false; - for (auto &R : RDefMap) { - if (!RRs.count(R.first)) - continue; - Common = true; - break; - } - if (!Common) - return; - - for (auto &R : RDefMap) { - auto F = DefM.find(R.first.Reg); - if (F == DefM.end() || F->second.empty()) - continue; - R.second[IA.Id] = F->second.top()->Id; - } -} - -bool HexagonOptAddrMode::constructDefMap(MachineBasicBlock *B) { - bool Changed = false; - auto BA = DFG->getFunc().Addr->findBlock(B, *DFG); - DFG->markBlock(BA.Id, DefM); - - for (NodeAddr<InstrNode *> IA : BA.Addr->members(*DFG)) { - updateMap(IA); - DFG->pushAllDefs(IA, DefM); - } - - MachineDomTreeNode *N = MDT->getNode(B); - for (auto I : *N) - Changed |= constructDefMap(I->getBlock()); - - DFG->releaseBlock(BA.Id, DefM); - return Changed; -} - bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) { if (skipFunction(*MF.getFunction())) return false; @@ -658,8 +612,6 @@ bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) { L.computePhiInfo(); LV = &L; - constructDefMap(&DFG->getMF().front()); - Deleted.clear(); NodeAddr<FuncNode *> FA = DFG->getFunc(); DEBUG(dbgs() << "==== [RefMap#]=====:\n " diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index 06fc9195fa677..6913d50bbcaab 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -111,6 +111,7 @@ namespace llvm { extern char &HexagonExpandCondsetsID; void initializeHexagonExpandCondsetsPass(PassRegistry&); void initializeHexagonLoopIdiomRecognizePass(PassRegistry&); + void initializeHexagonOptAddrModePass(PassRegistry&); Pass *createHexagonLoopIdiomPass(); FunctionPass *createHexagonBitSimplify(); @@ -152,6 +153,7 @@ extern "C" void LLVMInitializeHexagonTarget() { // Register the target. 
RegisterTargetMachine<HexagonTargetMachine> X(getTheHexagonTarget()); initializeHexagonLoopIdiomRecognizePass(*PassRegistry::getPassRegistry()); + initializeHexagonOptAddrModePass(*PassRegistry::getPassRegistry()); } HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT, diff --git a/lib/Target/Hexagon/RDFCopy.cpp b/lib/Target/Hexagon/RDFCopy.cpp index 57ce9fabc5e3a..ea86ffba58f61 100644 --- a/lib/Target/Hexagon/RDFCopy.cpp +++ b/lib/Target/Hexagon/RDFCopy.cpp @@ -59,7 +59,7 @@ void CopyPropagation::recordCopy(NodeAddr<StmtNode*> SA, EqualityMap &EM) { bool CopyPropagation::scanBlock(MachineBasicBlock *B) { bool Changed = false; - auto BA = DFG.getFunc().Addr->findBlock(B, DFG); + NodeAddr<BlockNode*> BA = DFG.findBlock(B); for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) { if (DFG.IsCode<NodeAttrs::Stmt>(IA)) { diff --git a/lib/Target/Hexagon/RDFGraph.h b/lib/Target/Hexagon/RDFGraph.h index d5faca4cd6f4b..52f390356b265 100644 --- a/lib/Target/Hexagon/RDFGraph.h +++ b/lib/Target/Hexagon/RDFGraph.h @@ -508,7 +508,8 @@ namespace rdf { static_assert(sizeof(NodeBase) <= NodeAllocator::NodeMemSize, "NodeBase must be at most NodeAllocator::NodeMemSize bytes"); - typedef std::vector<NodeAddr<NodeBase*>> NodeList; +// typedef std::vector<NodeAddr<NodeBase*>> NodeList; + typedef SmallVector<NodeAddr<NodeBase*>,4> NodeList; typedef std::set<NodeId> NodeSet; struct RefNode : public NodeBase { diff --git a/lib/Target/Hexagon/RDFRegisters.cpp b/lib/Target/Hexagon/RDFRegisters.cpp index 5c5496a548af9..4224ded3418b5 100644 --- a/lib/Target/Hexagon/RDFRegisters.cpp +++ b/lib/Target/Hexagon/RDFRegisters.cpp @@ -69,6 +69,19 @@ PhysicalRegisterInfo::PhysicalRegisterInfo(const TargetRegisterInfo &tri, for (const MachineOperand &Op : In.operands()) if (Op.isRegMask()) RegMasks.insert(Op.getRegMask()); + + MaskInfos.resize(RegMasks.size()+1); + for (uint32_t M = 1, NM = RegMasks.size(); M <= NM; ++M) { + BitVector PU(TRI.getNumRegUnits()); + const uint32_t 
*MB = RegMasks.get(M); + for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i) { + if (!(MB[i/32] & (1u << (i%32)))) + continue; + for (MCRegUnitIterator U(i, &TRI); U.isValid(); ++U) + PU.set(*U); + } + MaskInfos[M].Units = PU.flip(); + } } RegisterRef PhysicalRegisterInfo::normalize(RegisterRef RR) const { @@ -201,17 +214,8 @@ bool PhysicalRegisterInfo::aliasMM(RegisterRef RM, RegisterRef RN) const { bool RegisterAggr::hasAliasOf(RegisterRef RR) const { - if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) { - // XXX SLOW - const uint32_t *MB = PRI.getRegMaskBits(RR.Reg); - for (unsigned i = 1, e = PRI.getTRI().getNumRegs(); i != e; ++i) { - if (MB[i/32] & (1u << (i%32))) - continue; - if (hasAliasOf(RegisterRef(i, LaneBitmask::getAll()))) - return true; - } - return false; - } + if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) + return Units.anyCommon(PRI.getMaskUnits(RR.Reg)); for (MCRegUnitMaskIterator U(RR.Reg, &PRI.getTRI()); U.isValid(); ++U) { std::pair<uint32_t,LaneBitmask> P = *U; @@ -224,15 +228,8 @@ bool RegisterAggr::hasAliasOf(RegisterRef RR) const { bool RegisterAggr::hasCoverOf(RegisterRef RR) const { if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) { - // XXX SLOW - const uint32_t *MB = PRI.getRegMaskBits(RR.Reg); - for (unsigned i = 1, e = PRI.getTRI().getNumRegs(); i != e; ++i) { - if (MB[i/32] & (1u << (i%32))) - continue; - if (!hasCoverOf(RegisterRef(i, LaneBitmask::getAll()))) - return false; - } - return true; + BitVector T(PRI.getMaskUnits(RR.Reg)); + return T.reset(Units).none(); } for (MCRegUnitMaskIterator U(RR.Reg, &PRI.getTRI()); U.isValid(); ++U) { @@ -246,15 +243,7 @@ bool RegisterAggr::hasCoverOf(RegisterRef RR) const { RegisterAggr &RegisterAggr::insert(RegisterRef RR) { if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) { - BitVector PU(PRI.getTRI().getNumRegUnits()); // Preserved units. 
- const uint32_t *MB = PRI.getRegMaskBits(RR.Reg); - for (unsigned i = 1, e = PRI.getTRI().getNumRegs(); i != e; ++i) { - if (!(MB[i/32] & (1u << (i%32)))) - continue; - for (MCRegUnitIterator U(i, &PRI.getTRI()); U.isValid(); ++U) - PU.set(*U); - } - Units |= PU.flip(); + Units |= PRI.getMaskUnits(RR.Reg); return *this; } diff --git a/lib/Target/Hexagon/RDFRegisters.h b/lib/Target/Hexagon/RDFRegisters.h index 4b35c85a6b62c..314d8b5666d76 100644 --- a/lib/Target/Hexagon/RDFRegisters.h +++ b/lib/Target/Hexagon/RDFRegisters.h @@ -51,6 +51,8 @@ namespace rdf { return F - Map.begin() + 1; } + uint32_t size() const { return Map.size(); } + typedef typename std::vector<T>::const_iterator const_iterator; const_iterator begin() const { return Map.begin(); } const_iterator end() const { return Map.end(); } @@ -107,6 +109,9 @@ namespace rdf { RegisterRef getRefForUnit(uint32_t U) const { return RegisterRef(UnitInfos[U].Reg, UnitInfos[U].Mask); } + const BitVector &getMaskUnits(RegisterId MaskId) const { + return MaskInfos[TargetRegisterInfo::stackSlot2Index(MaskId)].Units; + } const TargetRegisterInfo &getTRI() const { return TRI; } @@ -118,11 +123,15 @@ namespace rdf { RegisterId Reg = 0; LaneBitmask Mask; }; + struct MaskInfo { + BitVector Units; + }; const TargetRegisterInfo &TRI; + IndexedSet<const uint32_t*> RegMasks; std::vector<RegInfo> RegInfos; std::vector<UnitInfo> UnitInfos; - IndexedSet<const uint32_t*> RegMasks; + std::vector<MaskInfo> MaskInfos; bool aliasRR(RegisterRef RA, RegisterRef RB) const; bool aliasRM(RegisterRef RR, RegisterRef RM) const; @@ -135,7 +144,7 @@ namespace rdf { : Units(pri.getTRI().getNumRegUnits()), PRI(pri) {} RegisterAggr(const RegisterAggr &RG) = default; - bool empty() const { return Units.empty(); } + bool empty() const { return Units.none(); } bool hasAliasOf(RegisterRef RR) const; bool hasCoverOf(RegisterRef RR) const; static bool isCoverOf(RegisterRef RA, RegisterRef RB, diff --git 
a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp index c26b3081dbc32..82e6731ecd782 100644 --- a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp +++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp @@ -17,7 +17,7 @@ using namespace llvm; void MSP430MCAsmInfo::anchor() { } MSP430MCAsmInfo::MSP430MCAsmInfo(const Triple &TT) { - PointerSize = CalleeSaveStackSlotSize = 2; + CodePointerSize = CalleeSaveStackSlotSize = 2; CommentString = ";"; diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp index ebe3c57848882..11411d997bb3b 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp @@ -23,7 +23,7 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Triple &TheTriple) { if ((TheTriple.getArch() == Triple::mips64el) || (TheTriple.getArch() == Triple::mips64)) { - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; } // FIXME: This condition isn't quite right but it's the best we can do until diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td index 8b04fcb76920d..bf79f0f2ff825 100644 --- a/lib/Target/Mips/MipsMSAInstrInfo.td +++ b/lib/Target/Mips/MipsMSAInstrInfo.td @@ -3781,6 +3781,80 @@ let Predicates = [HasMSA] in { ISA_MIPS1_NOT_32R6_64R6; } +def vsplati64_imm_eq_63 : PatLeaf<(bitconvert (v4i32 (build_vector))), [{ + APInt Imm; + SDNode *BV = N->getOperand(0).getNode(); + EVT EltTy = N->getValueType(0).getVectorElementType(); + + return selectVSplat(BV, Imm, EltTy.getSizeInBits()) && + Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == 63; +}]>; + +def immi32Cst7 : ImmLeaf<i32, [{return isUInt<32>(Imm) && Imm == 7;}]>; +def immi32Cst15 : ImmLeaf<i32, [{return isUInt<32>(Imm) && Imm == 15;}]>; +def immi32Cst31 : ImmLeaf<i32, [{return isUInt<32>(Imm) && Imm == 31;}]>; + +def vsplati8imm7 : PatFrag<(ops node:$wt), + (and node:$wt, 
(vsplati8 immi32Cst7))>; +def vsplati16imm15 : PatFrag<(ops node:$wt), + (and node:$wt, (vsplati16 immi32Cst15))>; +def vsplati32imm31 : PatFrag<(ops node:$wt), + (and node:$wt, (vsplati32 immi32Cst31))>; +def vsplati64imm63 : PatFrag<(ops node:$wt), + (and node:$wt, vsplati64_imm_eq_63)>; + +class MSAShiftPat<SDNode Node, ValueType VT, MSAInst Insn, dag Vec> : + MSAPat<(VT (Node VT:$ws, (VT (and VT:$wt, Vec)))), + (VT (Insn VT:$ws, VT:$wt))>; + +class MSABitPat<SDNode Node, ValueType VT, MSAInst Insn, PatFrag Frag> : + MSAPat<(VT (Node VT:$ws, (shl vsplat_imm_eq_1, (Frag VT:$wt)))), + (VT (Insn VT:$ws, VT:$wt))>; + +multiclass MSAShiftPats<SDNode Node, string Insn> { + def : MSAShiftPat<Node, v16i8, !cast<MSAInst>(Insn#_B), + (vsplati8 immi32Cst7)>; + def : MSAShiftPat<Node, v8i16, !cast<MSAInst>(Insn#_H), + (vsplati16 immi32Cst15)>; + def : MSAShiftPat<Node, v4i32, !cast<MSAInst>(Insn#_W), + (vsplati32 immi32Cst31)>; + def : MSAPat<(v2i64 (Node v2i64:$ws, (v2i64 (and v2i64:$wt, + vsplati64_imm_eq_63)))), + (v2i64 (!cast<MSAInst>(Insn#_D) v2i64:$ws, v2i64:$wt))>; +} + +multiclass MSABitPats<SDNode Node, string Insn> { + def : MSABitPat<Node, v16i8, !cast<MSAInst>(Insn#_B), vsplati8imm7>; + def : MSABitPat<Node, v8i16, !cast<MSAInst>(Insn#_H), vsplati16imm15>; + def : MSABitPat<Node, v4i32, !cast<MSAInst>(Insn#_W), vsplati32imm31>; + def : MSAPat<(Node v2i64:$ws, (shl (v2i64 vsplati64_imm_eq_1), + (vsplati64imm63 v2i64:$wt))), + (v2i64 (!cast<MSAInst>(Insn#_D) v2i64:$ws, v2i64:$wt))>; +} + +defm : MSAShiftPats<shl, "SLL">; +defm : MSAShiftPats<srl, "SRL">; +defm : MSAShiftPats<sra, "SRA">; +defm : MSABitPats<xor, "BNEG">; +defm : MSABitPats<or, "BSET">; + +def : MSAPat<(and v16i8:$ws, (xor (shl vsplat_imm_eq_1, + (vsplati8imm7 v16i8:$wt)), + immAllOnesV)), + (v16i8 (BCLR_B v16i8:$ws, v16i8:$wt))>; +def : MSAPat<(and v8i16:$ws, (xor (shl vsplat_imm_eq_1, + (vsplati16imm15 v8i16:$wt)), + immAllOnesV)), + (v8i16 (BCLR_H v8i16:$ws, v8i16:$wt))>; +def : MSAPat<(and 
v4i32:$ws, (xor (shl vsplat_imm_eq_1, + (vsplati32imm31 v4i32:$wt)), + immAllOnesV)), + (v4i32 (BCLR_W v4i32:$ws, v4i32:$wt))>; +def : MSAPat<(and v2i64:$ws, (xor (shl (v2i64 vsplati64_imm_eq_1), + (vsplati64imm63 v2i64:$wt)), + (bitconvert (v4i32 immAllOnesV)))), + (v2i64 (BCLR_D v2i64:$ws, v2i64:$wt))>; + // Vector extraction with fixed index. // // Extracting 32-bit values on MSA32 should always use COPY_S_W rather than diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index e2da8477295b7..bf7f079e31052 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -1547,11 +1547,24 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG, return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm); } +static SDValue truncateVecElts(SDValue Op, SelectionDAG &DAG) { + SDLoc DL(Op); + EVT ResTy = Op->getValueType(0); + SDValue Vec = Op->getOperand(2); + bool BigEndian = !DAG.getSubtarget().getTargetTriple().isLittleEndian(); + MVT ResEltTy = ResTy == MVT::v2i64 ? 
MVT::i64 : MVT::i32; + SDValue ConstValue = DAG.getConstant(Vec.getScalarValueSizeInBits() - 1, + DL, ResEltTy); + SDValue SplatVec = getBuildVectorSplat(ResTy, ConstValue, BigEndian, DAG); + + return DAG.getNode(ISD::AND, DL, ResTy, Vec, SplatVec); +} + static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) { EVT ResTy = Op->getValueType(0); SDLoc DL(Op); SDValue One = DAG.getConstant(1, DL, ResTy); - SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2)); + SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, truncateVecElts(Op, DAG)); return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), DAG.getNOT(DL, Bit, ResTy)); @@ -1687,7 +1700,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1), DAG.getNode(ISD::SHL, DL, VecTy, One, - Op->getOperand(2))); + truncateVecElts(Op, DAG))); } case Intrinsic::mips_bnegi_b: case Intrinsic::mips_bnegi_h: @@ -1723,7 +1736,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1), DAG.getNode(ISD::SHL, DL, VecTy, One, - Op->getOperand(2))); + truncateVecElts(Op, DAG))); } case Intrinsic::mips_bseti_b: case Intrinsic::mips_bseti_h: @@ -2210,7 +2223,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, case Intrinsic::mips_sll_w: case Intrinsic::mips_sll_d: return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1), - Op->getOperand(2)); + truncateVecElts(Op, DAG)); case Intrinsic::mips_slli_b: case Intrinsic::mips_slli_h: case Intrinsic::mips_slli_w: @@ -2240,7 +2253,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, case Intrinsic::mips_sra_w: case Intrinsic::mips_sra_d: return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1), - Op->getOperand(2)); + truncateVecElts(Op, DAG)); case Intrinsic::mips_srai_b: case Intrinsic::mips_srai_h: case Intrinsic::mips_srai_w: @@ -2270,7 +2283,7 @@ SDValue 
MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, case Intrinsic::mips_srl_w: case Intrinsic::mips_srl_d: return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1), - Op->getOperand(2)); + truncateVecElts(Op, DAG)); case Intrinsic::mips_srli_b: case Intrinsic::mips_srli_h: case Intrinsic::mips_srli_w: diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp index 78bdf4e698d8b..bdd0f156c8afe 100644 --- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp +++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp @@ -27,7 +27,7 @@ void NVPTXMCAsmInfo::anchor() {} NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Triple &TheTriple) { if (TheTriple.getArch() == Triple::nvptx64) { - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; } CommentString = "//"; diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index 21e25de80dc7c..ba28cd83278b6 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -2004,7 +2004,7 @@ void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV, for (unsigned I = 0, E = DL.getTypeAllocSize(CPV->getType()); I < E; ++I) { uint8_t Byte = Val.getLoBits(8).getZExtValue(); aggBuffer->addBytes(&Byte, 1, 1); - Val = Val.lshr(8); + Val.lshrInPlace(8); } return; } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp index d8fab5b7c01a2..d30bf1a56e8aa 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp @@ -20,7 +20,7 @@ void PPCMCAsmInfoDarwin::anchor() { } PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit, const Triple& T) { if (is64Bit) { - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; } IsLittleEndian = false; @@ -50,7 +50,7 @@ PPCELFMCAsmInfo::PPCELFMCAsmInfo(bool is64Bit, const Triple& T) { 
NeedsLocalForSize = true; if (is64Bit) { - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; } IsLittleEndian = T.getArch() == Triple::ppc64le; diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 9c72638023bb3..125c00295f88e 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -2977,10 +2977,10 @@ void PPCDAGToDAGISel::Select(SDNode *N) { SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) { SDValue Chain = LD->getChain(); SDValue Ops[] = { Base, Offset, Chain }; - SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX, - N->getValueType(0), Ops); MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = LD->getMemOperand(); + SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX, + N->getValueType(0), Ops); cast<MachineSDNode>(NewN)->setMemRefs(MemOp, MemOp + 1); return; } diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp index b164df8b595a3..d622911e92c4f 100644 --- a/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp +++ b/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp @@ -18,7 +18,7 @@ using namespace llvm; void RISCVMCAsmInfo::anchor() {} RISCVMCAsmInfo::RISCVMCAsmInfo(const Triple &TT) { - PointerSize = CalleeSaveStackSlotSize = TT.isArch64Bit() ? 8 : 4; + CodePointerSize = CalleeSaveStackSlotSize = TT.isArch64Bit() ? 
8 : 4; CommentString = "#"; AlignmentIsInBytes = false; SupportsDebugInformation = true; diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp index 3ed09898fb78d..21df60237d96e 100644 --- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp +++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp @@ -28,7 +28,7 @@ SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Triple &TheTriple) { IsLittleEndian = (TheTriple.getArch() == Triple::sparcel); if (isV9) { - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; } Data16bitsDirective = "\t.half\t"; diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp index b17977d41be1f..6e00981939b63 100644 --- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp +++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp @@ -14,7 +14,7 @@ using namespace llvm; SystemZMCAsmInfo::SystemZMCAsmInfo(const Triple &TT) { - PointerSize = 8; + CodePointerSize = 8; CalleeSaveStackSlotSize = 8; IsLittleEndian = false; diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 84d3c7bed50a2..f2fd581f78476 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -829,7 +829,7 @@ bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType, return isTruncateFree(FromType, ToType); } -bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { +bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { return CI->isTailCall(); } diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h index 7d92a73558778..1c34dc43e8bb2 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.h +++ b/lib/Target/SystemZ/SystemZISelLowering.h @@ -454,7 +454,7 @@ public: MachineBasicBlock *BB) const override; SDValue LowerOperation(SDValue Op, 
SelectionDAG &DAG) const override; bool allowTruncateForTailCall(Type *, Type *) const override; - bool mayBeEmittedAsTailCall(CallInst *CI) const override; + bool mayBeEmittedAsTailCall(const CallInst *CI) const override; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp index 2dcec5263fa1e..5f8c78ed16834 100644 --- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp +++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp @@ -22,7 +22,7 @@ using namespace llvm; WebAssemblyMCAsmInfoELF::~WebAssemblyMCAsmInfoELF() {} WebAssemblyMCAsmInfoELF::WebAssemblyMCAsmInfoELF(const Triple &T) { - PointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4; + CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4; // TODO: What should MaxInstLength be? @@ -55,7 +55,7 @@ WebAssemblyMCAsmInfoELF::WebAssemblyMCAsmInfoELF(const Triple &T) { WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() {} WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) { - PointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4; + CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4; // TODO: What should MaxInstLength be? 
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp index a0b008947491a..544cd653fd721 100644 --- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp +++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp @@ -94,6 +94,8 @@ void WebAssemblyMCCodeEmitter::encodeInstruction( MCFixupKind(WebAssembly::fixup_code_global_index), MI.getLoc())); ++MCNumFixups; encodeULEB128(uint64_t(MO.getImm()), OS); + } else if (Info.OperandType == WebAssembly::OPERAND_SIGNATURE) { + encodeSLEB128(int64_t(MO.getImm()), OS); } else { encodeULEB128(uint64_t(MO.getImm()), OS); } diff --git a/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp index f4c9a4ef6b9cc..559165e4c86b2 100644 --- a/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp +++ b/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp @@ -54,7 +54,7 @@ FunctionPass *llvm::createWebAssemblyOptimizeReturned() { void OptimizeReturned::visitCallSite(CallSite CS) { for (unsigned i = 0, e = CS.getNumArgOperands(); i < e; ++i) - if (CS.paramHasAttr(0, Attribute::Returned)) { + if (CS.paramHasAttr(i, Attribute::Returned)) { Instruction *Inst = CS.getInstruction(); Value *Arg = CS.getArgOperand(i); // Ignore constants, globals, undef, etc. diff --git a/lib/Target/WebAssembly/known_gcc_test_failures.txt b/lib/Target/WebAssembly/known_gcc_test_failures.txt index 8dd5e8a03e2ee..8e8e5fd1eff1e 100644 --- a/lib/Target/WebAssembly/known_gcc_test_failures.txt +++ b/lib/Target/WebAssembly/known_gcc_test_failures.txt @@ -1,5 +1,15 @@ # Tests which are known to fail from the GCC torture test suite. +# Syntax: Each line has a single test to be marked as a 'known failure' (or +# 'exclusion'. Known failures are expected to fail, and will cause an error if +# they pass. (Known failures that do not run at all will not cause an +# error). 
The format is +# <name> <attributes> # comment +# +# The attributes in this case represent the different arguments used to +# compiler: 'wasm-s' is for compiling to .s files, and 'wasm-o' for compiling +# to wasm object files (.o). + # Computed gotos are not supported (Cannot select BlockAddress/BRIND) 20040302-1.c 20071210-1.c @@ -66,3 +76,21 @@ pr41935.c 920728-1.c pr28865.c widechar-2.c + +# crash: Running pass 'WebAssembly Explicit Locals' on function +20020107-1.c wasm-o +20030222-1.c wasm-o +20071220-1.c wasm-o +20071220-2.c wasm-o +990130-1.c wasm-o +pr38533.c wasm-o +pr41239.c wasm-o +pr43385.c wasm-o +pr43560.c wasm-o +pr45695.c wasm-o +pr49279.c wasm-o +pr49390.c wasm-o +pr52286.c wasm-o + +# fatal error: error in backend: data symbols must have a size set with .size +921110-1.c wasm-o diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp index 48a1d8f1330cd..9c35a251e480b 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp @@ -43,7 +43,7 @@ void X86MCAsmInfoDarwin::anchor() { } X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) { bool is64Bit = T.getArch() == Triple::x86_64; if (is64Bit) - PointerSize = CalleeSaveStackSlotSize = 8; + CodePointerSize = CalleeSaveStackSlotSize = 8; AssemblerDialect = AsmWriterFlavor; @@ -92,7 +92,7 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) { // For ELF, x86-64 pointer size depends on the ABI. // For x86-64 without the x32 ABI, pointer size is 8. For x86 and for x86-64 // with the x32 ABI, pointer size remains the default 4. - PointerSize = (is64Bit && !isX32) ? 8 : 4; + CodePointerSize = (is64Bit && !isX32) ? 8 : 4; // OTOH, stack slot size is always 8 for x86-64, even with the x32 ABI. CalleeSaveStackSlotSize = is64Bit ? 
8 : 4; @@ -129,7 +129,7 @@ X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) { if (Triple.getArch() == Triple::x86_64) { PrivateGlobalPrefix = ".L"; PrivateLabelPrefix = ".L"; - PointerSize = 8; + CodePointerSize = 8; WinEHEncodingType = WinEH::EncodingType::Itanium; } else { // 32-bit X86 doesn't use CFI, so this isn't a real encoding type. It's just @@ -156,7 +156,7 @@ X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) { if (Triple.getArch() == Triple::x86_64) { PrivateGlobalPrefix = ".L"; PrivateLabelPrefix = ".L"; - PointerSize = 8; + CodePointerSize = 8; WinEHEncodingType = WinEH::EncodingType::Itanium; ExceptionsType = ExceptionHandling::WinEH; } else { diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 78e0bca4158ee..8678a13b95d04 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -1698,21 +1698,18 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, } } -// NOTE: this only has a subset of the full frame index logic. In -// particular, the FI < 0 and AfterFPPop logic is handled in -// X86RegisterInfo::eliminateFrameIndex, but not here. Possibly -// (probably?) it should be moved into here. int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI, unsigned &FrameReg) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); + bool IsFixed = MFI.isFixedObjectIndex(FI); // We can't calculate offset from frame pointer if the stack is realigned, // so enforce usage of stack/base pointer. The base pointer is used when we // have dynamic allocas in addition to dynamic realignment. if (TRI->hasBasePointer(MF)) - FrameReg = TRI->getBaseRegister(); + FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister(); else if (TRI->needsStackRealignment(MF)) - FrameReg = TRI->getStackRegister(); + FrameReg = IsFixed ? 
TRI->getFramePtr() : TRI->getStackRegister(); else FrameReg = TRI->getFrameRegister(MF); diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index eb5c56ff2ff91..2d788bf0cf994 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -1311,8 +1311,9 @@ bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM, ++Cost; // If the base is a register with multiple uses, this // transformation may save a mov. - if ((AM.BaseType == X86ISelAddressMode::RegBase && - AM.Base_Reg.getNode() && + // FIXME: Don't rely on DELETED_NODEs. + if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() && + AM.Base_Reg->getOpcode() != ISD::DELETED_NODE && !AM.Base_Reg.getNode()->hasOneUse()) || AM.BaseType == X86ISelAddressMode::FrameIndexBase) --Cost; diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 7ff483063ec23..b5f29fb400ef7 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2742,13 +2742,13 @@ static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) { return GuaranteedTailCallOpt && canGuaranteeTCO(CC); } -bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { +bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { auto Attr = CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); if (!CI->isTailCall() || Attr.getValueAsString() == "true") return false; - CallSite CS(CI); + ImmutableCallSite CS(CI); CallingConv::ID CalleeCC = CS.getCallingConv(); if (!mayTailCallThisCC(CalleeCC)) return false; @@ -8327,13 +8327,13 @@ static APInt computeZeroableShuffleElements(ArrayRef<int> Mask, Zeroable.setBit(i); else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) { APInt Val = Cst->getAPIntValue(); - Val = Val.lshr((M % Scale) * ScalarSizeInBits); + Val.lshrInPlace((M % Scale) * ScalarSizeInBits); Val = 
Val.getLoBits(ScalarSizeInBits); if (Val == 0) Zeroable.setBit(i); } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) { APInt Val = Cst->getValueAPF().bitcastToAPInt(); - Val = Val.lshr((M % Scale) * ScalarSizeInBits); + Val.lshrInPlace((M % Scale) * ScalarSizeInBits); Val = Val.getLoBits(ScalarSizeInBits); if (Val == 0) Zeroable.setBit(i); @@ -16069,7 +16069,7 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) { unsigned EltBits = EltVT.getSizeInBits(); // For FABS, mask is 0x7f...; for FNEG, mask is 0x80... APInt MaskElt = - IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits); + IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignMask(EltBits); const fltSemantics &Sem = EltVT == MVT::f64 ? APFloat::IEEEdouble() : (IsF128 ? APFloat::IEEEquad() : APFloat::IEEEsingle()); @@ -16132,9 +16132,9 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) { // The mask constants are automatically splatted for vector types. unsigned EltSizeInBits = VT.getScalarSizeInBits(); SDValue SignMask = DAG.getConstantFP( - APFloat(Sem, APInt::getSignBit(EltSizeInBits)), dl, LogicVT); + APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT); SDValue MagMask = DAG.getConstantFP( - APFloat(Sem, ~APInt::getSignBit(EltSizeInBits)), dl, LogicVT); + APFloat(Sem, ~APInt::getSignMask(EltSizeInBits)), dl, LogicVT); // First, clear all bits but the sign bit from the second operand (sign). if (IsFakeVector) @@ -17344,10 +17344,10 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget, // bits of the inputs before performing those operations. 
if (FlipSigns) { MVT EltVT = VT.getVectorElementType(); - SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), dl, + SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl, VT); - Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); - Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); + Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM); + Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM); } SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); @@ -22111,11 +22111,11 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget, } // i64 vector arithmetic shift can be emulated with the transform: - // M = lshr(SIGN_BIT, Amt) + // M = lshr(SIGN_MASK, Amt) // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M) if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) && Op.getOpcode() == ISD::SRA) { - SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT); + SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT); SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt); R = DAG.getNode(ISD::SRL, dl, VT, R, Amt); R = DAG.getNode(ISD::XOR, dl, VT, R, M); @@ -22647,7 +22647,7 @@ bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b. TargetLowering::AtomicExpansionKind X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { - auto PTy = cast<PointerType>(LI->getPointerOperand()->getType()); + auto PTy = cast<PointerType>(LI->getPointerOperandType()); return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None; } @@ -26722,8 +26722,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, // Low bits are known zero. KnownZero.setLowBits(ShAmt); } else { - KnownZero = KnownZero.lshr(ShAmt); - KnownOne = KnownOne.lshr(ShAmt); + KnownZero.lshrInPlace(ShAmt); + KnownOne.lshrInPlace(ShAmt); // High bits are known zero. 
KnownZero.setHighBits(ShAmt); } @@ -30152,7 +30152,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG, // x s< 0 ? x^C : 0 --> subus x, C if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR && ISD::isBuildVectorAllZeros(CondRHS.getNode()) && - OpRHSConst->getAPIntValue().isSignBit()) + OpRHSConst->getAPIntValue().isSignMask()) // Note that we have to rebuild the RHS constant here to ensure we // don't rely on particular values of undef lanes. return DAG.getNode( @@ -30203,7 +30203,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG, return SDValue(); assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); - APInt DemandedMask(APInt::getSignBit(BitWidth)); + APInt DemandedMask(APInt::getSignMask(BitWidth)); APInt KnownZero, KnownOne; TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), DCI.isBeforeLegalizeOps()); @@ -31269,7 +31269,7 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG, else if (X86ISD::VSRAI == Opcode) Elt = Elt.ashr(ShiftImm); else - Elt = Elt.lshr(ShiftImm); + Elt.lshrInPlace(ShiftImm); } return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N)); } @@ -32234,8 +32234,8 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG, BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V); if (!BV || !BV->isConstant()) return false; - for (unsigned i = 0, e = V.getNumOperands(); i < e; i++) { - ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(i)); + for (SDValue Op : V->ops()) { + ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); if (!C) return false; uint64_t Val = C->getZExtValue(); @@ -33428,8 +33428,8 @@ static SDValue isFNEG(SDNode *N) { SDValue Op0 = peekThroughBitcasts(Op.getOperand(0)); unsigned EltBits = Op1.getScalarValueSizeInBits(); - auto isSignBitValue = [&](const ConstantFP *C) { - return C->getValueAPF().bitcastToAPInt() == APInt::getSignBit(EltBits); + auto isSignMask = [&](const ConstantFP *C) { + return 
C->getValueAPF().bitcastToAPInt() == APInt::getSignMask(EltBits); }; // There is more than one way to represent the same constant on @@ -33440,21 +33440,21 @@ static SDValue isFNEG(SDNode *N) { // We check all variants here. if (Op1.getOpcode() == X86ISD::VBROADCAST) { if (auto *C = getTargetConstantFromNode(Op1.getOperand(0))) - if (isSignBitValue(cast<ConstantFP>(C))) + if (isSignMask(cast<ConstantFP>(C))) return Op0; } else if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1)) { if (ConstantFPSDNode *CN = BV->getConstantFPSplatNode()) - if (isSignBitValue(CN->getConstantFPValue())) + if (isSignMask(CN->getConstantFPValue())) return Op0; } else if (auto *C = getTargetConstantFromNode(Op1)) { if (C->getType()->isVectorTy()) { if (auto *SplatV = C->getSplatValue()) - if (isSignBitValue(cast<ConstantFP>(SplatV))) + if (isSignMask(cast<ConstantFP>(SplatV))) return Op0; } else if (auto *FPConst = dyn_cast<ConstantFP>(C)) - if (isSignBitValue(FPConst)) + if (isSignMask(FPConst)) return Op0; } return SDValue(); @@ -34631,7 +34631,7 @@ static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG, return SDValue(); ShrinkMode Mode; - if (!canReduceVMulWidth(MulOp.getNode(), DAG, Mode)) + if (!canReduceVMulWidth(MulOp.getNode(), DAG, Mode) || Mode == MULU16) return SDValue(); EVT VT = N->getValueType(0); @@ -35922,14 +35922,11 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, if (Subtarget.is64Bit()) { Res.first = X86::RAX; Res.second = &X86::GR64_ADRegClass; - } else if (Subtarget.is32Bit()) { + } else { + assert((Subtarget.is32Bit() || Subtarget.is16Bit()) && + "Expecting 64, 32 or 16 bit subtarget"); Res.first = X86::EAX; Res.second = &X86::GR32_ADRegClass; - } else if (Subtarget.is16Bit()) { - Res.first = X86::AX; - Res.second = &X86::GR16_ADRegClass; - } else { - llvm_unreachable("Expecting 64, 32 or 16 bit subtarget"); } return Res; } diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 
ab4910daca02b..190a883350000 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -1207,7 +1207,7 @@ namespace llvm { bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override; - bool mayBeEmittedAsTailCall(CallInst *CI) const override; + bool mayBeEmittedAsTailCall(const CallInst *CI) const override; EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType ExtendKind) const override; diff --git a/lib/Target/X86/X86InstructionSelector.cpp b/lib/Target/X86/X86InstructionSelector.cpp index 6cc5e8b635975..fb93157928922 100644 --- a/lib/Target/X86/X86InstructionSelector.cpp +++ b/lib/Target/X86/X86InstructionSelector.cpp @@ -67,6 +67,8 @@ private: MachineFunction &MF) const; bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const; + bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI, + MachineFunction &MF) const; const X86Subtarget &STI; const X86InstrInfo &TII; @@ -99,6 +101,10 @@ X86InstructionSelector::X86InstructionSelector(const X86Subtarget &STI, static const TargetRegisterClass * getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) { if (RB.getID() == X86::GPRRegBankID) { + if (Ty.getSizeInBits() <= 8) + return &X86::GR8RegClass; + if (Ty.getSizeInBits() == 16) + return &X86::GR16RegClass; if (Ty.getSizeInBits() == 32) return &X86::GR32RegClass; if (Ty.getSizeInBits() == 64) @@ -207,6 +213,8 @@ bool X86InstructionSelector::select(MachineInstr &I) const { return true; if (selectConstant(I, MRI, MF)) return true; + if (selectTrunc(I, MRI, MF)) + return true; return selectImpl(I); } @@ -509,6 +517,59 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I, return constrainSelectedInstRegOperands(I, TII, TRI, RBI); } +bool X86InstructionSelector::selectTrunc(MachineInstr &I, + MachineRegisterInfo &MRI, + MachineFunction &MF) const { + if (I.getOpcode() != TargetOpcode::G_TRUNC) + return false; + + const unsigned DstReg = I.getOperand(0).getReg(); + const 
unsigned SrcReg = I.getOperand(1).getReg(); + + const LLT DstTy = MRI.getType(DstReg); + const LLT SrcTy = MRI.getType(SrcReg); + + const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); + const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI); + + if (DstRB.getID() != SrcRB.getID()) { + DEBUG(dbgs() << "G_TRUNC input/output on different banks\n"); + return false; + } + + if (DstRB.getID() != X86::GPRRegBankID) + return false; + + const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB); + if (!DstRC) + return false; + + const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(SrcTy, SrcRB); + if (!SrcRC) + return false; + + if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) || + !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) { + DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); + return false; + } + + if (DstRC == SrcRC) { + // Nothing to be done + } else if (DstRC == &X86::GR32RegClass) { + I.getOperand(1).setSubReg(X86::sub_32bit); + } else if (DstRC == &X86::GR16RegClass) { + I.getOperand(1).setSubReg(X86::sub_16bit); + } else if (DstRC == &X86::GR8RegClass) { + I.getOperand(1).setSubReg(X86::sub_8bit); + } else { + return false; + } + + I.setDesc(TII.get(X86::COPY)); + return true; +} + InstructionSelector * llvm::createX86InstructionSelector(X86Subtarget &Subtarget, X86RegisterBankInfo &RBI) { diff --git a/lib/Target/X86/X86RegisterBankInfo.cpp b/lib/Target/X86/X86RegisterBankInfo.cpp index d395c826e6bf7..0f8a750a02352 100644 --- a/lib/Target/X86/X86RegisterBankInfo.cpp +++ b/lib/Target/X86/X86RegisterBankInfo.cpp @@ -68,6 +68,7 @@ X86GenRegisterBankInfo::PartialMappingIdx X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) { if ((Ty.isScalar() && !isFP) || Ty.isPointer()) { switch (Ty.getSizeInBits()) { + case 1: case 8: return PMI_GPR8; case 16: diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h index 58fa31e94fba4..25958f0c31064 100644 --- 
a/lib/Target/X86/X86RegisterInfo.h +++ b/lib/Target/X86/X86RegisterInfo.h @@ -133,6 +133,11 @@ public: unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const; unsigned getStackRegister() const { return StackPtr; } unsigned getBaseRegister() const { return BasePtr; } + /// Returns physical register used as frame pointer. + /// This will always returns the frame pointer register, contrary to + /// getFrameRegister() which returns the "base pointer" in situations + /// involving a stack, frame and base pointer. + unsigned getFramePtr() const { return FramePtr; } // FIXME: Move to FrameInfok unsigned getSlotSize() const { return SlotSize; } }; diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td index c177ba1d52f7c..d235d2b40b15a 100644 --- a/lib/Target/X86/X86RegisterInfo.td +++ b/lib/Target/X86/X86RegisterInfo.td @@ -438,7 +438,6 @@ def LOW32_ADDR_ACCESS_RBP : RegisterClass<"X86", [i32], 32, (add LOW32_ADDR_ACCESS, RBP)>; // A class to support the 'A' assembler constraint: [ER]AX then [ER]DX. -def GR16_AD : RegisterClass<"X86", [i16], 16, (add AX, DX)>; def GR32_AD : RegisterClass<"X86", [i32], 32, (add EAX, EDX)>; def GR64_AD : RegisterClass<"X86", [i64], 64, (add RAX, RDX)>; diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp index 375b74c494d92..8e26849ea9e37 100644 --- a/lib/Transforms/IPO/DeadArgumentElimination.cpp +++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp @@ -167,15 +167,12 @@ bool DeadArgumentEliminationPass::DeleteDeadVarargs(Function &Fn) { // Drop any attributes that were on the vararg arguments. 
AttributeList PAL = CS.getAttributes(); - if (!PAL.isEmpty() && PAL.getSlotIndex(PAL.getNumSlots() - 1) > NumArgs) { - SmallVector<AttributeList, 8> AttributesVec; - for (unsigned i = 0; PAL.getSlotIndex(i) <= NumArgs; ++i) - AttributesVec.push_back(PAL.getSlotAttributes(i)); - if (PAL.hasAttributes(AttributeList::FunctionIndex)) - AttributesVec.push_back(AttributeList::get(Fn.getContext(), - AttributeList::FunctionIndex, - PAL.getFnAttributes())); - PAL = AttributeList::get(Fn.getContext(), AttributesVec); + if (!PAL.isEmpty()) { + SmallVector<AttributeSet, 8> ArgAttrs; + for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo) + ArgAttrs.push_back(PAL.getParamAttributes(ArgNo)); + PAL = AttributeList::get(Fn.getContext(), PAL.getFnAttributes(), + PAL.getRetAttributes(), ArgAttrs); } SmallVector<OperandBundleDef, 1> OpBundles; diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp index 4d13b3f406887..9648883b7f275 100644 --- a/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/lib/Transforms/IPO/FunctionAttrs.cpp @@ -222,15 +222,11 @@ static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter) { MadeChange = true; // Clear out any existing attributes. - AttrBuilder B; - B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone); - F->removeAttributes( - AttributeList::FunctionIndex, - AttributeList::get(F->getContext(), AttributeList::FunctionIndex, B)); + F->removeFnAttr(Attribute::ReadOnly); + F->removeFnAttr(Attribute::ReadNone); // Add in the new attribute. - F->addAttribute(AttributeList::FunctionIndex, - ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone); + F->addFnAttr(ReadsMemory ? 
Attribute::ReadOnly : Attribute::ReadNone); if (ReadsMemory) ++NumReadOnly; @@ -495,9 +491,6 @@ determinePointerReadAttrs(Argument *A, static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) { bool Changed = false; - AttrBuilder B; - B.addAttribute(Attribute::Returned); - // Check each function in turn, determining if an argument is always returned. for (Function *F : SCCNodes) { // We can infer and propagate function attributes only when we know that the @@ -535,7 +528,7 @@ static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) { if (Value *RetArg = FindRetArg()) { auto *A = cast<Argument>(RetArg); - A->addAttr(AttributeList::get(F->getContext(), A->getArgNo() + 1, B)); + A->addAttr(Attribute::Returned); ++NumReturned; Changed = true; } @@ -593,9 +586,6 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { ArgumentGraph AG; - AttrBuilder B; - B.addAttribute(Attribute::NoCapture); - // Check each function in turn, determining which pointer arguments are not // captured. for (Function *F : SCCNodes) { @@ -614,7 +604,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E; ++A) { if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) { - A->addAttr(AttributeList::get(F->getContext(), A->getArgNo() + 1, B)); + A->addAttr(Attribute::NoCapture); ++NumNoCapture; Changed = true; } @@ -633,8 +623,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { if (!Tracker.Captured) { if (Tracker.Uses.empty()) { // If it's trivially not captured, mark it nocapture now. 
- A->addAttr( - AttributeList::get(F->getContext(), A->getArgNo() + 1, B)); + A->addAttr(Attribute::NoCapture); ++NumNoCapture; Changed = true; } else { @@ -660,9 +649,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { Self.insert(&*A); Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self); if (R != Attribute::None) { - AttrBuilder B; - B.addAttribute(R); - A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B)); + A->addAttr(R); Changed = true; R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg; } @@ -687,7 +674,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { if (ArgumentSCC[0]->Uses.size() == 1 && ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) { Argument *A = ArgumentSCC[0]->Definition; - A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B)); + A->addAttr(Attribute::NoCapture); ++NumNoCapture; Changed = true; } @@ -729,7 +716,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) { Argument *A = ArgumentSCC[i]->Definition; - A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B)); + A->addAttr(Attribute::NoCapture); ++NumNoCapture; Changed = true; } @@ -760,15 +747,12 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) { } if (ReadAttr != Attribute::None) { - AttrBuilder B, R; - B.addAttribute(ReadAttr); - R.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone); for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) { Argument *A = ArgumentSCC[i]->Definition; // Clear out existing readonly/readnone attributes - A->removeAttr( - AttributeList::get(A->getContext(), A->getArgNo() + 1, R)); - A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B)); + A->removeAttr(Attribute::ReadOnly); + A->removeAttr(Attribute::ReadNone); + A->addAttr(ReadAttr); ReadAttr == Attribute::ReadOnly ? 
++NumReadOnlyArg : ++NumReadNoneArg; Changed = true; } diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index ade4f21ceb524..ae9d4ce11e0db 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -1979,16 +1979,11 @@ static void ChangeCalleesToFastCall(Function *F) { } } -static AttributeList StripNest(LLVMContext &C, const AttributeList &Attrs) { - for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { - unsigned Index = Attrs.getSlotIndex(i); - if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest)) - continue; - - // There can be only one. - return Attrs.removeAttribute(C, Index, Attribute::Nest); - } - +static AttributeList StripNest(LLVMContext &C, AttributeList Attrs) { + // There can be at most one attribute set with a nest attribute. + unsigned NestIndex; + if (Attrs.hasAttrSomewhere(Attribute::Nest, &NestIndex)) + return Attrs.removeAttribute(C, NestIndex, Attribute::Nest); return Attrs; } diff --git a/lib/Transforms/IPO/SampleProfile.cpp b/lib/Transforms/IPO/SampleProfile.cpp index 3371de6e3d147..e755e2bd8f260 100644 --- a/lib/Transforms/IPO/SampleProfile.cpp +++ b/lib/Transforms/IPO/SampleProfile.cpp @@ -43,6 +43,7 @@ #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" +#include "llvm/IR/ValueSymbolTable.h" #include "llvm/Pass.h" #include "llvm/ProfileData/InstrProf.h" #include "llvm/ProfileData/SampleProfReader.h" @@ -208,6 +209,12 @@ protected: /// the same number of times. EquivalenceClassMap EquivalenceClass; + /// Map from function name to Function *. Used to find the function from + /// the function name. If the function name contains suffix, additional + /// entry is added to map from the stripped name to the function if there + /// is one-to-one mapping. + StringMap<Function *> SymbolMap; + /// \brief Dominance, post-dominance and loop information. 
std::unique_ptr<DominatorTree> DT; std::unique_ptr<DominatorTreeBase<BasicBlock>> PDT; @@ -670,7 +677,7 @@ bool SampleProfileLoader::inlineHotFunctions( for (auto &I : BB.getInstList()) { const FunctionSamples *FS = nullptr; if ((isa<CallInst>(I) || isa<InvokeInst>(I)) && - (FS = findCalleeFunctionSamples(I))) { + !isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(I))) { Candidates.push_back(&I); if (callsiteIsHot(Samples, FS)) Hot = true; @@ -689,7 +696,10 @@ bool SampleProfileLoader::inlineHotFunctions( for (const auto *FS : findIndirectCallFunctionSamples(*I)) { auto CalleeFunctionName = FS->getName(); const char *Reason = "Callee function not available"; - CalledFunction = F.getParent()->getFunction(CalleeFunctionName); + auto R = SymbolMap.find(CalleeFunctionName); + if (R == SymbolMap.end()) + continue; + CalledFunction = R->getValue(); if (CalledFunction && isLegalToPromote(I, CalledFunction, &Reason)) { // The indirect target was promoted and inlined in the profile, as a // result, we do not have profile info for the branch probability. @@ -1181,8 +1191,11 @@ void SampleProfileLoader::propagateWeights(Function &F) { if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI)) continue; + DebugLoc BranchLoc = TI->getDebugLoc(); DEBUG(dbgs() << "\nGetting weights for branch at line " - << TI->getDebugLoc().getLine() << ".\n"); + << ((BranchLoc) ? Twine(BranchLoc.getLine()) + : Twine("<UNKNOWN LOCATION>")) + << ".\n"); SmallVector<uint32_t, 4> Weights; uint32_t MaxWeight = 0; DebugLoc MaxDestLoc; @@ -1219,7 +1232,6 @@ void SampleProfileLoader::propagateWeights(Function &F) { DEBUG(dbgs() << "SUCCESS. 
Found non-zero weights.\n"); TI->setMetadata(llvm::LLVMContext::MD_prof, MDB.createBranchWeights(Weights)); - DebugLoc BranchLoc = TI->getDebugLoc(); emitOptimizationRemark( Ctx, DEBUG_TYPE, F, MaxDestLoc, Twine("most popular destination for conditional branches at ") + @@ -1414,6 +1426,26 @@ bool SampleProfileLoader::runOnModule(Module &M) { for (const auto &I : Reader->getProfiles()) TotalCollectedSamples += I.second.getTotalSamples(); + // Populate the symbol map. + for (const auto &N_F : M.getValueSymbolTable()) { + std::string OrigName = N_F.getKey(); + Function *F = dyn_cast<Function>(N_F.getValue()); + if (F == nullptr) + continue; + SymbolMap[OrigName] = F; + auto pos = OrigName.find('.'); + if (pos != std::string::npos) { + std::string NewName = OrigName.substr(0, pos); + auto r = SymbolMap.insert(std::make_pair(NewName, F)); + // Failiing to insert means there is already an entry in SymbolMap, + // thus there are multiple functions that are mapped to the same + // stripped name. In this case of name conflicting, set the value + // to nullptr to avoid confusion. 
+ if (!r.second) + r.first->second = nullptr; + } + } + bool retval = false; for (auto &F : M) if (!F.isDeclaration()) { diff --git a/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp index 65deb82cd2a5f..9801a0a614165 100644 --- a/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp +++ b/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp @@ -363,6 +363,7 @@ void splitAndWriteThinLTOBitcode( W.writeModule(&M, /*ShouldPreserveUseListOrder=*/false, &Index, /*GenerateHash=*/true, &ModHash); W.writeModule(MergedM.get()); + W.writeStrtab(); OS << Buffer; // If a minimized bitcode module was requested for the thin link, @@ -375,6 +376,7 @@ void splitAndWriteThinLTOBitcode( W2.writeModule(&M, /*ShouldPreserveUseListOrder=*/false, &Index, /*GenerateHash=*/false, &ModHash); W2.writeModule(MergedM.get()); + W2.writeStrtab(); *ThinLinkOS << Buffer; } } diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp index 174ec8036274e..e30a4bafb9b0c 100644 --- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -1044,14 +1044,14 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { const APInt *RHSC; if (match(RHS, m_APInt(RHSC))) { - if (RHSC->isSignBit()) { + if (RHSC->isSignMask()) { // If wrapping is not allowed, then the addition must set the sign bit: - // X + (signbit) --> X | signbit + // X + (signmask) --> X | signmask if (I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) return BinaryOperator::CreateOr(LHS, RHS); // If wrapping is allowed, then the addition flips the sign bit of LHS: - // X + (signbit) --> X ^ signbit + // X + (signmask) --> X ^ signmask return BinaryOperator::CreateXor(LHS, RHS); } @@ -1120,9 +1120,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI), XorLHS); } - // (X + signbit) + C could have gotten canonicalized to (X ^ signbit) + C, - // 
transform them into (X + (signbit ^ C)) - if (XorRHS->getValue().isSignBit()) + // (X + signmask) + C could have gotten canonicalized to (X^signmask) + C, + // transform them into (X + (signmask ^ C)) + if (XorRHS->getValue().isSignMask()) return BinaryOperator::CreateAdd(XorLHS, ConstantExpr::getXor(XorRHS, CI)); } diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index b2a41c699202a..3a98e8937bda7 100644 --- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -2078,7 +2078,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { Value *NOr = Builder->CreateOr(A, Op1); NOr->takeName(Op0); return BinaryOperator::CreateXor(NOr, - cast<Instruction>(Op0)->getOperand(1)); + ConstantInt::get(NOr->getType(), *C)); } // Y|(X^C) -> (X|Y)^C iff Y&C == 0 @@ -2087,7 +2087,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { Value *NOr = Builder->CreateOr(A, Op0); NOr->takeName(Op0); return BinaryOperator::CreateXor(NOr, - cast<Instruction>(Op1)->getOperand(1)); + ConstantInt::get(NOr->getType(), *C)); } } @@ -2480,8 +2480,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI); return BinaryOperator::CreateSub(SubOne(NegOp0CI), Op0I->getOperand(0)); - } else if (RHSC->getValue().isSignBit()) { - // (X + C) ^ signbit -> (X + C + signbit) + } else if (RHSC->getValue().isSignMask()) { + // (X + C) ^ signmask -> (X + C + signmask) Constant *C = Builder->getInt(RHSC->getValue() + Op0CI->getValue()); return BinaryOperator::CreateAdd(Op0I->getOperand(0), C); diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp index 69484f47223f7..e7aa1a4573714 100644 --- a/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -839,7 +839,8 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value 
*Op0, // Length bits. if (CI0) { APInt Elt = CI0->getValue(); - Elt = Elt.lshr(Index).zextOrTrunc(Length); + Elt.lshrInPlace(Index); + Elt = Elt.zextOrTrunc(Length); return LowConstantHighUndef(Elt.getZExtValue()); } @@ -1036,7 +1037,7 @@ static Value *simplifyX86vpermilvar(const IntrinsicInst &II, // The PD variants uses bit 1 to select per-lane element index, so // shift down to convert to generic shuffle mask index. if (IsPD) - Index = Index.lshr(1); + Index.lshrInPlace(1); // The _256 variants are a bit trickier since the mask bits always index // into the corresponding 128 half. In order to convert to a generic @@ -4067,21 +4068,15 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { } if (FT->getNumParams() < NumActualArgs && FT->isVarArg() && - !CallerPAL.isEmpty()) + !CallerPAL.isEmpty()) { // In this case we have more arguments than the new function type, but we // won't be dropping them. Check that these extra arguments have attributes // that are compatible with being a vararg call argument. - for (unsigned i = CallerPAL.getNumSlots(); i; --i) { - unsigned Index = CallerPAL.getSlotIndex(i - 1); - if (Index <= FT->getNumParams()) - break; - - // Check if it has an attribute that's incompatible with varargs. - AttributeList PAttrs = CallerPAL.getSlotAttributes(i - 1); - if (PAttrs.hasAttribute(Index, Attribute::StructRet)) - return false; - } - + unsigned SRetIdx; + if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) && + SRetIdx > FT->getNumParams()) + return false; + } // Okay, we decided that this is a safe thing to do: go ahead and start // inserting cast instructions as necessary. 
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp index 25683132c7860..9127ddca59150 100644 --- a/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -1591,7 +1591,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) { // GEP into CI would undo canonicalizing addrspacecast with different // pointer types, causing infinite loops. (!isa<AddrSpaceCastInst>(CI) || - GEP->getType() == GEP->getPointerOperand()->getType())) { + GEP->getType() == GEP->getPointerOperandType())) { // Changing the cast operand is usually not a good idea but it is safe // here because the pointer operand is being replaced with another // pointer operand so the opcode doesn't need to change. diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp index bbafa9e9f4687..003029ae39d56 100644 --- a/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -140,7 +140,7 @@ static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, case ICmpInst::ICMP_UGE: // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc) TrueIfSigned = true; - return RHS.isSignBit(); + return RHS.isSignMask(); default: return false; } @@ -1532,14 +1532,14 @@ Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp, } if (Xor->hasOneUse()) { - // (icmp u/s (xor X SignBit), C) -> (icmp s/u X, (xor C SignBit)) - if (!Cmp.isEquality() && XorC->isSignBit()) { + // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask)) + if (!Cmp.isEquality() && XorC->isSignMask()) { Pred = Cmp.isSigned() ? 
Cmp.getUnsignedPredicate() : Cmp.getSignedPredicate(); return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC)); } - // (icmp u/s (xor X ~SignBit), C) -> (icmp s/u X, (xor C ~SignBit)) + // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask)) if (!Cmp.isEquality() && XorC->isMaxSignedValue()) { Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate() : Cmp.getSignedPredicate(); @@ -2402,9 +2402,9 @@ Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp, const APInt &Upper = CR.getUpper(); const APInt &Lower = CR.getLower(); if (Cmp.isSigned()) { - if (Lower.isSignBit()) + if (Lower.isSignMask()) return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper)); - if (Upper.isSignBit()) + if (Upper.isSignMask()) return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower)); } else { if (Lower.isMinValue()) @@ -2604,7 +2604,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, break; // Replace (and X, (1 << size(X)-1) != 0) with x s< 0 - if (BOC->isSignBit()) { + if (BOC->isSignMask()) { Constant *Zero = Constant::getNullValue(BOp0->getType()); auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE; return new ICmpInst(NewPred, BOp0, Zero); @@ -3032,9 +3032,9 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) { if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b return new ICmpInst(I.getPredicate(), BO0->getOperand(0), BO1->getOperand(0)); - // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b + // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) { - if (CI->getValue().isSignBit()) { + if (CI->getValue().isSignMask()) { ICmpInst::Predicate Pred = I.isSigned() ? 
I.getUnsignedPredicate() : I.getSignedPredicate(); return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); @@ -3797,7 +3797,7 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal, static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth, bool isSignCheck) { if (isSignCheck) - return APInt::getSignBit(BitWidth); + return APInt::getSignMask(BitWidth); ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1)); if (!CI) return APInt::getAllOnesValue(BitWidth); diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index 6288e054f1bc5..675553017838b 100644 --- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -931,6 +931,18 @@ static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr, return nullptr; } +static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) { + if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) { + const Value *GEPI0 = GEPI->getOperand(0); + if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) + return true; + } + if (isa<UndefValue>(Op) || + (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) + return true; + return false; +} + Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { Value *Op = LI.getOperand(0); @@ -979,27 +991,13 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { if (!LI.isUnordered()) return nullptr; // load(gep null, ...) -> unreachable - if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) { - const Value *GEPI0 = GEPI->getOperand(0); - // TODO: Consider a target hook for valid address spaces for this xform. - if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){ - // Insert a new store to null instruction before the load to indicate - // that this code is not reachable. 
We do this instead of inserting - // an unreachable instruction directly because we cannot modify the - // CFG. - new StoreInst(UndefValue::get(LI.getType()), - Constant::getNullValue(Op->getType()), &LI); - return replaceInstUsesWith(LI, UndefValue::get(LI.getType())); - } - } - // load null/undef -> unreachable - // TODO: Consider a target hook for valid address spaces for this xform. - if (isa<UndefValue>(Op) || - (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) { - // Insert a new store to null instruction before the load to indicate that - // this code is not reachable. We do this instead of inserting an - // unreachable instruction directly because we cannot modify the CFG. + // TODO: Consider a target hook for valid address spaces for this xforms. + if (canSimplifyNullLoadOrGEP(LI, Op)) { + // Insert a new store to null instruction before the load to indicate + // that this code is not reachable. We do this instead of inserting + // an unreachable instruction directly because we cannot modify the + // CFG. new StoreInst(UndefValue::get(LI.getType()), Constant::getNullValue(Op->getType()), &LI); return replaceInstUsesWith(LI, UndefValue::get(LI.getType())); diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp index f1ac82057e6cf..ce66581a491a0 100644 --- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -944,22 +944,21 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) { } } - if (ConstantInt *One = dyn_cast<ConstantInt>(Op0)) { - if (One->isOne() && !I.getType()->isIntegerTy(1)) { - bool isSigned = I.getOpcode() == Instruction::SDiv; - if (isSigned) { - // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the - // result is one, if Op1 is -1 then the result is minus one, otherwise - // it's zero. 
- Value *Inc = Builder->CreateAdd(Op1, One); - Value *Cmp = Builder->CreateICmpULT( - Inc, ConstantInt::get(I.getType(), 3)); - return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0)); - } else { - // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the - // result is one, otherwise it's zero. - return new ZExtInst(Builder->CreateICmpEQ(Op1, One), I.getType()); - } + if (match(Op0, m_One())) { + assert(!I.getType()->getScalarType()->isIntegerTy(1) && + "i1 divide not removed?"); + if (I.getOpcode() == Instruction::SDiv) { + // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the + // result is one, if Op1 is -1 then the result is minus one, otherwise + // it's zero. + Value *Inc = Builder->CreateAdd(Op1, Op0); + Value *Cmp = Builder->CreateICmpULT( + Inc, ConstantInt::get(I.getType(), 3)); + return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0)); + } else { + // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the + // result is one, otherwise it's zero. + return new ZExtInst(Builder->CreateICmpEQ(Op1, Op0), I.getType()); } } @@ -1238,25 +1237,23 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) { // If the sign bits of both operands are zero (i.e. we can prove they are // unsigned inputs), turn this into a udiv. 
- if (I.getType()->isIntegerTy()) { - APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits())); - if (MaskedValueIsZero(Op0, Mask, 0, &I)) { - if (MaskedValueIsZero(Op1, Mask, 0, &I)) { - // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set - auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); - BO->setIsExact(I.isExact()); - return BO; - } + APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits())); + if (MaskedValueIsZero(Op0, Mask, 0, &I)) { + if (MaskedValueIsZero(Op1, Mask, 0, &I)) { + // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set + auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); + BO->setIsExact(I.isExact()); + return BO; + } - if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) { - // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y) - // Safe because the only negative value (1 << Y) can take on is - // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have - // the sign bit set. - auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); - BO->setIsExact(I.isExact()); - return BO; - } + if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) { + // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y) + // Safe because the only negative value (1 << Y) can take on is + // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have + // the sign bit set. + auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); + BO->setIsExact(I.isExact()); + return BO; } } @@ -1546,13 +1543,11 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) { // If the sign bits of both operands are zero (i.e. we can prove they are // unsigned inputs), turn this into a urem. 
- if (I.getType()->isIntegerTy()) { - APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits())); - if (MaskedValueIsZero(Op1, Mask, 0, &I) && - MaskedValueIsZero(Op0, Mask, 0, &I)) { - // X srem Y -> X urem Y, iff X and Y don't have sign bit set - return BinaryOperator::CreateURem(Op0, Op1, I.getName()); - } + APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits())); + if (MaskedValueIsZero(Op1, Mask, 0, &I) && + MaskedValueIsZero(Op0, Mask, 0, &I)) { + // X srem Y -> X urem Y, iff X and Y don't have sign bit set + return BinaryOperator::CreateURem(Op0, Op1, I.getName()); } // If it's a constant vector, flip any negative values positive. diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp index 693b6c95c169c..5d6d899da4b5f 100644 --- a/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -618,7 +618,7 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI, { unsigned BitWidth = DL.getTypeSizeInBits(TrueVal->getType()->getScalarType()); - APInt MinSignedValue = APInt::getSignBit(BitWidth); + APInt MinSignedValue = APInt::getSignedMinValue(BitWidth); Value *X; const APInt *Y, *C; bool TrueWhenUnset; diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp index 9aa679c60e47b..f77d713b9b071 100644 --- a/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -370,7 +370,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1, MaskV <<= Op1C->getZExtValue(); else { assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift"); - MaskV = MaskV.lshr(Op1C->getZExtValue()); + MaskV.lshrInPlace(Op1C->getZExtValue()); } // shift1 & 0x00FF @@ -760,7 +760,7 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) { } // See if we can turn a signed shr into an unsigned shr. 
- if (MaskedValueIsZero(Op0, APInt::getSignBit(BitWidth), 0, &I)) + if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I)) return BinaryOperator::CreateLShr(Op0, Op1); return nullptr; diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp index 4e6f02058d839..2ba052b7e02d3 100644 --- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp +++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp @@ -38,7 +38,7 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, // If there are no bits set that aren't demanded, nothing to do. Demanded = Demanded.zextOrTrunc(C->getBitWidth()); - if ((~Demanded & *C) == 0) + if (C->isSubsetOf(Demanded)) return false; // This instruction is producing bits that are not demanded. Shrink the RHS. @@ -117,27 +117,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownOne.getBitWidth() == BitWidth && "Value *V, DemandedMask, KnownZero and KnownOne " "must have same BitWidth"); - const APInt *C; - if (match(V, m_APInt(C))) { - // We know all of the bits for a scalar constant or a splat vector constant! - KnownOne = *C & DemandedMask; - KnownZero = ~KnownOne & DemandedMask; - return nullptr; - } - if (isa<ConstantPointerNull>(V)) { - // We know all of the bits for a constant! - KnownOne.clearAllBits(); - KnownZero = DemandedMask; + + if (isa<Constant>(V)) { + computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI); return nullptr; } KnownZero.clearAllBits(); KnownOne.clearAllBits(); - if (DemandedMask == 0) { // Not demanding any bits from V. - if (isa<UndefValue>(V)) - return nullptr; + if (DemandedMask == 0) // Not demanding any bits from V. return UndefValue::get(VTy); - } if (Depth == 6) // Limit search depth. 
return nullptr; @@ -187,16 +176,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne)) return Constant::getIntegerValue(VTy, IKnownOne); // If all of the demanded bits are known 1 on one side, return the other. // These bits cannot contribute to the result of the 'and'. - if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) == - (DemandedMask & ~LHSKnownZero)) + if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne)) return I->getOperand(0); - if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) == - (DemandedMask & ~RHSKnownZero)) + if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne)) return I->getOperand(1); // If the RHS is a constant, see if we can simplify it. @@ -224,25 +211,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne)) return Constant::getIntegerValue(VTy, IKnownOne); // If all of the demanded bits are known zero on one side, return the other. // These bits cannot contribute to the result of the 'or'. - if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) == - (DemandedMask & ~LHSKnownOne)) + if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero)) return I->getOperand(0); - if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) == - (DemandedMask & ~RHSKnownOne)) - return I->getOperand(1); - - // If all of the potentially set bits on one side are known to be set on - // the other side, just use the 'other' side. 
- if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) == - (DemandedMask & (~RHSKnownZero))) - return I->getOperand(0); - if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) == - (DemandedMask & (~LHSKnownZero))) + if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero)) return I->getOperand(1); // If the RHS is a constant, see if we can simplify it. @@ -271,20 +247,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne)) return Constant::getIntegerValue(VTy, IKnownOne); // If all of the demanded bits are known zero on one side, return the other. // These bits cannot contribute to the result of the 'xor'. - if ((DemandedMask & RHSKnownZero) == DemandedMask) + if (DemandedMask.isSubsetOf(RHSKnownZero)) return I->getOperand(0); - if ((DemandedMask & LHSKnownZero) == DemandedMask) + if (DemandedMask.isSubsetOf(LHSKnownZero)) return I->getOperand(1); // If all of the demanded bits are known to be zero on one side or the // other, turn this into an *inclusive* or. // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 - if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) { + if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownZero)) { Instruction *Or = BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1), I->getName()); @@ -295,14 +271,12 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // bits on that side are also known to be set on the other side, turn this // into an AND, as we know the bits will be cleared. // e.g. 
(X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 - if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) { - // all known - if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) { - Constant *AndC = Constant::getIntegerValue(VTy, - ~RHSKnownOne & DemandedMask); - Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC); - return InsertNewInstWith(And, *I); - } + if (DemandedMask.isSubsetOf(RHSKnownZero|RHSKnownOne) && + RHSKnownOne.isSubsetOf(LHSKnownOne)) { + Constant *AndC = Constant::getIntegerValue(VTy, + ~RHSKnownOne & DemandedMask); + Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC); + return InsertNewInstWith(And, *I); } // If the RHS is a constant, see if we can simplify it. @@ -529,9 +503,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownZero.setLowBits(ShiftAmt); } break; - case Instruction::LShr: - // For a logical shift right - if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { + case Instruction::LShr: { + const APInt *SA; + if (match(I->getOperand(1), m_APInt(SA))) { uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1); // Unsigned shift right. @@ -546,13 +520,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, Depth + 1)) return I; assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?"); - KnownZero = KnownZero.lshr(ShiftAmt); - KnownOne = KnownOne.lshr(ShiftAmt); + KnownZero.lshrInPlace(ShiftAmt); + KnownOne.lshrInPlace(ShiftAmt); if (ShiftAmt) KnownZero.setHighBits(ShiftAmt); // high bits known zero. } break; - case Instruction::AShr: + } + case Instruction::AShr: { // If this is an arithmetic shift right and only the low-bit is set, we can // always convert this into a logical shr, even if the shift amount is // variable. 
The low bit of the shift cannot be an input sign bit unless @@ -566,15 +541,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the sign bit is the only bit demanded by this ashr, then there is no // need to do it, the shift doesn't change the high bit. - if (DemandedMask.isSignBit()) + if (DemandedMask.isSignMask()) return I->getOperand(0); - if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { + const APInt *SA; + if (match(I->getOperand(1), m_APInt(SA))) { uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1); // Signed shift right. APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt)); - // If any of the "high bits" are demanded, we should set the sign bit as + // If any of the high bits are demanded, we should set the sign bit as // demanded. if (DemandedMask.countLeadingZeros() <= ShiftAmt) DemandedMaskIn.setSignBit(); @@ -587,31 +563,32 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne, Depth + 1)) return I; + assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?"); // Compute the new bits that are at the top now. APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt)); - KnownZero = KnownZero.lshr(ShiftAmt); - KnownOne = KnownOne.lshr(ShiftAmt); + KnownZero.lshrInPlace(ShiftAmt); + KnownOne.lshrInPlace(ShiftAmt); // Handle the sign bits. - APInt SignBit(APInt::getSignBit(BitWidth)); + APInt SignMask(APInt::getSignMask(BitWidth)); // Adjust to where it is now in the mask. - SignBit = SignBit.lshr(ShiftAmt); + SignMask.lshrInPlace(ShiftAmt); // If the input sign bit is known to be zero, or if none of the top bits // are demanded, turn this into an unsigned shift right. if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] || (HighBits & ~DemandedMask) == HighBits) { - // Perform the logical shift right. 
- BinaryOperator *NewVal = BinaryOperator::CreateLShr(I->getOperand(0), - SA, I->getName()); - NewVal->setIsExact(cast<BinaryOperator>(I)->isExact()); - return InsertNewInstWith(NewVal, *I); - } else if ((KnownOne & SignBit) != 0) { // New bits are known one. + BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0), + I->getOperand(1)); + LShr->setIsExact(cast<BinaryOperator>(I)->isExact()); + return InsertNewInstWith(LShr, *I); + } else if ((KnownOne & SignMask) != 0) { // New bits are known one. KnownOne |= HighBits; } } break; + } case Instruction::SRem: if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { // X % -1 demands all the bits because we don't want to introduce @@ -624,7 +601,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, return I->getOperand(0); APInt LowBits = RA - 1; - APInt Mask2 = LowBits | APInt::getSignBit(BitWidth); + APInt Mask2 = LowBits | APInt::getSignMask(BitWidth); if (SimplifyDemandedBits(I, 0, Mask2, LHSKnownZero, LHSKnownOne, Depth + 1)) return I; @@ -635,26 +612,26 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If LHS is non-negative or has all low bits zero, then the upper bits // are all zero. - if (LHSKnownZero.isNegative() || ((LHSKnownZero & LowBits) == LowBits)) + if (LHSKnownZero.isSignBitSet() || ((LHSKnownZero & LowBits) == LowBits)) KnownZero |= ~LowBits; // If LHS is negative and not all low bits are zero, then the upper bits // are all one. - if (LHSKnownOne.isNegative() && ((LHSKnownOne & LowBits) != 0)) + if (LHSKnownOne.isSignBitSet() && ((LHSKnownOne & LowBits) != 0)) KnownOne |= ~LowBits; assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?"); + break; } } // The sign bit is the LHS's sign bit, except when the result of the // remainder is zero. 
- if (DemandedMask.isNegative() && KnownZero.isNonNegative()) { - APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); + if (DemandedMask.isSignBitSet()) { computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1, CxtI); // If it's known zero, our sign bit is also zero. - if (LHSKnownZero.isNegative()) + if (LHSKnownZero.isSignBitSet()) KnownZero.setSignBit(); } break; @@ -744,7 +721,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(KnownZero|KnownOne)) return Constant::getIntegerValue(VTy, KnownOne); return nullptr; } @@ -783,17 +760,15 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne)) return Constant::getIntegerValue(ITy, IKnownOne); // If all of the demanded bits are known 1 on one side, return the other. // These bits cannot contribute to the result of the 'and' in this // context. - if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) == - (DemandedMask & ~LHSKnownZero)) + if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne)) return I->getOperand(0); - if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) == - (DemandedMask & ~RHSKnownZero)) + if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne)) return I->getOperand(1); KnownZero = std::move(IKnownZero); @@ -817,26 +792,15 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I, // If the client is only demanding bits that we know, return the known // constant. 
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne)) return Constant::getIntegerValue(ITy, IKnownOne); // If all of the demanded bits are known zero on one side, return the // other. These bits cannot contribute to the result of the 'or' in this // context. - if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) == - (DemandedMask & ~LHSKnownOne)) + if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero)) return I->getOperand(0); - if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) == - (DemandedMask & ~RHSKnownOne)) - return I->getOperand(1); - - // If all of the potentially set bits on one side are known to be set on - // the other side, just use the 'other' side. - if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) == - (DemandedMask & (~RHSKnownZero))) - return I->getOperand(0); - if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) == - (DemandedMask & (~LHSKnownZero))) + if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero)) return I->getOperand(1); KnownZero = std::move(IKnownZero); @@ -861,14 +825,14 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne)) return Constant::getIntegerValue(ITy, IKnownOne); // If all of the demanded bits are known zero on one side, return the // other. - if ((DemandedMask & RHSKnownZero) == DemandedMask) + if (DemandedMask.isSubsetOf(RHSKnownZero)) return I->getOperand(0); - if ((DemandedMask & LHSKnownZero) == DemandedMask) + if (DemandedMask.isSubsetOf(LHSKnownZero)) return I->getOperand(1); // Output known-0 bits are known if clear or set in both the LHS & RHS. @@ -883,7 +847,7 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I, // If this user is only demanding bits that we know, return the known // constant. 
- if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) + if (DemandedMask.isSubsetOf(KnownZero|KnownOne)) return Constant::getIntegerValue(ITy, KnownOne); break; @@ -1641,7 +1605,52 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, UndefElts.setHighBits(VWidth / 2); break; case Intrinsic::amdgcn_buffer_load: - case Intrinsic::amdgcn_buffer_load_format: { + case Intrinsic::amdgcn_buffer_load_format: + case Intrinsic::amdgcn_image_sample: + case Intrinsic::amdgcn_image_sample_cl: + case Intrinsic::amdgcn_image_sample_d: + case Intrinsic::amdgcn_image_sample_d_cl: + case Intrinsic::amdgcn_image_sample_l: + case Intrinsic::amdgcn_image_sample_b: + case Intrinsic::amdgcn_image_sample_b_cl: + case Intrinsic::amdgcn_image_sample_lz: + case Intrinsic::amdgcn_image_sample_cd: + case Intrinsic::amdgcn_image_sample_cd_cl: + + case Intrinsic::amdgcn_image_sample_c: + case Intrinsic::amdgcn_image_sample_c_cl: + case Intrinsic::amdgcn_image_sample_c_d: + case Intrinsic::amdgcn_image_sample_c_d_cl: + case Intrinsic::amdgcn_image_sample_c_l: + case Intrinsic::amdgcn_image_sample_c_b: + case Intrinsic::amdgcn_image_sample_c_b_cl: + case Intrinsic::amdgcn_image_sample_c_lz: + case Intrinsic::amdgcn_image_sample_c_cd: + case Intrinsic::amdgcn_image_sample_c_cd_cl: + + case Intrinsic::amdgcn_image_sample_o: + case Intrinsic::amdgcn_image_sample_cl_o: + case Intrinsic::amdgcn_image_sample_d_o: + case Intrinsic::amdgcn_image_sample_d_cl_o: + case Intrinsic::amdgcn_image_sample_l_o: + case Intrinsic::amdgcn_image_sample_b_o: + case Intrinsic::amdgcn_image_sample_b_cl_o: + case Intrinsic::amdgcn_image_sample_lz_o: + case Intrinsic::amdgcn_image_sample_cd_o: + case Intrinsic::amdgcn_image_sample_cd_cl_o: + + case Intrinsic::amdgcn_image_sample_c_o: + case Intrinsic::amdgcn_image_sample_c_cl_o: + case Intrinsic::amdgcn_image_sample_c_d_o: + case Intrinsic::amdgcn_image_sample_c_d_cl_o: + case Intrinsic::amdgcn_image_sample_c_l_o: + case 
Intrinsic::amdgcn_image_sample_c_b_o: + case Intrinsic::amdgcn_image_sample_c_b_cl_o: + case Intrinsic::amdgcn_image_sample_c_lz_o: + case Intrinsic::amdgcn_image_sample_c_cd_o: + case Intrinsic::amdgcn_image_sample_c_cd_cl_o: + + case Intrinsic::amdgcn_image_getlod: { if (VWidth == 1 || !DemandedElts.isMask()) return nullptr; @@ -1656,8 +1665,17 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts); - Function *NewIntrin = Intrinsic::getDeclaration(M, II->getIntrinsicID(), - NewTy); + auto IID = II->getIntrinsicID(); + + bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load || + IID == Intrinsic::amdgcn_buffer_load_format; + + Function *NewIntrin = IsBuffer ? + Intrinsic::getDeclaration(M, IID, NewTy) : + // Samplers have 3 mangled types. + Intrinsic::getDeclaration(M, IID, + { NewTy, II->getArgOperand(0)->getType(), + II->getArgOperand(1)->getType()}); SmallVector<Value *, 5> Args; for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I) @@ -1669,6 +1687,29 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, CallInst *NewCall = Builder->CreateCall(NewIntrin, Args); NewCall->takeName(II); NewCall->copyMetadata(*II); + + if (!IsBuffer) { + ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3)); + if (DMask) { + unsigned DMaskVal = DMask->getZExtValue() & 0xf; + + unsigned PopCnt = 0; + unsigned NewDMask = 0; + for (unsigned I = 0; I < 4; ++I) { + const unsigned Bit = 1 << I; + if (!!(DMaskVal & Bit)) { + if (++PopCnt > NewNumElts) + break; + + NewDMask |= Bit; + } + } + + NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(), NewDMask)); + } + } + + if (NewNumElts == 1) { return Builder->CreateInsertElement(UndefValue::get(V->getType()), NewCall, static_cast<uint64_t>(0)); diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp index 
88ef17bbc8fa6..81f2d9fa179f9 100644 --- a/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -148,9 +148,9 @@ static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) { bool Overflow = false; if (Opcode == Instruction::Add) - BVal->sadd_ov(*CVal, Overflow); + (void)BVal->sadd_ov(*CVal, Overflow); else - BVal->ssub_ov(*CVal, Overflow); + (void)BVal->ssub_ov(*CVal, Overflow); return !Overflow; } diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 94cfc69ed5551..036dd8d39a085 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -2586,7 +2586,7 @@ void FunctionStackPoisoner::processStaticAllocas() { Value *NewAllocaPtr = IRB.CreateIntToPtr( IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)), AI->getType()); - replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true); + replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/false); AI->replaceAllUsesWith(NewAllocaPtr); } diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp index fa0c7cc5a4c53..8bdd917a05966 100644 --- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp +++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp @@ -59,13 +59,8 @@ using namespace llvm; static const char *const SanCovModuleInitName = "__sanitizer_cov_module_init"; static const char *const SanCovName = "__sanitizer_cov"; static const char *const SanCovWithCheckName = "__sanitizer_cov_with_check"; -static const char *const SanCovIndirCallName = "__sanitizer_cov_indir_call16"; static const char *const SanCovTracePCIndirName = "__sanitizer_cov_trace_pc_indir"; -static const char *const SanCovTraceEnterName = - "__sanitizer_cov_trace_func_enter"; -static const char *const SanCovTraceBBName = - 
"__sanitizer_cov_trace_basic_block"; static const char *const SanCovTracePCName = "__sanitizer_cov_trace_pc"; static const char *const SanCovTraceCmp1 = "__sanitizer_cov_trace_cmp1"; static const char *const SanCovTraceCmp2 = "__sanitizer_cov_trace_cmp2"; @@ -86,8 +81,7 @@ static const char *const SanCovTracePCGuardInitName = static cl::opt<int> ClCoverageLevel( "sanitizer-coverage-level", cl::desc("Sanitizer Coverage. 0: none, 1: entry block, 2: all blocks, " - "3: all blocks and critical edges, " - "4: above plus indirect calls"), + "3: all blocks and critical edges"), cl::Hidden, cl::init(0)); static cl::opt<unsigned> ClCoverageBlockThreshold( @@ -96,12 +90,6 @@ static cl::opt<unsigned> ClCoverageBlockThreshold( " more than this number of blocks."), cl::Hidden, cl::init(0)); -static cl::opt<bool> - ClExperimentalTracing("sanitizer-coverage-experimental-tracing", - cl::desc("Experimental basic-block tracing: insert " - "callbacks at every basic block"), - cl::Hidden, cl::init(false)); - static cl::opt<bool> ClExperimentalTracePC("sanitizer-coverage-trace-pc", cl::desc("Experimental pc tracing"), cl::Hidden, cl::init(false)); @@ -128,16 +116,6 @@ static cl::opt<bool> cl::desc("Reduce the number of instrumented blocks"), cl::Hidden, cl::init(true)); -// Experimental 8-bit counters used as an additional search heuristic during -// coverage-guided fuzzing. -// The counters are not thread-friendly: -// - contention on these counters may cause significant slowdown; -// - the counter updates are racy and the results may be inaccurate. -// They are also inaccurate due to 8-bit integer overflow. 
-static cl::opt<bool> ClUse8bitCounters("sanitizer-coverage-8bit-counters", - cl::desc("Experimental 8-bit counters"), - cl::Hidden, cl::init(false)); - namespace { SanitizerCoverageOptions getOptions(int LegacyCoverageLevel) { @@ -168,11 +146,9 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) { SanitizerCoverageOptions CLOpts = getOptions(ClCoverageLevel); Options.CoverageType = std::max(Options.CoverageType, CLOpts.CoverageType); Options.IndirectCalls |= CLOpts.IndirectCalls; - Options.TraceBB |= ClExperimentalTracing; Options.TraceCmp |= ClCMPTracing; Options.TraceDiv |= ClDIVTracing; Options.TraceGep |= ClGEPTracing; - Options.Use8bitCounters |= ClUse8bitCounters; Options.TracePC |= ClExperimentalTracePC; Options.TracePCGuard |= ClTracePCGuard; return Options; @@ -212,16 +188,15 @@ private: bool UseCalls); unsigned NumberOfInstrumentedBlocks() { return SanCovFunction->getNumUses() + - SanCovWithCheckFunction->getNumUses() + SanCovTraceBB->getNumUses() + - SanCovTraceEnter->getNumUses(); + SanCovWithCheckFunction->getNumUses(); } StringRef getSanCovTracePCGuardSection() const; StringRef getSanCovTracePCGuardSectionStart() const; StringRef getSanCovTracePCGuardSectionEnd() const; Function *SanCovFunction; Function *SanCovWithCheckFunction; - Function *SanCovIndirCallFunction, *SanCovTracePCIndir; - Function *SanCovTraceEnter, *SanCovTraceBB, *SanCovTracePC, *SanCovTracePCGuard; + Function *SanCovTracePCIndir; + Function *SanCovTracePC, *SanCovTracePCGuard; Function *SanCovTraceCmpFunction[4]; Function *SanCovTraceDivFunction[2]; Function *SanCovTraceGepFunction; @@ -235,7 +210,6 @@ private: GlobalVariable *GuardArray; GlobalVariable *FunctionGuardArray; // for trace-pc-guard. 
- GlobalVariable *EightBitCounterArray; bool HasSancovGuardsSection; SanitizerCoverageOptions Options; @@ -267,9 +241,6 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { M.getOrInsertFunction(SanCovWithCheckName, VoidTy, Int32PtrTy)); SanCovTracePCIndir = checkSanitizerInterfaceFunction( M.getOrInsertFunction(SanCovTracePCIndirName, VoidTy, IntptrTy)); - SanCovIndirCallFunction = - checkSanitizerInterfaceFunction(M.getOrInsertFunction( - SanCovIndirCallName, VoidTy, IntptrTy, IntptrTy)); SanCovTraceCmpFunction[0] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( SanCovTraceCmp1, VoidTy, IRB.getInt8Ty(), IRB.getInt8Ty())); @@ -305,24 +276,15 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { M.getOrInsertFunction(SanCovTracePCName, VoidTy)); SanCovTracePCGuard = checkSanitizerInterfaceFunction(M.getOrInsertFunction( SanCovTracePCGuardName, VoidTy, Int32PtrTy)); - SanCovTraceEnter = checkSanitizerInterfaceFunction( - M.getOrInsertFunction(SanCovTraceEnterName, VoidTy, Int32PtrTy)); - SanCovTraceBB = checkSanitizerInterfaceFunction( - M.getOrInsertFunction(SanCovTraceBBName, VoidTy, Int32PtrTy)); // At this point we create a dummy array of guards because we don't // know how many elements we will need. Type *Int32Ty = IRB.getInt32Ty(); - Type *Int8Ty = IRB.getInt8Ty(); if (!Options.TracePCGuard) GuardArray = new GlobalVariable(M, Int32Ty, false, GlobalValue::ExternalLinkage, nullptr, "__sancov_gen_cov_tmp"); - if (Options.Use8bitCounters) - EightBitCounterArray = - new GlobalVariable(M, Int8Ty, false, GlobalVariable::ExternalLinkage, - nullptr, "__sancov_gen_cov_tmp"); for (auto &F : M) runOnFunction(F); @@ -344,20 +306,6 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { GuardArray->eraseFromParent(); } - GlobalVariable *RealEightBitCounterArray; - if (Options.Use8bitCounters) { - // Make sure the array is 16-aligned. 
- static const int CounterAlignment = 16; - Type *Int8ArrayNTy = ArrayType::get(Int8Ty, alignTo(N, CounterAlignment)); - RealEightBitCounterArray = new GlobalVariable( - M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage, - Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter"); - RealEightBitCounterArray->setAlignment(CounterAlignment); - EightBitCounterArray->replaceAllUsesWith( - IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy)); - EightBitCounterArray->eraseFromParent(); - } - // Create variable for module (compilation unit) name Constant *ModNameStrConst = ConstantDataArray::getString(M.getContext(), M.getName(), true); @@ -396,10 +344,7 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { M, SanCovModuleCtorName, SanCovModuleInitName, {Int32PtrTy, IntptrTy, Int8PtrTy, Int8PtrTy}, {IRB.CreatePointerCast(RealGuardArray, Int32PtrTy), - ConstantInt::get(IntptrTy, N), - Options.Use8bitCounters - ? IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy) - : Constant::getNullValue(Int8PtrTy), + ConstantInt::get(IntptrTy, N), Constant::getNullValue(Int8PtrTy), IRB.CreatePointerCast(ModuleName, Int8PtrTy)}); appendToGlobalCtors(M, CtorFunc, SanCtorAndDtorPriority); @@ -566,26 +511,15 @@ void SanitizerCoverageModule::InjectCoverageForIndirectCalls( Function &F, ArrayRef<Instruction *> IndirCalls) { if (IndirCalls.empty()) return; - const int CacheSize = 16; - const int CacheAlignment = 64; // Align for better performance. 
- Type *Ty = ArrayType::get(IntptrTy, CacheSize); + if (!Options.TracePC && !Options.TracePCGuard) + return; for (auto I : IndirCalls) { IRBuilder<> IRB(I); CallSite CS(I); Value *Callee = CS.getCalledValue(); if (isa<InlineAsm>(Callee)) continue; - GlobalVariable *CalleeCache = new GlobalVariable( - *F.getParent(), Ty, false, GlobalValue::PrivateLinkage, - Constant::getNullValue(Ty), "__sancov_gen_callee_cache"); - CalleeCache->setAlignment(CacheAlignment); - if (Options.TracePC || Options.TracePCGuard) - IRB.CreateCall(SanCovTracePCIndir, - IRB.CreatePointerCast(Callee, IntptrTy)); - else - IRB.CreateCall(SanCovIndirCallFunction, - {IRB.CreatePointerCast(Callee, IntptrTy), - IRB.CreatePointerCast(CalleeCache, IntptrTy)}); + IRB.CreateCall(SanCovTracePCIndir, IRB.CreatePointerCast(Callee, IntptrTy)); } } @@ -735,9 +669,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB, IRB.CreatePointerCast(GuardArray, IntptrTy), ConstantInt::get(IntptrTy, (1 + NumberOfInstrumentedBlocks()) * 4)); GuardP = IRB.CreateIntToPtr(GuardP, Int32PtrTy); - if (Options.TraceBB) { - IRB.CreateCall(IsEntryBB ? SanCovTraceEnter : SanCovTraceBB, GuardP); - } else if (UseCalls) { + if (UseCalls) { IRB.CreateCall(SanCovWithCheckFunction, GuardP); } else { LoadInst *Load = IRB.CreateLoad(GuardP); @@ -755,19 +687,6 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB, IRB.CreateCall(EmptyAsm, {}); // Avoids callback merge. 
} } - - if (Options.Use8bitCounters) { - IRB.SetInsertPoint(&*IP); - Value *P = IRB.CreateAdd( - IRB.CreatePointerCast(EightBitCounterArray, IntptrTy), - ConstantInt::get(IntptrTy, NumberOfInstrumentedBlocks() - 1)); - P = IRB.CreateIntToPtr(P, IRB.getInt8PtrTy()); - LoadInst *LI = IRB.CreateLoad(P); - Value *Inc = IRB.CreateAdd(LI, ConstantInt::get(IRB.getInt8Ty(), 1)); - StoreInst *SI = IRB.CreateStore(Inc, P); - SetNoSanitizeMetadata(LI); - SetNoSanitizeMetadata(SI); - } } StringRef SanitizerCoverageModule::getSanCovTracePCGuardSection() const { diff --git a/lib/Transforms/Scalar/GVNHoist.cpp b/lib/Transforms/Scalar/GVNHoist.cpp index 6adfe130d148b..b7514a6d57931 100644 --- a/lib/Transforms/Scalar/GVNHoist.cpp +++ b/lib/Transforms/Scalar/GVNHoist.cpp @@ -45,6 +45,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/MemorySSA.h" #include "llvm/Analysis/MemorySSAUpdater.h" #include "llvm/Analysis/ValueTracking.h" @@ -1010,6 +1011,7 @@ public: AU.addRequired<MemorySSAWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<MemorySSAWrapperPass>(); + AU.addPreserved<GlobalsAAWrapperPass>(); } }; } // namespace @@ -1026,6 +1028,7 @@ PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) { PreservedAnalyses PA; PA.preserve<DominatorTreeAnalysis>(); PA.preserve<MemorySSAAnalysis>(); + PA.preserve<GlobalsAA>(); return PA; } diff --git a/lib/Transforms/Scalar/LoopLoadElimination.cpp b/lib/Transforms/Scalar/LoopLoadElimination.cpp index cf63cb660db8c..20b37c4b70e6d 100644 --- a/lib/Transforms/Scalar/LoopLoadElimination.cpp +++ b/lib/Transforms/Scalar/LoopLoadElimination.cpp @@ -197,8 +197,7 @@ public: continue; // Only progagate the value if they are of the same type. 
- if (Store->getPointerOperand()->getType() != - Load->getPointerOperand()->getType()) + if (Store->getPointerOperandType() != Load->getPointerOperandType()) continue; Candidates.emplace_front(Load, Store); diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp index 86058fe0b1aaa..fd15a9014def5 100644 --- a/lib/Transforms/Scalar/LoopRerollPass.cpp +++ b/lib/Transforms/Scalar/LoopRerollPass.cpp @@ -557,7 +557,7 @@ bool LoopReroll::isLoopControlIV(Loop *L, Instruction *IV) { Instruction *UUser = dyn_cast<Instruction>(UU); // Skip SExt if we are extending an nsw value // TODO: Allow ZExt too - if (BO->hasNoSignedWrap() && UUser && UUser->getNumUses() == 1 && + if (BO->hasNoSignedWrap() && UUser && UUser->hasOneUse() && isa<SExtInst>(UUser)) UUser = dyn_cast<Instruction>(*(UUser->user_begin())); if (!isCompareUsedByBranch(UUser)) @@ -852,7 +852,7 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) { for (auto &KV : Roots) { if (KV.first == 0) continue; - if (KV.second->getNumUses() != NumBaseUses) { + if (!KV.second->hasNUses(NumBaseUses)) { DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: " << "#Base=" << NumBaseUses << ", #Root=" << KV.second->getNumUses() << "\n"); @@ -867,7 +867,7 @@ void LoopReroll::DAGRootTracker:: findRootsRecursive(Instruction *I, SmallInstructionSet SubsumedInsts) { // Does the user look like it could be part of a root set? // All its users must be simple arithmetic ops. 
- if (I->getNumUses() > IL_MaxRerollIterations) + if (I->hasNUsesOrMore(IL_MaxRerollIterations + 1)) return; if (I != IV && findRootsBase(I, SubsumedInsts)) diff --git a/lib/Transforms/Scalar/NewGVN.cpp b/lib/Transforms/Scalar/NewGVN.cpp index 3d8ce888867ea..a014ddd9ba0a4 100644 --- a/lib/Transforms/Scalar/NewGVN.cpp +++ b/lib/Transforms/Scalar/NewGVN.cpp @@ -138,7 +138,8 @@ PHIExpression::~PHIExpression() = default; // It also wants to hand us SCC's that are unrelated to the phi node we ask // about, and have us process them there or risk redoing work. // Graph traits over a filter iterator also doesn't work that well here. -// This SCC finder is specialized to walk use-def chains, and only follows instructions, +// This SCC finder is specialized to walk use-def chains, and only follows +// instructions, // not generic values (arguments, etc). struct TarjanSCC { @@ -170,8 +171,10 @@ private: Root[I] = std::min(Root.lookup(I), Root.lookup(Op)); } } - // See if we really were the root of a component, by seeing if we still have our DFSNumber. - // If we do, we are the root of the component, and we have completed a component. If we do not, + // See if we really were the root of a component, by seeing if we still have + // our DFSNumber. + // If we do, we are the root of the component, and we have completed a + // component. If we do not, // we are not the root of a component, and belong on the component stack. if (Root.lookup(I) == OurDFS) { unsigned ComponentID = Components.size(); @@ -2254,12 +2257,13 @@ void NewGVN::initializeCongruenceClasses(Function &F) { MemoryAccessToClass[MSSA->getLiveOnEntryDef()] = createMemoryClass(MSSA->getLiveOnEntryDef()); - for (auto &B : F) { + for (auto DTN : nodes(DT)) { + BasicBlock *BB = DTN->getBlock(); // All MemoryAccesses are equivalent to live on entry to start. They must // be initialized to something so that initial changes are noticed. For // the maximal answer, we initialize them all to be the same as // liveOnEntry. 
- auto *MemoryBlockDefs = MSSA->getBlockDefs(&B); + auto *MemoryBlockDefs = MSSA->getBlockDefs(BB); if (MemoryBlockDefs) for (const auto &Def : *MemoryBlockDefs) { MemoryAccessToClass[&Def] = TOPClass; @@ -2274,7 +2278,7 @@ void NewGVN::initializeCongruenceClasses(Function &F) { if (MD && isa<StoreInst>(MD->getMemoryInst())) TOPClass->incStoreCount(); } - for (auto &I : B) { + for (auto &I : *BB) { // Don't insert void terminators into the class. We don't value number // them, and they just end up sitting in TOP. if (isa<TerminatorInst>(I) && I.getType()->isVoidTy()) @@ -2518,14 +2522,11 @@ void NewGVN::verifyMemoryCongruency() const { auto ReachableAccessPred = [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) { bool Result = ReachableBlocks.count(Pair.first->getBlock()); - if (!Result) + if (!Result || MSSA->isLiveOnEntryDef(Pair.first) || + MemoryToDFSNum(Pair.first) == 0) return false; - if (MSSA->isLiveOnEntryDef(Pair.first)) - return true; if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first)) return !isInstructionTriviallyDead(MemDef->getMemoryInst()); - if (MemoryToDFSNum(Pair.first) == 0) - return false; return true; }; @@ -2719,25 +2720,13 @@ bool NewGVN::runGVN() { } // Now a standard depth first ordering of the domtree is equivalent to RPO. - auto DFI = df_begin(DT->getRootNode()); - for (auto DFE = df_end(DT->getRootNode()); DFI != DFE; ++DFI) { - BasicBlock *B = DFI->getBlock(); + for (auto DTN : depth_first(DT->getRootNode())) { + BasicBlock *B = DTN->getBlock(); const auto &BlockRange = assignDFSNumbers(B, ICount); BlockInstRange.insert({B, BlockRange}); ICount += BlockRange.second - BlockRange.first; } - // Handle forward unreachable blocks and figure out which blocks - // have single preds. - for (auto &B : F) { - // Assign numbers to unreachable blocks. 
- if (!DFI.nodeVisited(DT->getNode(&B))) { - const auto &BlockRange = assignDFSNumbers(&B, ICount); - BlockInstRange.insert({&B, BlockRange}); - ICount += BlockRange.second - BlockRange.first; - } - } - TouchedInstructions.resize(ICount); // Ensure we don't end up resizing the expressionToClass map, as // that can be quite expensive. At most, we have one expression per diff --git a/lib/Transforms/Scalar/StructurizeCFG.cpp b/lib/Transforms/Scalar/StructurizeCFG.cpp index 49ce0262c97b0..659353e912fe0 100644 --- a/lib/Transforms/Scalar/StructurizeCFG.cpp +++ b/lib/Transforms/Scalar/StructurizeCFG.cpp @@ -352,10 +352,20 @@ Value *StructurizeCFG::invert(Value *Condition) { if (Instruction *Inst = dyn_cast<Instruction>(Condition)) { // Third: Check all the users for an invert BasicBlock *Parent = Inst->getParent(); - for (User *U : Condition->users()) - if (Instruction *I = dyn_cast<Instruction>(U)) + for (User *U : Condition->users()) { + if (Instruction *I = dyn_cast<Instruction>(U)) { if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition)))) return I; + } + } + + // Avoid creating a new instruction in the common case of a compare. + if (CmpInst *Cmp = dyn_cast<CmpInst>(Inst)) { + if (Cmp->hasOneUse()) { + Cmp->setPredicate(Cmp->getInversePredicate()); + return Cmp; + } + } // Last option: Create a new instruction return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator()); diff --git a/lib/Transforms/Utils/CmpInstAnalysis.cpp b/lib/Transforms/Utils/CmpInstAnalysis.cpp index 60ae3745c8357..9f4d9c7e39810 100644 --- a/lib/Transforms/Utils/CmpInstAnalysis.cpp +++ b/lib/Transforms/Utils/CmpInstAnalysis.cpp @@ -73,17 +73,17 @@ bool llvm::decomposeBitTestICmp(const ICmpInst *I, CmpInst::Predicate &Pred, default: return false; case ICmpInst::ICMP_SLT: - // X < 0 is equivalent to (X & SignBit) != 0. + // X < 0 is equivalent to (X & SignMask) != 0. 
if (!C->isZero()) return false; - Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth())); + Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth())); Pred = ICmpInst::ICMP_NE; break; case ICmpInst::ICMP_SGT: - // X > -1 is equivalent to (X & SignBit) == 0. + // X > -1 is equivalent to (X & SignMask) == 0. if (!C->isAllOnesValue()) return false; - Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth())); + Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth())); Pred = ICmpInst::ICMP_EQ; break; case ICmpInst::ICMP_ULT: diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp index 644d93b727b3d..82552684b832f 100644 --- a/lib/Transforms/Utils/CodeExtractor.cpp +++ b/lib/Transforms/Utils/CodeExtractor.cpp @@ -112,24 +112,6 @@ buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs) { return buildExtractionBlockSet(BBs.begin(), BBs.end()); } -/// \brief Helper to call buildExtractionBlockSet with a RegionNode. -static SetVector<BasicBlock *> -buildExtractionBlockSet(const RegionNode &RN) { - if (!RN.isSubRegion()) - // Just a single BasicBlock. 
- return buildExtractionBlockSet(RN.getNodeAs<BasicBlock>()); - - const Region &R = *RN.getNodeAs<Region>(); - - return buildExtractionBlockSet(R.block_begin(), R.block_end()); -} - -CodeExtractor::CodeExtractor(BasicBlock *BB, bool AggregateArgs, - BlockFrequencyInfo *BFI, - BranchProbabilityInfo *BPI) - : DT(nullptr), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI), - BPI(BPI), Blocks(buildExtractionBlockSet(BB)), NumExitBlocks(~0U) {} - CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT, bool AggregateArgs, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI) @@ -143,12 +125,6 @@ CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs, BPI(BPI), Blocks(buildExtractionBlockSet(L.getBlocks())), NumExitBlocks(~0U) {} -CodeExtractor::CodeExtractor(DominatorTree &DT, const RegionNode &RN, - bool AggregateArgs, BlockFrequencyInfo *BFI, - BranchProbabilityInfo *BPI) - : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI), - BPI(BPI), Blocks(buildExtractionBlockSet(RN)), NumExitBlocks(~0U) {} - /// definedInRegion - Return true if the specified value is defined in the /// extracted region. 
static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) { diff --git a/lib/Transforms/Utils/LCSSA.cpp b/lib/Transforms/Utils/LCSSA.cpp index 49b4bd92faf4b..089f2b5f3b181 100644 --- a/lib/Transforms/Utils/LCSSA.cpp +++ b/lib/Transforms/Utils/LCSSA.cpp @@ -85,6 +85,7 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist, UsesToRewrite.clear(); Instruction *I = Worklist.pop_back_val(); + assert(!I->getType()->isTokenTy() && "Tokens shouldn't be in the worklist"); BasicBlock *InstBB = I->getParent(); Loop *L = LI.getLoopFor(InstBB); assert(L && "Instruction belongs to a BB that's not part of a loop"); @@ -96,13 +97,6 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist, if (ExitBlocks.empty()) continue; - // Tokens cannot be used in PHI nodes, so we skip over them. - // We can run into tokens which are live out of a loop with catchswitch - // instructions in Windows EH if the catchswitch has one catchpad which - // is inside the loop and another which is not. - if (I->getType()->isTokenTy()) - continue; - for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); BasicBlock *UserBB = User->getParent(); @@ -214,13 +208,9 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist, // Post process PHI instructions that were inserted into another disjoint // loop and update their exits properly. - for (auto *PostProcessPN : PostProcessPHIs) { - if (PostProcessPN->use_empty()) - continue; - - // Reprocess each PHI instruction. - Worklist.push_back(PostProcessPN); - } + for (auto *PostProcessPN : PostProcessPHIs) + if (!PostProcessPN->use_empty()) + Worklist.push_back(PostProcessPN); // Keep track of PHI nodes that we want to remove because they did not have // any uses rewritten. @@ -241,7 +231,7 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist, // Compute the set of BasicBlocks in the loop `L` dominating at least one exit. 
static void computeBlocksDominatingExits( Loop &L, DominatorTree &DT, SmallVector<BasicBlock *, 8> &ExitBlocks, - SmallPtrSet<BasicBlock *, 8> &BlocksDominatingExits) { + SmallSetVector<BasicBlock *, 8> &BlocksDominatingExits) { SmallVector<BasicBlock *, 8> BBWorklist; // We start from the exit blocks, as every block trivially dominates itself @@ -279,7 +269,7 @@ static void computeBlocksDominatingExits( if (!L.contains(IDomBB)) continue; - if (BlocksDominatingExits.insert(IDomBB).second) + if (BlocksDominatingExits.insert(IDomBB)) BBWorklist.push_back(IDomBB); } } @@ -293,7 +283,7 @@ bool llvm::formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI, if (ExitBlocks.empty()) return false; - SmallPtrSet<BasicBlock *, 8> BlocksDominatingExits; + SmallSetVector<BasicBlock *, 8> BlocksDominatingExits; // We want to avoid use-scanning leveraging dominance informations. // If a block doesn't dominate any of the loop exits, the none of the values @@ -315,6 +305,13 @@ bool llvm::formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI, !isa<PHINode>(I.user_back()))) continue; + // Tokens cannot be used in PHI nodes, so we skip over them. + // We can run into tokens which are live out of a loop with catchswitch + // instructions in Windows EH if the catchswitch has one catchpad which + // is inside the loop and another which is not. + if (I.getType()->isTokenTy()) + continue; + Worklist.push_back(&I); } } diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp index 18b29226c2ef5..8c5442762643b 100644 --- a/lib/Transforms/Utils/Local.cpp +++ b/lib/Transforms/Utils/Local.cpp @@ -1227,13 +1227,9 @@ bool llvm::LowerDbgDeclare(Function &F) { // This is a call by-value or some other instruction that // takes a pointer to the variable. Insert a *value* // intrinsic that describes the alloca. 
- SmallVector<uint64_t, 1> NewDIExpr; - auto *DIExpr = DDI->getExpression(); - NewDIExpr.push_back(dwarf::DW_OP_deref); - NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end()); DIB.insertDbgValueIntrinsic(AI, 0, DDI->getVariable(), - DIB.createExpression(NewDIExpr), - DDI->getDebugLoc(), CI); + DDI->getExpression(), DDI->getDebugLoc(), + CI); } } DDI->eraseFromParent(); diff --git a/lib/Transforms/Utils/LoopUnrollPeel.cpp b/lib/Transforms/Utils/LoopUnrollPeel.cpp index 73c14f5606b73..5c21490793e79 100644 --- a/lib/Transforms/Utils/LoopUnrollPeel.cpp +++ b/lib/Transforms/Utils/LoopUnrollPeel.cpp @@ -46,6 +46,11 @@ static cl::opt<unsigned> UnrollForcePeelCount( "unroll-force-peel-count", cl::init(0), cl::Hidden, cl::desc("Force a peel count regardless of profiling information.")); +// Designates that a Phi is estimated to become invariant after an "infinite" +// number of loop iterations (i.e. only may become an invariant if the loop is +// fully unrolled). +static const unsigned InfiniteIterationsToInvariance = UINT_MAX; + // Check whether we are capable of peeling this loop. static bool canPeel(Loop *L) { // Make sure the loop is in simplified form @@ -66,10 +71,62 @@ static bool canPeel(Loop *L) { return true; } +// This function calculates the number of iterations after which the given Phi +// becomes an invariant. The pre-calculated values are memorized in the map. The +// function (shortcut is I) is calculated according to the following definition: +// Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge]. +// If %y is a loop invariant, then I(%x) = 1. +// If %y is a Phi from the loop header, I(%x) = I(%y) + 1. +// Otherwise, I(%x) is infinite. +// TODO: Actually if %y is an expression that depends only on Phi %z and some +// loop invariants, we can estimate I(%x) = I(%z) + 1. The example +// looks like: +// %x = phi(0, %a), <-- becomes invariant starting from 3rd iteration. +// %y = phi(0, 5), +// %a = %y + 1. 
+static unsigned calculateIterationsToInvariance( + PHINode *Phi, Loop *L, BasicBlock *BackEdge, + SmallDenseMap<PHINode *, unsigned> &IterationsToInvariance) { + assert(Phi->getParent() == L->getHeader() && + "Non-loop Phi should not be checked for turning into invariant."); + assert(BackEdge == L->getLoopLatch() && "Wrong latch?"); + // If we already know the answer, take it from the map. + auto I = IterationsToInvariance.find(Phi); + if (I != IterationsToInvariance.end()) + return I->second; + + // Otherwise we need to analyze the input from the back edge. + Value *Input = Phi->getIncomingValueForBlock(BackEdge); + // Place infinity to map to avoid infinite recursion for cycled Phis. Such + // cycles can never stop on an invariant. + IterationsToInvariance[Phi] = InfiniteIterationsToInvariance; + unsigned ToInvariance = InfiniteIterationsToInvariance; + + if (L->isLoopInvariant(Input)) + ToInvariance = 1u; + else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) { + // Only consider Phis in header block. + if (IncPhi->getParent() != L->getHeader()) + return InfiniteIterationsToInvariance; + // If the input becomes an invariant after X iterations, then our Phi + // becomes an invariant after X + 1 iterations. + unsigned InputToInvariance = calculateIterationsToInvariance( + IncPhi, L, BackEdge, IterationsToInvariance); + if (InputToInvariance != InfiniteIterationsToInvariance) + ToInvariance = InputToInvariance + 1u; + } + + // If we found that this Phi lies in an invariant chain, update the map. + if (ToInvariance != InfiniteIterationsToInvariance) + IterationsToInvariance[Phi] = ToInvariance; + return ToInvariance; +} + // Return the number of iterations we want to peel off. 
void llvm::computePeelCount(Loop *L, unsigned LoopSize, TargetTransformInfo::UnrollingPreferences &UP, unsigned &TripCount) { + assert(LoopSize > 0 && "Zero loop size is not allowed!"); UP.PeelCount = 0; if (!canPeel(L)) return; @@ -78,30 +135,37 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, if (!L->empty()) return; - // Try to find a Phi node that has the same loop invariant as an input from - // its only back edge. If there is such Phi, peeling 1 iteration from the - // loop is profitable, because starting from 2nd iteration we will have an - // invariant instead of this Phi. - if (LoopSize <= UP.Threshold) { + // Here we try to get rid of Phis which become invariants after 1, 2, ..., N + // iterations of the loop. For this we compute the number for iterations after + // which every Phi is guaranteed to become an invariant, and try to peel the + // maximum number of iterations among these values, thus turning all those + // Phis into invariants. + // First, check that we can peel at least one iteration. + if (2 * LoopSize <= UP.Threshold && UnrollPeelMaxCount > 0) { + // Store the pre-calculated values here. + SmallDenseMap<PHINode *, unsigned> IterationsToInvariance; + // Now go through all Phis to calculate their the number of iterations they + // need to become invariants. + unsigned DesiredPeelCount = 0; BasicBlock *BackEdge = L->getLoopLatch(); assert(BackEdge && "Loop is not in simplified form?"); - BasicBlock *Header = L->getHeader(); - // Iterate over Phis to find one with invariant input on back edge. 
- bool FoundCandidate = false; - PHINode *Phi; - for (auto BI = Header->begin(); isa<PHINode>(&*BI); ++BI) { - Phi = cast<PHINode>(&*BI); - Value *Input = Phi->getIncomingValueForBlock(BackEdge); - if (L->isLoopInvariant(Input)) { - FoundCandidate = true; - break; - } + for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) { + PHINode *Phi = cast<PHINode>(&*BI); + unsigned ToInvariance = calculateIterationsToInvariance( + Phi, L, BackEdge, IterationsToInvariance); + if (ToInvariance != InfiniteIterationsToInvariance) + DesiredPeelCount = std::max(DesiredPeelCount, ToInvariance); } - if (FoundCandidate) { - DEBUG(dbgs() << "Peel one iteration to get rid of " << *Phi - << " because starting from 2nd iteration it is always" - << " an invariant\n"); - UP.PeelCount = 1; + if (DesiredPeelCount > 0) { + // Pay respect to limitations implied by loop size and the max peel count. + unsigned MaxPeelCount = UnrollPeelMaxCount; + MaxPeelCount = std::min(MaxPeelCount, UP.Threshold / LoopSize - 1); + DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount); + // Consider max peel count limitation. 
+ assert(DesiredPeelCount > 0 && "Wrong loop size estimation?"); + DEBUG(dbgs() << "Peel " << DesiredPeelCount << " iteration(s) to turn" + << " some Phis into invariants.\n"); + UP.PeelCount = DesiredPeelCount; return; } } diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp index 127a44df5344f..2f575b9d50272 100644 --- a/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/lib/Transforms/Utils/SimplifyCFG.cpp @@ -3086,7 +3086,7 @@ static bool mergeConditionalStores(BranchInst *PBI, BranchInst *QBI) { if ((PTB && !HasOnePredAndOneSucc(PTB, PBI->getParent(), QBI->getParent())) || (QTB && !HasOnePredAndOneSucc(QTB, QBI->getParent(), PostBB))) return false; - if (PostBB->getNumUses() != 2 || QBI->getParent()->getNumUses() != 2) + if (!PostBB->hasNUses(2) || !QBI->getParent()->hasNUses(2)) return false; // OK, this is a sequence of two diamonds or triangles. diff --git a/lib/Transforms/Utils/VNCoercion.cpp b/lib/Transforms/Utils/VNCoercion.cpp index 4aeea02b1b1bf..83bd29dbca651 100644 --- a/lib/Transforms/Utils/VNCoercion.cpp +++ b/lib/Transforms/Utils/VNCoercion.cpp @@ -24,6 +24,11 @@ bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, if (DL.getTypeSizeInBits(StoredVal->getType()) < DL.getTypeSizeInBits(LoadTy)) return false; + // Don't coerce non-integral pointers to integers or vice versa. + if (DL.isNonIntegralPointerType(StoredVal->getType()) != + DL.isNonIntegralPointerType(LoadTy)) + return false; + return true; } diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 595b2ec889435..7eb8fabe0b2f0 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -422,7 +422,8 @@ protected: // When we if-convert we need to create edge masks. We have to cache values // so that we don't end up with exponential recursion/IR. 
typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts> - EdgeMaskCache; + EdgeMaskCacheTy; + typedef DenseMap<BasicBlock *, VectorParts> BlockMaskCacheTy; /// Create an empty loop, based on the loop ranges of the old loop. void createEmptyLoop(); @@ -785,7 +786,8 @@ protected: /// Store instructions that should be predicated, as a pair /// <StoreInst, Predicate> SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions; - EdgeMaskCache MaskCache; + EdgeMaskCacheTy EdgeMaskCache; + BlockMaskCacheTy BlockMaskCache; /// Trip count of the original loop. Value *TripCount; /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) @@ -4560,8 +4562,8 @@ InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { // Look for cached value. std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); - EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); - if (ECEntryIt != MaskCache.end()) + EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); + if (ECEntryIt != EdgeMaskCache.end()) return ECEntryIt->second; VectorParts SrcMask = createBlockInMask(Src); @@ -4580,11 +4582,11 @@ InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { for (unsigned part = 0; part < UF; ++part) EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]); - MaskCache[Edge] = EdgeMask; + EdgeMaskCache[Edge] = EdgeMask; return EdgeMask; } - MaskCache[Edge] = SrcMask; + EdgeMaskCache[Edge] = SrcMask; return SrcMask; } @@ -4592,10 +4594,17 @@ InnerLoopVectorizer::VectorParts InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); + // Look for cached value. + BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); + if (BCEntryIt != BlockMaskCache.end()) + return BCEntryIt->second; + // Loop incoming mask is all-one. 
if (OrigLoop->getHeader() == BB) { Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1); - return getVectorValue(C); + const VectorParts &BlockMask = getVectorValue(C); + BlockMaskCache[BB] = BlockMask; + return BlockMask; } // This is the block mask. We OR all incoming edges, and with zero. @@ -4609,6 +4618,7 @@ InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]); } + BlockMaskCache[BB] = BlockMask; return BlockMask; } diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index da3ac06ab464e..5549444047088 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -4146,8 +4146,8 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, if (AllowReorder && R.shouldReorder()) { // Conceptually, there is nothing actually preventing us from trying to // reorder a larger list. In fact, we do exactly this when vectorizing - // reductions. However, at this point, we only expect to get here from - // tryToVectorizePair(). + // reductions. However, at this point, we only expect to get here when + // there are exactly two operations. assert(Ops.size() == 2); assert(BuildVectorSlice.empty()); Value *ReorderedOps[] = {Ops[1], Ops[0]}; @@ -4904,7 +4904,13 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { // Try to vectorize them. unsigned NumElts = (SameTypeIt - IncIt); DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n"); - if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) { + // The order in which the phi nodes appear in the program does not matter. + // So allow tryToVectorizeList to reorder them if it is beneficial. This + // is done when there are exactly two elements since tryToVectorizeList + // asserts that there are only two values when AllowReorder is true. 
+ bool AllowReorder = NumElts == 2; + if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, + None, AllowReorder)) { // Success start over because instructions might have been changed. HaveVectorizedPhiNodes = true; Changed = true; diff --git a/test/Analysis/BranchProbabilityInfo/basic.ll b/test/Analysis/BranchProbabilityInfo/basic.ll index 94ea5a3d1d8ea..84936b7761caa 100644 --- a/test/Analysis/BranchProbabilityInfo/basic.ll +++ b/test/Analysis/BranchProbabilityInfo/basic.ll @@ -372,3 +372,228 @@ exit: ret i32 %result } +define i32 @test_unreachable_with_prof_greater(i32 %a, i32 %b) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_prof_greater' +entry: + %cond = icmp eq i32 %a, 42 + br i1 %cond, label %exit, label %unr, !prof !4 + +; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge] +; CHECK: edge entry -> unr probability is 0x00000800 / 0x80000000 = 0.00% + +unr: + unreachable + +exit: + ret i32 %b +} + +!4 = !{!"branch_weights", i32 0, i32 1} + +define i32 @test_unreachable_with_prof_equal(i32 %a, i32 %b) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_prof_equal' +entry: + %cond = icmp eq i32 %a, 42 + br i1 %cond, label %exit, label %unr, !prof !5 + +; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge] +; CHECK: edge entry -> unr probability is 0x00000800 / 0x80000000 = 0.00% + +unr: + unreachable + +exit: + ret i32 %b +} + +!5 = !{!"branch_weights", i32 1048575, i32 1} + +define i32 @test_unreachable_with_prof_zero(i32 %a, i32 %b) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_prof_zero' +entry: + %cond = icmp eq i32 %a, 42 + br i1 %cond, label %exit, label %unr, !prof !6 + +; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge] +; CHECK: edge entry -> unr probability is 0x00000800 / 0x80000000 = 0.00% + +unr: + unreachable + +exit: + ret i32 %b +} + +!6 = 
!{!"branch_weights", i32 0, i32 0} + +define i32 @test_unreachable_with_prof_less(i32 %a, i32 %b) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_prof_less' +entry: + %cond = icmp eq i32 %a, 42 + br i1 %cond, label %exit, label %unr, !prof !7 + +; CHECK: edge entry -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] +; CHECK: edge entry -> unr probability is 0x00000000 / 0x80000000 = 0.00% + +unr: + unreachable + +exit: + ret i32 %b +} + +!7 = !{!"branch_weights", i32 1, i32 0} + +define i32 @test_unreachable_with_switch_prof1(i32 %i, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_switch_prof1' +entry: + switch i32 %i, label %case_a [ i32 1, label %case_b + i32 2, label %case_c + i32 3, label %case_d + i32 4, label %case_e ], !prof !8 +; CHECK: edge entry -> case_a probability is 0x00000800 / 0x80000000 = 0.00% +; CHECK: edge entry -> case_b probability is 0x07fffe01 / 0x80000000 = 6.25% +; CHECK: edge entry -> case_c probability is 0x67fffdff / 0x80000000 = 81.25% [HOT edge] +; CHECK: edge entry -> case_d probability is 0x07fffdff / 0x80000000 = 6.25% +; CHECK: edge entry -> case_e probability is 0x07fffdff / 0x80000000 = 6.25% + +case_a: + unreachable + +case_b: + br label %exit +; CHECK: edge case_b -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_c: + br label %exit +; CHECK: edge case_c -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_d: + br label %exit +; CHECK: edge case_d -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_e: + br label %exit +; CHECK: edge case_e -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +exit: + %result = phi i32 [ %b, %case_b ], + [ %c, %case_c ], + [ %d, %case_d ], + [ %e, %case_e ] + ret i32 %result +} + +!8 = !{!"branch_weights", i32 4, i32 4, i32 64, i32 4, i32 4} + +define i32 @test_unreachable_with_switch_prof2(i32 %i, 
i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_switch_prof2' +entry: + switch i32 %i, label %case_a [ i32 1, label %case_b + i32 2, label %case_c + i32 3, label %case_d + i32 4, label %case_e ], !prof !9 +; CHECK: edge entry -> case_a probability is 0x00000400 / 0x80000000 = 0.00% +; CHECK: edge entry -> case_b probability is 0x00000400 / 0x80000000 = 0.00% +; CHECK: edge entry -> case_c probability is 0x6aaaa800 / 0x80000000 = 83.33% [HOT edge] +; CHECK: edge entry -> case_d probability is 0x0aaaa7ff / 0x80000000 = 8.33% +; CHECK: edge entry -> case_e probability is 0x0aaaa7ff / 0x80000000 = 8.33% + +case_a: + unreachable + +case_b: + unreachable + +case_c: + br label %exit +; CHECK: edge case_c -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_d: + br label %exit +; CHECK: edge case_d -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_e: + br label %exit +; CHECK: edge case_e -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +exit: + %result = phi i32 [ %c, %case_c ], + [ %d, %case_d ], + [ %e, %case_e ] + ret i32 %result +} + +!9 = !{!"branch_weights", i32 4, i32 4, i32 64, i32 4, i32 4} + +define i32 @test_unreachable_with_switch_prof3(i32 %i, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_switch_prof3' +entry: + switch i32 %i, label %case_a [ i32 1, label %case_b + i32 2, label %case_c + i32 3, label %case_d + i32 4, label %case_e ], !prof !10 +; CHECK: edge entry -> case_a probability is 0x00000000 / 0x80000000 = 0.00% +; CHECK: edge entry -> case_b probability is 0x00000400 / 0x80000000 = 0.00% +; CHECK: edge entry -> case_c probability is 0x6e08fa2e / 0x80000000 = 85.96% [HOT edge] +; CHECK: edge entry -> case_d probability is 0x08fb80e9 / 0x80000000 = 7.02% +; CHECK: edge entry -> case_e probability is 0x08fb80e9 / 0x80000000 = 7.02% + +case_a: + 
unreachable + +case_b: + unreachable + +case_c: + br label %exit +; CHECK: edge case_c -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_d: + br label %exit +; CHECK: edge case_d -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +case_e: + br label %exit +; CHECK: edge case_e -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge] + +exit: + %result = phi i32 [ %c, %case_c ], + [ %d, %case_d ], + [ %e, %case_e ] + ret i32 %result +} + +!10 = !{!"branch_weights", i32 0, i32 4, i32 64, i32 4, i32 4} + +define i32 @test_unreachable_with_switch_prof4(i32 %i, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { +; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_switch_prof4' +entry: + switch i32 %i, label %case_a [ i32 1, label %case_b + i32 2, label %case_c + i32 3, label %case_d + i32 4, label %case_e ], !prof !11 +; CHECK: edge entry -> case_a probability is 0x1999999a / 0x80000000 = 20.00% +; CHECK: edge entry -> case_b probability is 0x1999999a / 0x80000000 = 20.00% +; CHECK: edge entry -> case_c probability is 0x1999999a / 0x80000000 = 20.00% +; CHECK: edge entry -> case_d probability is 0x1999999a / 0x80000000 = 20.00% +; CHECK: edge entry -> case_e probability is 0x1999999a / 0x80000000 = 20.00% + +case_a: + unreachable + +case_b: + unreachable + +case_c: + unreachable + +case_d: + unreachable + +case_e: + unreachable + +} + +!11 = !{!"branch_weights", i32 0, i32 4, i32 64, i32 4, i32 4} diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll index 0acb050c2519e..8ae44387c1da7 100644 --- a/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll +++ b/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll @@ -1,5 +1,6 @@ ; RUN: opt %s -mtriple amdgcn-- -analyze -divergence | FileCheck %s +; CHECK-LABEL: Printing analysis 'Divergence Analysis' for function 'test_amdgpu_ps': ; CHECK: DIVERGENT: ; CHECK-NOT: %arg0 ; CHECK-NOT: %arg1 
@@ -9,7 +10,31 @@ ; CHECK: DIVERGENT: float %arg5 ; CHECK: DIVERGENT: i32 %arg6 -define amdgpu_ps void @main([4 x <16 x i8>] addrspace(2)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 { +define amdgpu_ps void @test_amdgpu_ps([4 x <16 x i8>] addrspace(2)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 { + ret void +} + +; CHECK-LABEL: Printing analysis 'Divergence Analysis' for function 'test_amdgpu_kernel': +; CHECK-NOT: %arg0 +; CHECK-NOT: %arg1 +; CHECK-NOT: %arg2 +; CHECK-NOT: %arg3 +; CHECK-NOT: %arg4 +; CHECK-NOT: %arg5 +; CHECK-NOT: %arg6 +define amdgpu_kernel void @test_amdgpu_kernel([4 x <16 x i8>] addrspace(2)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 { + ret void +} + +; CHECK-LABEL: Printing analysis 'Divergence Analysis' for function 'test_c': +; CHECK: DIVERGENT: +; CHECK: DIVERGENT: +; CHECK: DIVERGENT: +; CHECK: DIVERGENT: +; CHECK: DIVERGENT: +; CHECK: DIVERGENT: +; CHECK: DIVERGENT: +define void @test_c([4 x <16 x i8>] addrspace(2)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 { ret void } diff --git a/test/Analysis/ScalarEvolution/or-as-add.ll b/test/Analysis/ScalarEvolution/or-as-add.ll new file mode 100644 index 0000000000000..ac4e65a20f218 --- /dev/null +++ b/test/Analysis/ScalarEvolution/or-as-add.ll @@ -0,0 +1,38 @@ +; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s + +declare void @z(i32) +declare void @z2(i64) + +define void @fun(i1 %bool, i32 %x) { +entry: + br label %body +body: + %i = phi i32 [ 0, %entry ], [ %i.next, %body ] + %bottom_zero = mul i32 %i, 2 + %a = or i32 %bottom_zero, 1 + call void @z(i32 %a) + %bool_ext = zext i1 %bool to i32 + %b = or i32 %bool_ext, %bottom_zero + call void @z(i32 %b) + %shifted = lshr i32 %x, 31 + %c = or i32 %shifted, %bottom_zero + 
call void @z(i32 %c) + %i_ext = zext i32 %i to i64 + %d = or i64 %i_ext, 4294967296 + call void @z2(i64 %d) + %i.next = add i32 %i, 1 + %cond = icmp eq i32 %i.next, 10 + br i1 %cond, label %exit, label %body +exit: + ret void +} + +; CHECK: %a = or i32 %bottom_zero, 1 +; CHECK-NEXT: --> {1,+,2}<%body> +; CHECK: %b = or i32 %bool_ext, %bottom_zero +; CHECK-NEXT: --> {(zext i1 %bool to i32),+,2} +; CHECK: %c = or i32 %shifted, %bottom_zero +; CHECK-NEXT: --> {(%x /u -2147483648),+,2}<%body> +; CHECK: %d = or i64 %i_ext, 4294967296 +; CHECK-NEXT: --> {4294967296,+,1}<nuw><nsw><%body> + diff --git a/test/Bitcode/DIExpression-aggresult.ll b/test/Bitcode/DIExpression-aggresult.ll new file mode 100644 index 0000000000000..5ce936d7074da --- /dev/null +++ b/test/Bitcode/DIExpression-aggresult.ll @@ -0,0 +1,36 @@ +; RUN: llvm-dis -o - %s.bc | FileCheck %s +%class.A = type { i32, i32, i32, i32 } + +define void @_Z3fooi(%class.A* sret %agg.result) #0 !dbg !3 { + ; CHECK: call void @llvm.dbg.declare({{.*}}, metadata ![[EXPR:[0-9]+]]), !dbg + ; CHECK: ![[EXPR]] = !DIExpression() + call void @llvm.dbg.declare(metadata %class.A* %agg.result, metadata !13, metadata !16), !dbg !17 + ret void, !dbg !17 +} + +declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 + +attributes #0 = { ssp } +attributes #1 = { nounwind readnone } + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug) +!1 = !DIFile(filename: "a.cc", directory: "/tmp") +!2 = !{i32 1, !"Debug Info Version", i32 3} +!3 = distinct !DISubprogram(name: "foo", linkageName: "_Z3fooi", scope: !1, file: !1, line: 4, type: !4, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!4 = !DISubroutineType(types: !5) +!5 = !{!6} +!6 = !DICompositeType(tag: DW_TAG_class_type, name: "A", scope: !0, file: !1, line: 2, size: 128, align: 32, 
elements: !7) +!7 = !{!8, !10, !11, !12} +!8 = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: !1, file: !1, line: 2, baseType: !9, size: 32, align: 32) +!9 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed) +!10 = !DIDerivedType(tag: DW_TAG_member, name: "y", scope: !1, file: !1, line: 2, baseType: !9, size: 32, align: 32, offset: 32) +!11 = !DIDerivedType(tag: DW_TAG_member, name: "z", scope: !1, file: !1, line: 2, baseType: !9, size: 32, align: 32, offset: 64) +!12 = !DIDerivedType(tag: DW_TAG_member, name: "o", scope: !1, file: !1, line: 2, baseType: !9, size: 32, align: 32, offset: 96) +!13 = !DILocalVariable(name: "my_a", scope: !14, file: !1, line: 9, type: !15) +!14 = distinct !DILexicalBlock(scope: !3, file: !1, line: 4, column: 14) +!15 = !DIDerivedType(tag: DW_TAG_reference_type, file: !1, baseType: !6) +!16 = !DIExpression(DW_OP_deref) +!17 = !DILocation(line: 9, column: 5, scope: !3) diff --git a/test/Bitcode/DIExpression-aggresult.ll.bc b/test/Bitcode/DIExpression-aggresult.ll.bc Binary files differnew file mode 100644 index 0000000000000..bcf6e175b4d04 --- /dev/null +++ b/test/Bitcode/DIExpression-aggresult.ll.bc diff --git a/test/Bitcode/DIExpression-deref.ll b/test/Bitcode/DIExpression-deref.ll new file mode 100644 index 0000000000000..3a161b8ee4d29 --- /dev/null +++ b/test/Bitcode/DIExpression-deref.ll @@ -0,0 +1,27 @@ +; RUN: llvm-dis -o - %s.bc | FileCheck %s + +!llvm.dbg.cu = !{!1} +!llvm.module.flags = !{!20, !21} + +!0 = distinct !DIGlobalVariable(name: "g", scope: !1, file: !2, line: 1, type: !5, isLocal: false, isDefinition: true) +!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2, producer: "clang (llvm/trunk 288154)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !3, globals: !4) +!2 = !DIFile(filename: "a.c", directory: "/") +!3 = !{} +!4 = !{!10, !11, !12, !13} +!5 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +; DW_OP_deref should be moved to the back of 
the expression. +; +; CHECK: !DIExpression(DW_OP_plus, 0, DW_OP_deref, DW_OP_LLVM_fragment, 8, 32) +!6 = !DIExpression(DW_OP_deref, DW_OP_plus, 0, DW_OP_LLVM_fragment, 8, 32) +; CHECK: !DIExpression(DW_OP_plus, 0, DW_OP_deref) +!7 = !DIExpression(DW_OP_deref, DW_OP_plus, 0) +; CHECK: !DIExpression(DW_OP_plus, 1, DW_OP_deref) +!8 = !DIExpression(DW_OP_plus, 1, DW_OP_deref) +; CHECK: !DIExpression(DW_OP_deref) +!9 = !DIExpression(DW_OP_deref) +!10 = !DIGlobalVariableExpression(var: !0, expr: !6) +!11 = !DIGlobalVariableExpression(var: !0, expr: !7) +!12 = !DIGlobalVariableExpression(var: !0, expr: !8) +!13 = !DIGlobalVariableExpression(var: !0, expr: !9) +!20 = !{i32 2, !"Dwarf Version", i32 4} +!21 = !{i32 2, !"Debug Info Version", i32 3} diff --git a/test/Bitcode/DIExpression-deref.ll.bc b/test/Bitcode/DIExpression-deref.ll.bc Binary files differnew file mode 100644 index 0000000000000..5297bf9f17b49 --- /dev/null +++ b/test/Bitcode/DIExpression-deref.ll.bc diff --git a/test/Bitcode/thinlto-alias.ll b/test/Bitcode/thinlto-alias.ll index cfdf8f7b0bd96..2c235f0620ecb 100644 --- a/test/Bitcode/thinlto-alias.ll +++ b/test/Bitcode/thinlto-alias.ll @@ -5,33 +5,31 @@ ; RUN: llvm-lto -thinlto -o %t3 %t.o %t2.o ; RUN: llvm-bcanalyzer -dump %t3.thinlto.bc | FileCheck %s --check-prefix=COMBINED +; CHECK: <SOURCE_FILENAME +; "main" +; CHECK-NEXT: <FUNCTION op0=0 op1=4 +; "analias" +; CHECK-NEXT: <FUNCTION op0=4 op1=7 ; CHECK: <GLOBALVAL_SUMMARY_BLOCK ; CHECK-NEXT: <VERSION -; See if the call to func is registered, using the expected callsite count -; and value id matching the subsequent value symbol table. -; CHECK-NEXT: <PERMODULE {{.*}} op4=[[FUNCID:[0-9]+]]/> +; See if the call to func is registered. +; The value id 1 matches the second FUNCTION record above. 
+; CHECK-NEXT: <PERMODULE {{.*}} op4=1/> ; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; CHECK-NEXT: <VALUE_SYMTAB -; CHECK-NEXT: <FNENTRY {{.*}} record string = 'main' -; External function analias should have entry with value id FUNCID -; CHECK-NEXT: <ENTRY {{.*}} op0=[[FUNCID]] {{.*}} record string = 'analias' -; CHECK-NEXT: </VALUE_SYMTAB> + +; CHECK: <STRTAB_BLOCK +; CHECK-NEXT: blob data = 'mainanalias' ; COMBINED: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION -; See if the call to analias is registered, using the expected callsite count -; and value id matching the subsequent value symbol table. -; COMBINED-NEXT: <COMBINED {{.*}} op5=[[ALIASID:[0-9]+]]/> -; Followed by the alias and aliasee +; See if the call to analias is registered, using the expected value id. +; COMBINED-NEXT: <VALUE_GUID op0=[[ALIASID:[0-9]+]] op1=-5751648690987223394/> +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID op0=[[ALIASEEID:[0-9]+]] op1=-1039159065113703048/> +; COMBINED-NEXT: <COMBINED {{.*}} op5=[[ALIASID]]/> ; COMBINED-NEXT: <COMBINED {{.*}} -; COMBINED-NEXT: <COMBINED_ALIAS {{.*}} op3=[[ALIASEEID:[0-9]+]] +; COMBINED-NEXT: <COMBINED_ALIAS {{.*}} op3=[[ALIASEEID]] ; COMBINED-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; COMBINED-NEXT: <VALUE_SYMTAB -; Entry for function func should have entry with value id ALIASID -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op0=[[ALIASID]] op1=-5751648690987223394/> -; COMBINED-NEXT: <COMBINED -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op0=[[ALIASEEID]] op1=-1039159065113703048/> -; COMBINED-NEXT: </VALUE_SYMTAB> ; ModuleID = 'thinlto-function-summary-callgraph.ll' target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll b/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll index 713e36dd14d60..7f9d6d95f506b 100644 --- a/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll +++ b/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll @@ -10,31 +10,27 @@ ; RUN: llvm-lto 
-thinlto-index-stats %p/Inputs/thinlto-function-summary-callgraph-pgo.1.bc | FileCheck %s --check-prefix=OLD ; RUN: llvm-lto -thinlto-index-stats %p/Inputs/thinlto-function-summary-callgraph-pgo-combined.1.bc | FileCheck %s --check-prefix=OLD-COMBINED +; CHECK: <SOURCE_FILENAME +; CHECK-NEXT: <FUNCTION +; "func" +; CHECK-NEXT: <FUNCTION op0=4 op1=4 ; CHECK: <GLOBALVAL_SUMMARY_BLOCK ; CHECK-NEXT: <VERSION -; See if the call to func is registered, using the expected callsite count -; and hotness type, with value id matching the subsequent value symbol table. -; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=[[FUNCID:[0-9]+]] op5=2/> +; See if the call to func is registered, using the expected hotness type. +; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=2/> ; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; CHECK-NEXT: <VALUE_SYMTAB -; CHECK-NEXT: <FNENTRY {{.*}} record string = 'main' -; External function func should have entry with value id FUNCID -; CHECK-NEXT: <ENTRY {{.*}} op0=[[FUNCID]] {{.*}} record string = 'func' -; CHECK-NEXT: </VALUE_SYMTAB> +; CHECK: <STRTAB_BLOCK +; CHECK-NEXT: blob data = 'mainfunc' ; COMBINED: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION +; COMBINED-NEXT: <VALUE_GUID op0=[[FUNCID:[0-9]+]] op1=7289175272376759421/> +; COMBINED-NEXT: <VALUE_GUID ; COMBINED-NEXT: <COMBINED -; See if the call to func is registered, using the expected callsite count -; and hotness type, with value id matching the subsequent value symbol table. +; See if the call to func is registered, using the expected hotness type. ; op6=2 which is hotnessType::None. 
-; COMBINED-NEXT: <COMBINED_PROFILE {{.*}} op5=[[FUNCID:[0-9]+]] op6=2/> +; COMBINED-NEXT: <COMBINED_PROFILE {{.*}} op5=[[FUNCID]] op6=2/> ; COMBINED-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; COMBINED-NEXT: <VALUE_SYMTAB -; Entry for function func should have entry with value id FUNCID -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op0=[[FUNCID]] op1=7289175272376759421/> -; COMBINED-NEXT: <COMBINED -; COMBINED-NEXT: </VALUE_SYMTAB> ; ModuleID = 'thinlto-function-summary-callgraph.ll' target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll index 3a5adea202e2a..982bb5cb7e531 100644 --- a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll +++ b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll @@ -6,27 +6,45 @@ ; RUN: llvm-bcanalyzer -dump %t3.thinlto.bc | FileCheck %s --check-prefix=COMBINED +; CHECK: <SOURCE_FILENAME +; "hot_function" +; CHECK-NEXT: <FUNCTION op0=0 op1=12 +; "hot1" +; CHECK-NEXT: <FUNCTION op0=12 op1=4 +; "hot2" +; CHECK-NEXT: <FUNCTION op0=16 op1=4 +; "hot3" +; CHECK-NEXT: <FUNCTION op0=20 op1=4 +; "hot4" +; CHECK-NEXT: <FUNCTION op0=24 op1=4 +; "cold" +; CHECK-NEXT: <FUNCTION op0=28 op1=4 +; "none1" +; CHECK-NEXT: <FUNCTION op0=32 op1=5 +; "none2" +; CHECK-NEXT: <FUNCTION op0=37 op1=5 +; "none3" +; CHECK-NEXT: <FUNCTION op0=42 op1=5 ; CHECK-LABEL: <GLOBALVAL_SUMMARY_BLOCK ; CHECK-NEXT: <VERSION -; See if the call to func is registered, using the expected callsite count -; and profile count, with value id matching the subsequent value symbol table. 
-; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=[[HOT1:.*]] op5=3 op6=[[COLD:.*]] op7=1 op8=[[HOT2:.*]] op9=3 op10=[[HOT4:.*]] op11=3 op12=[[NONE1:.*]] op13=2 op14=[[HOT3:.*]] op15=3 op16=[[NONE2:.*]] op17=2 op18=[[NONE3:.*]] op19=2 op20=[[LEGACY:.*]] op21=3/> +; CHECK-NEXT: <VALUE_GUID op0=25 op1=123/> +; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123 +; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=3 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=3/> ; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; CHECK-LABEL: <VALUE_SYMTAB -; CHECK-NEXT: <FNENTRY {{.*}} record string = 'hot_function -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[NONE1]] {{.*}} record string = 'none1' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[COLD]] {{.*}} record string = 'cold' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[NONE2]] {{.*}} record string = 'none2' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[NONE3]] {{.*}} record string = 'none3' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT1]] {{.*}} record string = 'hot1' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT2]] {{.*}} record string = 'hot2' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT3]] {{.*}} record string = 'hot3' -; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT4]] {{.*}} record string = 'hot4' -; CHECK-DAG: <COMBINED_ENTRY abbrevid=11 op0=[[LEGACY]] op1=123/> -; CHECK-LABEL: </VALUE_SYMTAB> + +; CHECK: <STRTAB_BLOCK +; CHECK-NEXT: blob data = 'hot_functionhot1hot2hot3hot4coldnone1none2none3' ; COMBINED: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID +; COMBINED-NEXT: <VALUE_GUID ; COMBINED-NEXT: <COMBINED abbrevid= ; COMBINED-NEXT: <COMBINED abbrevid= ; COMBINED-NEXT: <COMBINED abbrevid= diff --git a/test/Bitcode/thinlto-function-summary-callgraph.ll 
b/test/Bitcode/thinlto-function-summary-callgraph.ll index c00907b7fb291..8cc60ad633621 100644 --- a/test/Bitcode/thinlto-function-summary-callgraph.ll +++ b/test/Bitcode/thinlto-function-summary-callgraph.ll @@ -10,30 +10,27 @@ ; RUN: llvm-lto -thinlto-index-stats %p/Inputs/thinlto-function-summary-callgraph.1.bc | FileCheck %s --check-prefix=OLD ; RUN: llvm-lto -thinlto-index-stats %p/Inputs/thinlto-function-summary-callgraph-combined.1.bc | FileCheck %s --check-prefix=OLD-COMBINED +; CHECK: <SOURCE_FILENAME +; CHECK-NEXT: <FUNCTION +; "func" +; CHECK-NEXT: <FUNCTION op0=4 op1=4 ; CHECK: <GLOBALVAL_SUMMARY_BLOCK ; CHECK-NEXT: <VERSION -; See if the call to func is registered, using the expected callsite count -; and value id matching the subsequent value symbol table. -; CHECK-NEXT: <PERMODULE {{.*}} op4=[[FUNCID:[0-9]+]]/> +; See if the call to func is registered. +; CHECK-NEXT: <PERMODULE {{.*}} op4=1/> ; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; CHECK-NEXT: <VALUE_SYMTAB -; CHECK-NEXT: <FNENTRY {{.*}} record string = 'main' -; External function func should have entry with value id FUNCID -; CHECK-NEXT: <ENTRY {{.*}} op0=[[FUNCID]] {{.*}} record string = 'func' -; CHECK-NEXT: </VALUE_SYMTAB> +; CHECK: <STRTAB_BLOCK +; CHECK-NEXT: blob data = 'mainfunc' + ; COMBINED: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION +; COMBINED-NEXT: <VALUE_GUID op0=[[FUNCID:[0-9]+]] op1=7289175272376759421/> +; COMBINED-NEXT: <VALUE_GUID ; COMBINED-NEXT: <COMBINED -; See if the call to func is registered, using the expected callsite count -; and value id matching the subsequent value symbol table. -; COMBINED-NEXT: <COMBINED {{.*}} op5=[[FUNCID:[0-9]+]]/> +; See if the call to func is registered. 
+; COMBINED-NEXT: <COMBINED {{.*}} op5=[[FUNCID]]/> ; COMBINED-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; COMBINED-NEXT: <VALUE_SYMTAB -; Entry for function func should have entry with value id FUNCID -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op0=[[FUNCID]] op1=7289175272376759421/> -; COMBINED-NEXT: <COMBINED -; COMBINED-NEXT: </VALUE_SYMTAB> ; ModuleID = 'thinlto-function-summary-callgraph.ll' target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" @@ -49,4 +46,4 @@ entry: declare void @func(...) #1 ; OLD: Index {{.*}} contains 1 nodes (1 functions, 0 alias, 0 globals) and 1 edges (0 refs and 1 calls) -; OLD-COMBINED: Index {{.*}} contains 2 nodes (2 functions, 0 alias, 0 globals) and 1 edges (0 refs and 1 calls)
\ No newline at end of file +; OLD-COMBINED: Index {{.*}} contains 2 nodes (2 functions, 0 alias, 0 globals) and 1 edges (0 refs and 1 calls) diff --git a/test/Bitcode/thinlto-function-summary-originalnames.ll b/test/Bitcode/thinlto-function-summary-originalnames.ll index 8777bd9160765..afc9772484ef0 100644 --- a/test/Bitcode/thinlto-function-summary-originalnames.ll +++ b/test/Bitcode/thinlto-function-summary-originalnames.ll @@ -5,6 +5,9 @@ ; COMBINED: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION +; COMBINED-NEXT: <VALUE_GUID {{.*}} op1=4947176790635855146/> +; COMBINED-NEXT: <VALUE_GUID {{.*}} op1=-6591587165810580810/> +; COMBINED-NEXT: <VALUE_GUID {{.*}} op1=-4377693495213223786/> ; COMBINED-DAG: <COMBINED ; COMBINED-DAG: <COMBINED_ORIGINAL_NAME op0=6699318081062747564/> ; COMBINED-DAG: <COMBINED_GLOBALVAR_INIT_REFS @@ -12,11 +15,6 @@ ; COMBINED-DAG: <COMBINED_ALIAS ; COMBINED-DAG: <COMBINED_ORIGINAL_NAME op0=-4170563161550796836/> ; COMBINED-NEXT: </GLOBALVAL_SUMMARY_BLOCK> -; COMBINED-NEXT: <VALUE_SYMTAB -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op1=4947176790635855146/> -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op1=-6591587165810580810/> -; COMBINED-NEXT: <COMBINED_ENTRY {{.*}} op1=-4377693495213223786/> -; COMBINED-NEXT: </VALUE_SYMTAB> source_filename = "/path/to/source.c" diff --git a/test/Bitcode/thinlto-function-summary-refgraph.ll b/test/Bitcode/thinlto-function-summary-refgraph.ll index 882f86509ab1a..b52fce7917911 100644 --- a/test/Bitcode/thinlto-function-summary-refgraph.ll +++ b/test/Bitcode/thinlto-function-summary-refgraph.ll @@ -2,6 +2,32 @@ ; RUN: opt -module-summary %s -o %t.o ; RUN: llvm-bcanalyzer -dump %t.o | FileCheck %s +; CHECK: <SOURCE_FILENAME +; "bar" +; CHECK-NEXT: <GLOBALVAR {{.*}} op0=0 op1=3 +; "globalvar" +; CHECK-NEXT: <GLOBALVAR {{.*}} op0=3 op1=9 +; "func" +; CHECK-NEXT: <FUNCTION op0=12 op1=4 +; "func2" +; CHECK-NEXT: <FUNCTION op0=16 op1=5 +; "foo" +; CHECK-NEXT: <FUNCTION op0=21 op1=3 +; "func3" +; CHECK-NEXT: 
<FUNCTION op0=24 op1=5 +; "W" +; CHECK-NEXT: <FUNCTION op0=29 op1=1 +; "X" +; CHECK-NEXT: <FUNCTION op0=30 op1=1 +; "Y" +; CHECK-NEXT: <FUNCTION op0=31 op1=1 +; "Z" +; CHECK-NEXT: <FUNCTION op0=32 op1=1 +; "llvm.ctpop.i8" +; CHECK-NEXT: <FUNCTION op0=33 op1=13 +; "main" +; CHECK-NEXT: <FUNCTION op0=46 op1=4 + ; See if the calls and other references are recorded properly using the ; expected value id and other information as appropriate (callsite cout ; for calls). Use different linkage types for the various test cases to @@ -11,37 +37,32 @@ ; llvm.ctpop.i8. ; CHECK: <GLOBALVAL_SUMMARY_BLOCK ; Function main contains call to func, as well as address reference to func: -; CHECK-DAG: <PERMODULE {{.*}} op0=[[MAINID:[0-9]+]] op1=0 {{.*}} op3=1 op4=[[FUNCID:[0-9]+]] op5=[[FUNCID]]/> +; op0=main op4=func op5=func +; CHECK-DAG: <PERMODULE {{.*}} op0=11 op1=0 {{.*}} op3=1 op4=2 op5=2/> ; Function W contains a call to func3 as well as a reference to globalvar: -; CHECK-DAG: <PERMODULE {{.*}} op0=[[WID:[0-9]+]] op1=5 {{.*}} op3=1 op4=[[GLOBALVARID:[0-9]+]] op5=[[FUNC3ID:[0-9]+]]/> +; op0=W op4=globalvar op5=func3 +; CHECK-DAG: <PERMODULE {{.*}} op0=6 op1=5 {{.*}} op3=1 op4=1 op5=5/> ; Function X contains call to foo, as well as address reference to foo ; which is in the same instruction as the call: -; CHECK-DAG: <PERMODULE {{.*}} op0=[[XID:[0-9]+]] op1=1 {{.*}} op3=1 op4=[[FOOID:[0-9]+]] op5=[[FOOID]]/> +; op0=X op4=foo op5=foo +; CHECK-DAG: <PERMODULE {{.*}} op0=7 op1=1 {{.*}} op3=1 op4=4 op5=4/> ; Function Y contains call to func2, and ensures we don't incorrectly add ; a reference to it when reached while earlier analyzing the phi using its ; return value: -; CHECK-DAG: <PERMODULE {{.*}} op0=[[YID:[0-9]+]] op1=8 {{.*}} op3=0 op4=[[FUNC2ID:[0-9]+]]/> +; op0=Y op4=func2 +; CHECK-DAG: <PERMODULE {{.*}} op0=8 op1=8 {{.*}} op3=0 op4=3/> ; Function Z contains call to func2, and ensures we don't incorrectly add ; a reference to it when reached while analyzing subsequent use of 
its return ; value: -; CHECK-DAG: <PERMODULE {{.*}} op0=[[ZID:[0-9]+]] op1=3 {{.*}} op3=0 op4=[[FUNC2ID:[0-9]+]]/> +; op0=Z op4=func2 +; CHECK-DAG: <PERMODULE {{.*}} op0=9 op1=3 {{.*}} op3=0 op4=3/> ; Variable bar initialization contains address reference to func: -; CHECK-DAG: <PERMODULE_GLOBALVAR_INIT_REFS {{.*}} op0=[[BARID:[0-9]+]] op1=0 op2=[[FUNCID]]/> +; op0=bar op2=func +; CHECK-DAG: <PERMODULE_GLOBALVAR_INIT_REFS {{.*}} op0=0 op1=0 op2=2/> ; CHECK: </GLOBALVAL_SUMMARY_BLOCK> -; CHECK-NEXT: <VALUE_SYMTAB -; CHECK-DAG: <ENTRY {{.*}} op0=[[BARID]] {{.*}} record string = 'bar' -; CHECK-DAG: <ENTRY {{.*}} op0=[[FUNCID]] {{.*}} record string = 'func' -; CHECK-DAG: <ENTRY {{.*}} op0=[[FOOID]] {{.*}} record string = 'foo' -; CHECK-DAG: <FNENTRY {{.*}} op0=[[MAINID]] {{.*}} record string = 'main' -; CHECK-DAG: <FNENTRY {{.*}} op0=[[WID]] {{.*}} record string = 'W' -; CHECK-DAG: <FNENTRY {{.*}} op0=[[XID]] {{.*}} record string = 'X' -; CHECK-DAG: <FNENTRY {{.*}} op0=[[YID]] {{.*}} record string = 'Y' -; CHECK-DAG: <FNENTRY {{.*}} op0=[[ZID]] {{.*}} record string = 'Z' -; CHECK-DAG: <ENTRY {{.*}} op0=[[FUNC2ID]] {{.*}} record string = 'func2' -; CHECK-DAG: <ENTRY {{.*}} op0=[[FUNC3ID]] {{.*}} record string = 'func3' -; CHECK-DAG: <ENTRY {{.*}} op0=[[GLOBALVARID]] {{.*}} record string = 'globalvar' -; CHECK: </VALUE_SYMTAB> +; CHECK: <STRTAB_BLOCK +; CHECK-NEXT: blob data = 'barglobalvarfuncfunc2foofunc3WXYZllvm.ctpop.i8main' ; ModuleID = 'thinlto-function-summary-refgraph.ll' target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/test/Bitcode/thinlto-function-summary.ll b/test/Bitcode/thinlto-function-summary.ll index ff61b7713f0f4..6b8bfbb292cd2 100644 --- a/test/Bitcode/thinlto-function-summary.ll +++ b/test/Bitcode/thinlto-function-summary.ll @@ -2,9 +2,19 @@ ; RUN: opt -passes=name-anon-globals -module-summary < %s | llvm-bcanalyzer -dump | FileCheck %s -check-prefix=BC ; Check for summary block/records. 
-; Check the value ids in the summary entries against the -; same in the ValueSumbolTable, to ensure the ordering is stable. -; Also check the linkage field on the summary entries. +; BC: <SOURCE_FILENAME +; "h" +; BC-NEXT: <GLOBALVAR {{.*}} op0=0 op1=1 +; "foo" +; BC-NEXT: <FUNCTION op0=1 op1=3 +; "bar" +; BC-NEXT: <FUNCTION op0=4 op1=3 +; "anon.[32 chars].0" +; BC-NEXT: <FUNCTION op0=7 op1=39 +; "variadic" +; BC-NEXT: <FUNCTION op0=46 op1=8 +; "f" +; BC-NEXT: <ALIAS op0=54 op1=1 ; BC: <GLOBALVAL_SUMMARY_BLOCK ; BC-NEXT: <VERSION ; BC-NEXT: <PERMODULE {{.*}} op0=1 op1=0 @@ -13,13 +23,8 @@ ; BC-NEXT: <PERMODULE {{.*}} op0=4 op1=16 ; BC-NEXT: <ALIAS {{.*}} op0=5 op1=0 op2=3 ; BC-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; BC-NEXT: <VALUE_SYMTAB -; BC-NEXT: <FNENTRY {{.*}} op0=4 {{.*}}> record string = 'variadic' -; BC-NEXT: <FNENTRY {{.*}} op0=1 {{.*}}> record string = 'foo' -; BC-NEXT: <FNENTRY {{.*}} op0=2 {{.*}}> record string = 'bar' -; BC-NEXT: <ENTRY {{.*}} op0=5 {{.*}}> record string = 'f' -; BC-NEXT: <ENTRY {{.*}} record string = 'h' -; BC-NEXT: <FNENTRY {{.*}} op0=3 {{.*}}> record string = 'anon. 
+; BC: <STRTAB_BLOCK +; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicf' ; RUN: opt -name-anon-globals -module-summary < %s | llvm-dis | FileCheck %s diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll index e40199d82c9dd..71ea9d54f647a 100644 --- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll +++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll @@ -154,3 +154,19 @@ continue: define fp128 @test_quad_dump() { ret fp128 0xL00000000000000004000000000000000 } + +; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement) +; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement +; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement: +define void @vector_of_pointers_extractelement() { + %dummy = extractelement <2 x i16*> undef, i32 0 + ret void +} + +; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(<2 x p0>) = G_INSERT_VECTOR_ELT %vreg1, %vreg2, %vreg3; (in function: vector_of_pointers_insertelement +; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement +; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement: +define void @vector_of_pointers_insertelement() { + %dummy = insertelement <2 x i16*> undef, i16* null, i32 0 + ret void +} diff --git a/test/CodeGen/AArch64/arm64-abi.ll b/test/CodeGen/AArch64/arm64-abi.ll index 6cf0ab35b9b52..5be84b7d493b7 100644 --- a/test/CodeGen/AArch64/arm64-abi.ll +++ b/test/CodeGen/AArch64/arm64-abi.ll @@ -43,9 +43,7 @@ entry: ; CHECK-LABEL: i8i16caller ; The 8th, 9th, 10th and 11th arguments are passed at sp, sp+2, sp+4, sp+5. ; They are i8, i16, i8 and i8. 
-; CHECK-DAG: strb {{w[0-9]+}}, [sp, #5] -; CHECK-DAG: strb {{w[0-9]+}}, [sp, #4] -; CHECK-DAG: strh {{w[0-9]+}}, [sp, #2] +; CHECK-DAG: stur {{w[0-9]+}}, [sp, #2] ; CHECK-DAG: strb {{w[0-9]+}}, [sp] ; CHECK: bl ; FAST-LABEL: i8i16caller diff --git a/test/CodeGen/AArch64/nonlazybind.ll b/test/CodeGen/AArch64/nonlazybind.ll new file mode 100644 index 0000000000000..4355d45fe84da --- /dev/null +++ b/test/CodeGen/AArch64/nonlazybind.ll @@ -0,0 +1,40 @@ +; RUN: llc -mtriple=aarch64-apple-ios %s -o - -aarch64-enable-nonlazybind | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios %s -o - | FileCheck %s --check-prefix=CHECK-NORMAL + +define void @local() nonlazybind { + ret void +} + +declare void @nonlocal() nonlazybind + +define void @test_laziness() { +; CHECK-LABEL: test_laziness: + +; CHECK: bl _local + +; CHECK: adrp x[[TMP:[0-9]+]], _nonlocal@GOTPAGE +; CHECK: ldr [[FUNC:x[0-9]+]], [x[[TMP]], _nonlocal@GOTPAGEOFF] +; CHECK: blr [[FUNC]] + +; CHECK-NORMAL-LABEL: test_laziness: +; CHECK-NORMAL: bl _local +; CHEKC-NORMAL: bl _nonlocal + + call void @local() + call void @nonlocal() + ret void +} + +define void @test_laziness_tail() { +; CHECK-LABEL: test_laziness_tail: + +; CHECK: adrp x[[TMP:[0-9]+]], _nonlocal@GOTPAGE +; CHECK: ldr [[FUNC:x[0-9]+]], [x[[TMP]], _nonlocal@GOTPAGEOFF] +; CHECK: br [[FUNC]] + +; CHECK-NORMAL-LABEL: test_laziness_tail: +; CHECK-NORMAL: b _nonlocal + + tail call void @nonlocal() + ret void +} diff --git a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll index 95a206e1dd00d..8e5a512dd3c91 100644 --- a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll +++ b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll @@ -3,333 +3,358 @@ ; GCN-LABEL: @add_i3( ; SI: %r = add i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 
%[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @add_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @add_i3(i3 %a, i3 %b) { %r = add i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nsw_i3( ; SI: %r = add nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @add_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @add_nsw_i3(i3 %a, i3 %b) { %r = add nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_i3( ; SI: %r = add nuw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @add_nuw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @add_nuw_i3(i3 %a, i3 %b) { %r = add nuw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_nsw_i3( ; SI: %r = add nuw nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @add_nuw_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @add_nuw_nsw_i3(i3 %a, i3 %b) { %r = add nuw nsw i3 
%a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_i3( ; SI: %r = sub i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @sub_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @sub_i3(i3 %a, i3 %b) { %r = sub i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nsw_i3( ; SI: %r = sub nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @sub_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @sub_nsw_i3(i3 %a, i3 %b) { %r = sub nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_i3( ; SI: %r = sub nuw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @sub_nuw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @sub_nuw_i3(i3 %a, i3 %b) { %r = sub nuw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_nsw_i3( ; SI: %r = sub nuw nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; 
VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @sub_nuw_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @sub_nuw_nsw_i3(i3 %a, i3 %b) { %r = sub nuw nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_i3( ; SI: %r = mul i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @mul_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @mul_i3(i3 %a, i3 %b) { %r = mul i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nsw_i3( ; SI: %r = mul nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @mul_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @mul_nsw_i3(i3 %a, i3 %b) { %r = mul nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_i3( ; SI: %r = mul nuw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @mul_nuw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @mul_nuw_i3(i3 %a, i3 %b) 
{ %r = mul nuw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_nsw_i3( ; SI: %r = mul nuw nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @mul_nuw_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @mul_nuw_nsw_i3(i3 %a, i3 %b) { %r = mul nuw nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @urem_i3( ; SI: %r = urem i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = urem i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @urem_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @urem_i3(i3 %a, i3 %b) { %r = urem i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @srem_i3( ; SI: %r = srem i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = srem i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @srem_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @srem_i3(i3 %a, i3 %b) { %r = srem i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_i3( ; SI: %r = shl i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to 
i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @shl_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @shl_i3(i3 %a, i3 %b) { %r = shl i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nsw_i3( ; SI: %r = shl nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @shl_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @shl_nsw_i3(i3 %a, i3 %b) { %r = shl nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_i3( ; SI: %r = shl nuw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @shl_nuw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @shl_nuw_i3(i3 %a, i3 %b) { %r = shl nuw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_nsw_i3( ; SI: %r = shl nuw nsw i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @shl_nuw_nsw_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void 
@shl_nuw_nsw_i3(i3 %a, i3 %b) { %r = shl nuw nsw i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_i3( ; SI: %r = lshr i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = lshr i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @lshr_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @lshr_i3(i3 %a, i3 %b) { %r = lshr i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_exact_i3( ; SI: %r = lshr exact i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = lshr exact i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @lshr_exact_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @lshr_exact_i3(i3 %a, i3 %b) { %r = lshr exact i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_i3( ; SI: %r = ashr i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = ashr i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @ashr_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @ashr_i3(i3 %a, i3 %b) { %r = ashr i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_exact_i3( ; SI: %r = ashr exact i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = sext i3 %a to i32 ; 
VI-NEXT: %[[B_32:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = ashr exact i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @ashr_exact_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @ashr_exact_i3(i3 %a, i3 %b) { %r = ashr exact i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @and_i3( ; SI: %r = and i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = and i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @and_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @and_i3(i3 %a, i3 %b) { %r = and i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @or_i3( ; SI: %r = or i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = or i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @or_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @or_i3(i3 %a, i3 %b) { %r = or i3 %a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @xor_i3( ; SI: %r = xor i3 %a, %b -; SI-NEXT: ret i3 %r +; SI-NEXT: store volatile i3 %r ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = xor i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @xor_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @xor_i3(i3 %a, i3 %b) { %r = xor i3 
%a, %b - ret i3 %r + store volatile i3 %r, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_eq_i3( ; SI: %cmp = icmp eq i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp eq i32 %[[A_32_0]], %[[B_32_0]] @@ -337,17 +362,18 @@ define i3 @xor_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_eq_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_eq_i3(i3 %a, i3 %b) { %cmp = icmp eq i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ne_i3( ; SI: %cmp = icmp ne i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ne i32 %[[A_32_0]], %[[B_32_0]] @@ -355,17 +381,18 @@ define i3 @select_eq_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_ne_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_ne_i3(i3 %a, i3 %b) { %cmp = icmp ne i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ugt_i3( ; SI: %cmp = icmp ugt i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store 
volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ugt i32 %[[A_32_0]], %[[B_32_0]] @@ -373,17 +400,18 @@ define i3 @select_ne_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_ugt_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_ugt_i3(i3 %a, i3 %b) { %cmp = icmp ugt i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_uge_i3( ; SI: %cmp = icmp uge i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp uge i32 %[[A_32_0]], %[[B_32_0]] @@ -391,17 +419,18 @@ define i3 @select_ugt_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_uge_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_uge_i3(i3 %a, i3 %b) { %cmp = icmp uge i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ult_i3( ; SI: %cmp = icmp ult i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ult i32 %[[A_32_0]], %[[B_32_0]] @@ -409,17 +438,18 @@ define i3 
@select_uge_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_ult_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_ult_i3(i3 %a, i3 %b) { %cmp = icmp ult i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ule_i3( ; SI: %cmp = icmp ule i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ule i32 %[[A_32_0]], %[[B_32_0]] @@ -427,17 +457,18 @@ define i3 @select_ult_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_ule_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_ule_i3(i3 %a, i3 %b) { %cmp = icmp ule i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sgt_i3( ; SI: %cmp = icmp sgt i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sgt i32 %[[A_32_0]], %[[B_32_0]] @@ -445,17 +476,18 @@ define i3 @select_ule_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 
%[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_sgt_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_sgt_i3(i3 %a, i3 %b) { %cmp = icmp sgt i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sge_i3( ; SI: %cmp = icmp sge i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sge i32 %[[A_32_0]], %[[B_32_0]] @@ -463,17 +495,18 @@ define i3 @select_sgt_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_sge_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_sge_i3(i3 %a, i3 %b) { %cmp = icmp sge i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_slt_i3( ; SI: %cmp = icmp slt i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp slt i32 %[[A_32_0]], %[[B_32_0]] @@ -481,17 +514,18 @@ define i3 @select_sge_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_slt_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_slt_i3(i3 %a, i3 %b) { %cmp = icmp slt i3 
%a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sle_i3( ; SI: %cmp = icmp sle i3 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i3 %a, i3 %b -; SI-NEXT: ret i3 %sel +; SI-NEXT: store volatile i3 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i3 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sle i32 %[[A_32_0]], %[[B_32_0]] @@ -499,384 +533,415 @@ define i3 @select_slt_i3(i3 %a, i3 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i3 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_3:[0-9]+]] = trunc i32 %[[SEL_32]] to i3 -; VI-NEXT: ret i3 %[[SEL_3]] -define i3 @select_sle_i3(i3 %a, i3 %b) { +; VI-NEXT: store volatile i3 %[[SEL_3]] +define amdgpu_kernel void @select_sle_i3(i3 %a, i3 %b) { %cmp = icmp sle i3 %a, %b %sel = select i1 %cmp, i3 %a, i3 %b - ret i3 %sel + store volatile i3 %sel, i3 addrspace(1)* undef + ret void } declare i3 @llvm.bitreverse.i3(i3) ; GCN-LABEL: @bitreverse_i3( ; SI: %brev = call i3 @llvm.bitreverse.i3(i3 %a) -; SI-NEXT: ret i3 %brev +; SI-NEXT: store volatile i3 %brev ; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = call i32 @llvm.bitreverse.i32(i32 %[[A_32]]) ; VI-NEXT: %[[S_32:[0-9]+]] = lshr i32 %[[R_32]], 29 ; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[S_32]] to i3 -; VI-NEXT: ret i3 %[[R_3]] -define i3 @bitreverse_i3(i3 %a) { +; VI-NEXT: store volatile i3 %[[R_3]] +define amdgpu_kernel void @bitreverse_i3(i3 %a) { %brev = call i3 @llvm.bitreverse.i3(i3 %a) - ret i3 %brev + store volatile i3 %brev, i3 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_i16( ; SI: %r = add i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 
%[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @add_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @add_i16(i16 %a, i16 %b) { %r = add i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @constant_add_i16( -; VI: ret i16 3 -define i16 @constant_add_i16() { +; VI: store volatile i16 3 +define amdgpu_kernel void @constant_add_i16() { %r = add i16 1, 2 - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @constant_add_nsw_i16( -; VI: ret i16 3 -define i16 @constant_add_nsw_i16() { +; VI: store volatile i16 3 +define amdgpu_kernel void @constant_add_nsw_i16() { %r = add nsw i16 1, 2 - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @constant_add_nuw_i16( -; VI: ret i16 3 -define i16 @constant_add_nuw_i16() { +; VI: store volatile i16 3 +define amdgpu_kernel void @constant_add_nuw_i16() { %r = add nsw i16 1, 2 - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nsw_i16( ; SI: %r = add nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @add_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @add_nsw_i16(i16 %a, i16 %b) { %r = add nsw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_i16( ; SI: %r = add nuw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 
-; VI-NEXT: ret i16 %[[R_16]] -define i16 @add_nuw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @add_nuw_i16(i16 %a, i16 %b) { %r = add nuw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_nsw_i16( ; SI: %r = add nuw nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @add_nuw_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @add_nuw_nsw_i16(i16 %a, i16 %b) { %r = add nuw nsw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_i16( ; SI: %r = sub i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @sub_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @sub_i16(i16 %a, i16 %b) { %r = sub i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nsw_i16( ; SI: %r = sub nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @sub_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @sub_nsw_i16(i16 %a, i16 %b) { %r = sub nsw i16 %a, %b - ret 
i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_i16( ; SI: %r = sub nuw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @sub_nuw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @sub_nuw_i16(i16 %a, i16 %b) { %r = sub nuw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_nsw_i16( ; SI: %r = sub nuw nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @sub_nuw_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @sub_nuw_nsw_i16(i16 %a, i16 %b) { %r = sub nuw nsw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_i16( ; SI: %r = mul i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @mul_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @mul_i16(i16 %a, i16 %b) { %r = mul i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nsw_i16( ; SI: %r = mul nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: 
%[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @mul_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @mul_nsw_i16(i16 %a, i16 %b) { %r = mul nsw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_i16( ; SI: %r = mul nuw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @mul_nuw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @mul_nuw_i16(i16 %a, i16 %b) { %r = mul nuw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_nsw_i16( ; SI: %r = mul nuw nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @mul_nuw_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @mul_nuw_nsw_i16(i16 %a, i16 %b) { %r = mul nuw nsw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @urem_i16( ; SI: %r = urem i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = urem i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc 
i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @urem_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @urem_i16(i16 %a, i16 %b) { %r = urem i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @srem_i16( ; SI: %r = srem i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = srem i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @srem_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @srem_i16(i16 %a, i16 %b) { %r = srem i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_i16( ; SI: %r = shl i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @shl_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @shl_i16(i16 %a, i16 %b) { %r = shl i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nsw_i16( ; SI: %r = shl nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @shl_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @shl_nsw_i16(i16 %a, i16 %b) { %r = shl nsw i16 %a, %b - ret i16 %r + store 
volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_i16( ; SI: %r = shl nuw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @shl_nuw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @shl_nuw_i16(i16 %a, i16 %b) { %r = shl nuw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_nsw_i16( ; SI: %r = shl nuw nsw i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @shl_nuw_nsw_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @shl_nuw_nsw_i16(i16 %a, i16 %b) { %r = shl nuw nsw i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_i16( ; SI: %r = lshr i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = lshr i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @lshr_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @lshr_i16(i16 %a, i16 %b) { %r = lshr i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_exact_i16( ; SI: %r = lshr exact i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: 
%[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = lshr exact i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @lshr_exact_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @lshr_exact_i16(i16 %a, i16 %b) { %r = lshr exact i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_i16( ; SI: %r = ashr i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = ashr i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @ashr_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @ashr_i16(i16 %a, i16 %b) { %r = ashr i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_exact_i16( ; SI: %r = ashr exact i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = ashr exact i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @ashr_exact_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @ashr_exact_i16(i16 %a, i16 %b) { %r = ashr exact i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @constant_lshr_exact_i16( -; VI: ret i16 2 -define i16 @constant_lshr_exact_i16(i16 %a, i16 %b) { +; VI: store volatile i16 2 +define amdgpu_kernel void @constant_lshr_exact_i16(i16 %a, i16 %b) { %r = lshr exact i16 4, 1 - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; 
GCN-LABEL: @and_i16( ; SI: %r = and i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = and i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @and_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @and_i16(i16 %a, i16 %b) { %r = and i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @or_i16( ; SI: %r = or i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = or i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @or_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @or_i16(i16 %a, i16 %b) { %r = or i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @xor_i16( ; SI: %r = xor i16 %a, %b -; SI-NEXT: ret i16 %r +; SI-NEXT: store volatile i16 %r ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = xor i32 %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @xor_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @xor_i16(i16 %a, i16 %b) { %r = xor i16 %a, %b - ret i16 %r + store volatile i16 %r, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_eq_i16( ; SI: %cmp = icmp eq i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: 
%[[CMP:[0-9]+]] = icmp eq i32 %[[A_32_0]], %[[B_32_0]] @@ -884,17 +949,18 @@ define i16 @xor_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_eq_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_eq_i16(i16 %a, i16 %b) { %cmp = icmp eq i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ne_i16( ; SI: %cmp = icmp ne i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ne i32 %[[A_32_0]], %[[B_32_0]] @@ -902,17 +968,18 @@ define i16 @select_eq_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_ne_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_ne_i16(i16 %a, i16 %b) { %cmp = icmp ne i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ugt_i16( ; SI: %cmp = icmp ugt i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ugt i32 %[[A_32_0]], %[[B_32_0]] @@ -920,17 +987,18 @@ define i16 @select_ne_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i16 
%b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_ugt_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_ugt_i16(i16 %a, i16 %b) { %cmp = icmp ugt i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_uge_i16( ; SI: %cmp = icmp uge i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp uge i32 %[[A_32_0]], %[[B_32_0]] @@ -938,17 +1006,18 @@ define i16 @select_ugt_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_uge_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_uge_i16(i16 %a, i16 %b) { %cmp = icmp uge i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ult_i16( ; SI: %cmp = icmp ult i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ult i32 %[[A_32_0]], %[[B_32_0]] @@ -956,17 +1025,18 @@ define i16 @select_uge_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 
%[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_ult_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_ult_i16(i16 %a, i16 %b) { %cmp = icmp ult i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ule_i16( ; SI: %cmp = icmp ule i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ule i32 %[[A_32_0]], %[[B_32_0]] @@ -974,17 +1044,18 @@ define i16 @select_ult_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_ule_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_ule_i16(i16 %a, i16 %b) { %cmp = icmp ule i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sgt_i16( ; SI: %cmp = icmp sgt i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sgt i32 %[[A_32_0]], %[[B_32_0]] @@ -992,17 +1063,18 @@ define i16 @select_ule_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_sgt_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] 
+define amdgpu_kernel void @select_sgt_i16(i16 %a, i16 %b) { %cmp = icmp sgt i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sge_i16( ; SI: %cmp = icmp sge i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sge i32 %[[A_32_0]], %[[B_32_0]] @@ -1010,17 +1082,18 @@ define i16 @select_sgt_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_sge_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_sge_i16(i16 %a, i16 %b) { %cmp = icmp sge i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_slt_i16( ; SI: %cmp = icmp slt i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp slt i32 %[[A_32_0]], %[[B_32_0]] @@ -1028,17 +1101,18 @@ define i16 @select_sge_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_slt_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_slt_i16(i16 %a, i16 %b) { %cmp = icmp slt i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel 
+ store volatile i16 %sel, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sle_i16( ; SI: %cmp = icmp sle i16 %a, %b ; SI-NEXT: %sel = select i1 %cmp, i16 %a, i16 %b -; SI-NEXT: ret i16 %sel +; SI-NEXT: store volatile i16 %sel ; VI: %[[A_32_0:[0-9]+]] = sext i16 %a to i32 ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sle i32 %[[A_32_0]], %[[B_32_0]] @@ -1046,356 +1120,384 @@ define i16 @select_slt_i16(i16 %a, i16 %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext i16 %b to i32 ; VI-NEXT: %[[SEL_32:[0-9]+]] = select i1 %[[CMP]], i32 %[[A_32_1]], i32 %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc i32 %[[SEL_32]] to i16 -; VI-NEXT: ret i16 %[[SEL_16]] -define i16 @select_sle_i16(i16 %a, i16 %b) { +; VI-NEXT: store volatile i16 %[[SEL_16]] +define amdgpu_kernel void @select_sle_i16(i16 %a, i16 %b) { %cmp = icmp sle i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b - ret i16 %sel + store volatile i16 %sel, i16 addrspace(1)* undef + ret void } declare i16 @llvm.bitreverse.i16(i16) + ; GCN-LABEL: @bitreverse_i16( ; SI: %brev = call i16 @llvm.bitreverse.i16(i16 %a) -; SI-NEXT: ret i16 %brev +; SI-NEXT: store volatile i16 %brev ; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32 ; VI-NEXT: %[[R_32:[0-9]+]] = call i32 @llvm.bitreverse.i32(i32 %[[A_32]]) ; VI-NEXT: %[[S_32:[0-9]+]] = lshr i32 %[[R_32]], 16 ; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[S_32]] to i16 -; VI-NEXT: ret i16 %[[R_16]] -define i16 @bitreverse_i16(i16 %a) { +; VI-NEXT: store volatile i16 %[[R_16]] +define amdgpu_kernel void @bitreverse_i16(i16 %a) { %brev = call i16 @llvm.bitreverse.i16(i16 %a) - ret i16 %brev + store volatile i16 %brev, i16 addrspace(1)* undef + ret void } ; GCN-LABEL: @add_3xi15( ; SI: %r = add <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> 
%[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @add_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @add_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = add <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nsw_3xi15( ; SI: %r = add nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @add_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @add_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = add nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_3xi15( ; SI: %r = add nuw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @add_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @add_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = add nuw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_nsw_3xi15( ; SI: %r = add nuw nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; 
SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @add_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @add_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = add nuw nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_3xi15( ; SI: %r = sub <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @sub_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @sub_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = sub <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nsw_3xi15( ; SI: %r = sub nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @sub_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @sub_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = sub nsw <3 x i15> %a, %b - 
ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_3xi15( ; SI: %r = sub nuw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @sub_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @sub_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = sub nuw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_nsw_3xi15( ; SI: %r = sub nuw nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @sub_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @sub_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = sub nuw nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_3xi15( ; SI: %r = mul <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] 
-define <3 x i15> @mul_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @mul_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = mul <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nsw_3xi15( ; SI: %r = mul nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @mul_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @mul_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = mul nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_3xi15( ; SI: %r = mul nuw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @mul_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @mul_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = mul nuw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_nsw_3xi15( ; SI: %r = mul nuw nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> 
%b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @mul_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @mul_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = mul nuw nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @urem_3xi15( ; SI: %r = urem <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = urem <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @urem_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @urem_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = urem <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @srem_3xi15( ; SI: %r = srem <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = srem <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @srem_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @srem_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = srem <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_3xi15( ; SI: %r = shl <3 x i15> %a, %b -; 
SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @shl_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @shl_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = shl <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nsw_3xi15( ; SI: %r = shl nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @shl_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @shl_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = shl nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_3xi15( ; SI: %r = shl nuw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @shl_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @shl_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) { 
%r = shl nuw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_nsw_3xi15( ; SI: %r = shl nuw nsw <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @shl_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @shl_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = shl nuw nsw <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_3xi15( ; SI: %r = lshr <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = lshr <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @lshr_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @lshr_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = lshr <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_exact_3xi15( ; SI: %r = lshr exact <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = lshr exact <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: 
ret <3 x i15> %[[R_15]] -define <3 x i15> @lshr_exact_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @lshr_exact_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = lshr exact <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_3xi15( ; SI: %r = ashr <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = ashr <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @ashr_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @ashr_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = ashr <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_exact_3xi15( ; SI: %r = ashr exact <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = ashr exact <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @ashr_exact_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @ashr_exact_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = ashr exact <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @and_3xi15( ; SI: %r = and <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: 
%[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = and <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @and_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @and_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = and <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @or_3xi15( ; SI: %r = or <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = or <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @or_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @or_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = or <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @xor_3xi15( ; SI: %r = xor <3 x i15> %a, %b -; SI-NEXT: ret <3 x i15> %r +; SI-NEXT: store volatile <3 x i15> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = xor <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @xor_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @xor_3xi15(<3 x i15> %a, <3 x i15> %b) { %r = xor <3 x i15> %a, %b - ret <3 x i15> %r + store volatile <3 x i15> %r, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_eq_3xi15( ; SI: %cmp = icmp eq <3 x i15> %a, %b ; SI-NEXT: 
%sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp eq <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1403,17 +1505,18 @@ define <3 x i15> @xor_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_eq_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_eq_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp eq <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ne_3xi15( ; SI: %cmp = icmp ne <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ne <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1421,17 +1524,18 @@ define <3 x i15> @select_eq_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_ne_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_ne_3xi15(<3 x i15> %a, <3 x 
i15> %b) { %cmp = icmp ne <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ugt_3xi15( ; SI: %cmp = icmp ugt <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ugt <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1439,17 +1543,18 @@ define <3 x i15> @select_ne_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_ugt_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_ugt_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp ugt <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_uge_3xi15( ; SI: %cmp = icmp uge <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp uge <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1457,17 +1562,18 @@ define <3 x i15> @select_ugt_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> 
%[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_uge_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_uge_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp uge <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ult_3xi15( ; SI: %cmp = icmp ult <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ult <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1475,17 +1581,18 @@ define <3 x i15> @select_uge_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_ult_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_ult_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp ult <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ule_3xi15( ; SI: %cmp = icmp ule <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: 
%[[CMP:[0-9]+]] = icmp ule <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1493,17 +1600,18 @@ define <3 x i15> @select_ult_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_ule_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_ule_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp ule <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sgt_3xi15( ; SI: %cmp = icmp sgt <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sgt <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1511,17 +1619,18 @@ define <3 x i15> @select_ule_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_sgt_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_sgt_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp sgt <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sge_3xi15( ; SI: %cmp = 
icmp sge <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sge <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1529,17 +1638,18 @@ define <3 x i15> @select_sgt_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_sge_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_sge_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp sge <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_slt_3xi15( ; SI: %cmp = icmp slt <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp slt <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1547,17 +1657,18 @@ define <3 x i15> @select_sge_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_slt_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define 
amdgpu_kernel void @select_slt_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp slt <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sle_3xi15( ; SI: %cmp = icmp sle <3 x i15> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b -; SI-NEXT: ret <3 x i15> %sel +; SI-NEXT: store volatile <3 x i15> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sle <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1565,356 +1676,383 @@ define <3 x i15> @select_slt_3xi15(<3 x i15> %a, <3 x i15> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i15> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_15:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[SEL_15]] -define <3 x i15> @select_sle_3xi15(<3 x i15> %a, <3 x i15> %b) { +; VI-NEXT: store volatile <3 x i15> %[[SEL_15]] +define amdgpu_kernel void @select_sle_3xi15(<3 x i15> %a, <3 x i15> %b) { %cmp = icmp sle <3 x i15> %a, %b %sel = select <3 x i1> %cmp, <3 x i15> %a, <3 x i15> %b - ret <3 x i15> %sel + store volatile <3 x i15> %sel, <3 x i15> addrspace(1)* undef + ret void } declare <3 x i15> @llvm.bitreverse.v3i15(<3 x i15>) ; GCN-LABEL: @bitreverse_3xi15( ; SI: %brev = call <3 x i15> @llvm.bitreverse.v3i15(<3 x i15> %a) -; SI-NEXT: ret <3 x i15> %brev +; SI-NEXT: store volatile <3 x i15> %brev ; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = call <3 x i32> @llvm.bitreverse.v3i32(<3 x i32> %[[A_32]]) ; VI-NEXT: %[[S_32:[0-9]+]] = lshr <3 x i32> %[[R_32]], <i32 17, i32 17, i32 17> ; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[S_32]] to <3 x i15> -; VI-NEXT: ret <3 x i15> %[[R_15]] -define <3 x i15> @bitreverse_3xi15(<3 x 
i15> %a) { +; VI-NEXT: store volatile <3 x i15> %[[R_15]] +define amdgpu_kernel void @bitreverse_3xi15(<3 x i15> %a) { %brev = call <3 x i15> @llvm.bitreverse.v3i15(<3 x i15> %a) - ret <3 x i15> %brev + store volatile <3 x i15> %brev, <3 x i15> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_3xi16( ; SI: %r = add <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @add_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @add_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = add <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nsw_3xi16( ; SI: %r = add nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @add_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @add_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = add nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_3xi16( ; SI: %r = add nuw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: 
%[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @add_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @add_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = add nuw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @add_nuw_nsw_3xi16( ; SI: %r = add nuw nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @add_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @add_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = add nuw nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_3xi16( ; SI: %r = sub <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @sub_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @sub_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = sub <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nsw_3xi16( ; SI: %r = sub nsw <3 x i16> %a, 
%b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @sub_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @sub_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = sub nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_3xi16( ; SI: %r = sub nuw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @sub_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @sub_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = sub nuw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @sub_nuw_nsw_3xi16( ; SI: %r = sub nuw nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @sub_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void 
@sub_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = sub nuw nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_3xi16( ; SI: %r = mul <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @mul_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @mul_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = mul <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nsw_3xi16( ; SI: %r = mul nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @mul_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @mul_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = mul nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_3xi16( ; SI: %r = mul nuw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to 
<3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @mul_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @mul_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = mul nuw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @mul_nuw_nsw_3xi16( ; SI: %r = mul nuw nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @mul_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @mul_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = mul nuw nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @urem_3xi16( ; SI: %r = urem <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = urem <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @urem_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @urem_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = urem <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @srem_3xi16( ; SI: %r = srem <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = sext <3 x i16> %a to <3 x 
i32> ; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = srem <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @srem_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @srem_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = srem <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_3xi16( ; SI: %r = shl <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @shl_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @shl_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = shl <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nsw_3xi16( ; SI: %r = shl nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @shl_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @shl_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = shl nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: 
@shl_nuw_3xi16( ; SI: %r = shl nuw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @shl_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @shl_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = shl nuw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @shl_nuw_nsw_3xi16( ; SI: %r = shl nuw nsw <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @shl_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @shl_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = shl nuw nsw <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_3xi16( ; SI: %r = lshr <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = lshr <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @lshr_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] 
+define amdgpu_kernel void @lshr_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = lshr <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @lshr_exact_3xi16( ; SI: %r = lshr exact <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = lshr exact <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @lshr_exact_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @lshr_exact_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = lshr exact <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_3xi16( ; SI: %r = ashr <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = sext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = ashr <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @ashr_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @ashr_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = ashr <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @ashr_exact_3xi16( ; SI: %r = ashr exact <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = sext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = ashr exact <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: 
%[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @ashr_exact_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @ashr_exact_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = ashr exact <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @and_3xi16( ; SI: %r = and <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = and <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @and_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @and_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = and <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @or_3xi16( ; SI: %r = or <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = or <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @or_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @or_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = or <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @xor_3xi16( ; SI: %r = xor <3 x i16> %a, %b -; SI-NEXT: ret <3 x i16> %r +; SI-NEXT: store volatile <3 x i16> %r ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; 
VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = xor <3 x i32> %[[A_32]], %[[B_32]] ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @xor_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @xor_3xi16(<3 x i16> %a, <3 x i16> %b) { %r = xor <3 x i16> %a, %b - ret <3 x i16> %r + store volatile <3 x i16> %r, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_eq_3xi16( ; SI: %cmp = icmp eq <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp eq <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1922,17 +2060,18 @@ define <3 x i16> @xor_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_eq_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_eq_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp eq <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ne_3xi16( ; SI: %cmp = icmp ne <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: 
%[[CMP:[0-9]+]] = icmp ne <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1940,17 +2079,18 @@ define <3 x i16> @select_eq_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_ne_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_ne_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp ne <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ugt_3xi16( ; SI: %cmp = icmp ugt <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ugt <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1958,17 +2098,18 @@ define <3 x i16> @select_ne_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_ugt_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_ugt_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp ugt <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_uge_3xi16( ; SI: %cmp = icmp uge 
<3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp uge <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1976,17 +2117,18 @@ define <3 x i16> @select_ugt_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_uge_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_uge_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp uge <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ult_3xi16( ; SI: %cmp = icmp ult <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ult <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -1994,17 +2136,18 @@ define <3 x i16> @select_uge_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_ult_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define 
amdgpu_kernel void @select_ult_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp ult <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_ule_3xi16( ; SI: %cmp = icmp ule <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp ule <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -2012,17 +2155,18 @@ define <3 x i16> @select_ult_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = zext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_ule_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_ule_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp ule <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sgt_3xi16( ; SI: %cmp = icmp sgt <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sgt <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -2030,17 +2174,18 @@ define <3 x i16> @select_ule_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 
x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_sgt_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_sgt_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp sgt <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sge_3xi16( ; SI: %cmp = icmp sge <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sge <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -2048,17 +2193,18 @@ define <3 x i16> @select_sgt_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_sge_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_sge_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp sge <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_slt_3xi16( ; SI: %cmp = icmp slt <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = 
sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp slt <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -2066,17 +2212,18 @@ define <3 x i16> @select_sge_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_slt_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_slt_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp slt <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } ; GCN-LABEL: @select_sle_3xi16( ; SI: %cmp = icmp sle <3 x i16> %a, %b ; SI-NEXT: %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b -; SI-NEXT: ret <3 x i16> %sel +; SI-NEXT: store volatile <3 x i16> %sel ; VI: %[[A_32_0:[0-9]+]] = sext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[B_32_0:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[CMP:[0-9]+]] = icmp sle <3 x i32> %[[A_32_0]], %[[B_32_0]] @@ -2084,23 +2231,26 @@ define <3 x i16> @select_slt_3xi16(<3 x i16> %a, <3 x i16> %b) { ; VI-NEXT: %[[B_32_1:[0-9]+]] = sext <3 x i16> %b to <3 x i32> ; VI-NEXT: %[[SEL_32:[0-9]+]] = select <3 x i1> %[[CMP]], <3 x i32> %[[A_32_1]], <3 x i32> %[[B_32_1]] ; VI-NEXT: %[[SEL_16:[0-9]+]] = trunc <3 x i32> %[[SEL_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[SEL_16]] -define <3 x i16> @select_sle_3xi16(<3 x i16> %a, <3 x i16> %b) { +; VI-NEXT: store volatile <3 x i16> %[[SEL_16]] +define amdgpu_kernel void @select_sle_3xi16(<3 x i16> %a, <3 x i16> %b) { %cmp = icmp sle <3 x i16> %a, %b %sel = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b - ret <3 x i16> %sel + store volatile <3 x i16> %sel, <3 x i16> addrspace(1)* undef + ret void } 
declare <3 x i16> @llvm.bitreverse.v3i16(<3 x i16>) + ; GCN-LABEL: @bitreverse_3xi16( ; SI: %brev = call <3 x i16> @llvm.bitreverse.v3i16(<3 x i16> %a) -; SI-NEXT: ret <3 x i16> %brev +; SI-NEXT: store volatile <3 x i16> %brev ; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32> ; VI-NEXT: %[[R_32:[0-9]+]] = call <3 x i32> @llvm.bitreverse.v3i32(<3 x i32> %[[A_32]]) ; VI-NEXT: %[[S_32:[0-9]+]] = lshr <3 x i32> %[[R_32]], <i32 16, i32 16, i32 16> ; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[S_32]] to <3 x i16> -; VI-NEXT: ret <3 x i16> %[[R_16]] -define <3 x i16> @bitreverse_3xi16(<3 x i16> %a) { +; VI-NEXT: store volatile <3 x i16> %[[R_16]] +define amdgpu_kernel void @bitreverse_3xi16(<3 x i16> %a) { %brev = call <3 x i16> @llvm.bitreverse.v3i16(<3 x i16> %a) - ret <3 x i16> %brev + store volatile <3 x i16> %brev, <3 x i16> addrspace(1)* undef + ret void } diff --git a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll index 88ba310a92cae..a68ddabd95609 100644 --- a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll +++ b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll @@ -1253,8 +1253,8 @@ define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a, ; NOTES-NEXT: Owner Data size Description ; NOTES-NEXT: AMD 0x00000008 Unknown note type: (0x00000001) ; NOTES-NEXT: AMD 0x0000001b Unknown note type: (0x00000003) -; GFX700: AMD 0x00009171 Unknown note type: (0x0000000a) -; GFX800: AMD 0x00009190 Unknown note type: (0x0000000a) -; GFX900: AMD 0x00009171 Unknown note type: (0x0000000a) +; GFX700: AMD 0x00008b06 Unknown note type: (0x0000000a) +; GFX800: AMD 0x00008e6a Unknown note type: (0x0000000a) +; GFX900: AMD 0x00008b06 Unknown note type: (0x0000000a) ; PARSER: AMDGPU Code Object Metadata Parser Test: PASS diff --git a/test/CodeGen/AMDGPU/exceed-max-sgprs.ll b/test/CodeGen/AMDGPU/exceed-max-sgprs.ll index 40d115bfc0606..207dfce75f162 100644 --- 
a/test/CodeGen/AMDGPU/exceed-max-sgprs.ll +++ b/test/CodeGen/AMDGPU/exceed-max-sgprs.ll @@ -38,7 +38,7 @@ define amdgpu_kernel void @use_too_many_sgprs_bonaire() #1 { ret void } -; ERROR: error: scalar registers limit of 104 exceeded (106) in use_too_many_sgprs_bonaire_flat_scr +; ERROR: error: scalar registers limit of 104 exceeded (108) in use_too_many_sgprs_bonaire_flat_scr define amdgpu_kernel void @use_too_many_sgprs_bonaire_flat_scr() #1 { call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" () call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" () diff --git a/test/CodeGen/AMDGPU/flat-scratch-reg.ll b/test/CodeGen/AMDGPU/flat-scratch-reg.ll index 23f40daf3d237..5705cbc99443a 100644 --- a/test/CodeGen/AMDGPU/flat-scratch-reg.ll +++ b/test/CodeGen/AMDGPU/flat-scratch-reg.ll @@ -44,12 +44,12 @@ entry: ; HSA-VI-NOXNACK: is_xnack_enabled = 0 ; HSA-VI-XNACK: is_xnack_enabled = 1 -; CI: ; NumSgprs: 8 -; VI-NOXNACK: ; NumSgprs: 8 -; VI-XNACK: ; NumSgprs: 12 -; HSA-CI: ; NumSgprs: 8 -; HSA-VI-NOXNACK: ; NumSgprs: 8 -; HSA-VI-XNACK: ; NumSgprs: 12 +; CI: ; NumSgprs: 12 +; VI-NOXNACK: ; NumSgprs: 14 +; VI-XNACK: ; NumSgprs: 14 +; HSA-CI: ; NumSgprs: 12 +; HSA-VI-NOXNACK: ; NumSgprs: 14 +; HSA-VI-XNACK: ; NumSgprs: 14 define amdgpu_kernel void @no_vcc_flat() { entry: call void asm sideeffect "", "~{SGPR7},~{FLAT_SCR}"() @@ -60,14 +60,49 @@ entry: ; HSA-NOXNACK: is_xnack_enabled = 0 ; HSA-XNACK: is_xnack_enabled = 1 -; CI: ; NumSgprs: 10 -; VI-NOXNACK: ; NumSgprs: 10 -; VI-XNACK: ; NumSgprs: 12 -; HSA-CI: ; NumSgprs: 10 -; HSA-VI-NOXNACK: ; NumSgprs: 10 -; HSA-VI-XNACK: ; NumSgprs: 12 +; CI: ; NumSgprs: 12 +; VI-NOXNACK: ; NumSgprs: 14 +; VI-XNACK: ; NumSgprs: 14 +; HSA-CI: ; NumSgprs: 12 +; HSA-VI-NOXNACK: ; NumSgprs: 14 +; HSA-VI-XNACK: ; NumSgprs: 14 define amdgpu_kernel void @vcc_flat() { entry: call void asm sideeffect "", "~{SGPR7},~{VCC},~{FLAT_SCR}"() ret void } + +; Make sure used SGPR count for 
flat_scr is correct when there is no +; scratch usage and implicit flat uses. + +; GCN-LABEL: {{^}}use_flat_scr: +; CI: NumSgprs: 4 +; VI-NOXNACK: NumSgprs: 6 +; VI-XNACK: NumSgprs: 6 +define amdgpu_kernel void @use_flat_scr() #0 { +entry: + call void asm sideeffect "; clobber ", "~{FLAT_SCR}"() + ret void +} + +; GCN-LABEL: {{^}}use_flat_scr_lo: +; CI: NumSgprs: 4 +; VI-NOXNACK: NumSgprs: 6 +; VI-XNACK: NumSgprs: 6 +define amdgpu_kernel void @use_flat_scr_lo() #0 { +entry: + call void asm sideeffect "; clobber ", "~{FLAT_SCR_LO}"() + ret void +} + +; GCN-LABEL: {{^}}use_flat_scr_hi: +; CI: NumSgprs: 4 +; VI-NOXNACK: NumSgprs: 6 +; VI-XNACK: NumSgprs: 6 +define amdgpu_kernel void @use_flat_scr_hi() #0 { +entry: + call void asm sideeffect "; clobber ", "~{FLAT_SCR_HI}"() + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/frame-index-amdgiz.ll b/test/CodeGen/AMDGPU/frame-index-amdgiz.ll new file mode 100644 index 0000000000000..dd46403b68af1 --- /dev/null +++ b/test/CodeGen/AMDGPU/frame-index-amdgiz.ll @@ -0,0 +1,55 @@ +; RUN: llc -verify-machineinstrs < %s | FileCheck %s +; +; The original OpenCL kernel: +; kernel void f(global int *a, int i, int j) { +; int x[100]; +; x[i] = 7; +; a[0] = x[j]; +; } +; clang -cc1 -triple amdgcn---amdgizcl -emit-llvm -o - + +target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" +target triple = "amdgcn---amdgiz" + +define amdgpu_kernel void @f(i32 addrspace(1)* nocapture %a, i32 %i, i32 %j) local_unnamed_addr #0 { +entry: +; CHECK: s_load_dword s2, s[0:1], 0xb +; CHECK: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; CHECK: s_load_dword s0, s[0:1], 0xc +; CHECK: s_mov_b32 s8, SCRATCH_RSRC_DWORD0 +; CHECK: s_mov_b32 s9, SCRATCH_RSRC_DWORD1 +; CHECK: s_mov_b32 s10, -1 +; CHECK: s_waitcnt lgkmcnt(0) +; CHECK: s_lshl_b32 s1, s2, 2 +; CHECK: v_mov_b32_e32 v0, 4 +; CHECK: s_mov_b32 s11, 0xe8f000 +; 
CHECK: v_add_i32_e32 v1, vcc, s1, v0 +; CHECK: v_mov_b32_e32 v2, 7 +; CHECK: s_lshl_b32 s0, s0, 2 +; CHECK: buffer_store_dword v2, v1, s[8:11], s3 offen +; CHECK: v_add_i32_e32 v0, vcc, s0, v0 +; CHECK: buffer_load_dword v0, v0, s[8:11], s3 offen +; CHECK: s_mov_b32 s7, 0xf000 +; CHECK: s_mov_b32 s6, -1 +; CHECK: s_waitcnt vmcnt(0) +; CHECK: buffer_store_dword v0, off, s[4:7], 0 +; CHECK: s_endpgm + + %x = alloca [100 x i32], align 4, addrspace(5) + %0 = bitcast [100 x i32] addrspace(5)* %x to i8 addrspace(5)* + call void @llvm.lifetime.start.p5i8(i64 400, i8 addrspace(5)* nonnull %0) #0 + %arrayidx = getelementptr inbounds [100 x i32], [100 x i32] addrspace(5)* %x, i32 0, i32 %i + store i32 7, i32 addrspace(5)* %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32] addrspace(5)* %x, i32 0, i32 %j + %1 = load i32, i32 addrspace(5)* %arrayidx2, align 4 + store i32 %1, i32 addrspace(1)* %a, align 4 + call void @llvm.lifetime.end.p5i8(i64 400, i8 addrspace(5)* nonnull %0) #0 + ret void +} + +declare void @llvm.lifetime.start.p5i8(i64, i8 addrspace(5)* nocapture) #1 + +declare void @llvm.lifetime.end.p5i8(i64, i8 addrspace(5)* nocapture) #1 + +attributes #0 = { nounwind } +attributes #1 = { argmemonly nounwind } diff --git a/test/CodeGen/AMDGPU/hsa-func-align.ll b/test/CodeGen/AMDGPU/hsa-func-align.ll new file mode 100644 index 0000000000000..a00f5e2669d1d --- /dev/null +++ b/test/CodeGen/AMDGPU/hsa-func-align.ll @@ -0,0 +1,18 @@ +; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri < %s | FileCheck -check-prefix=HSA %s +; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -filetype=obj < %s | llvm-readobj -symbols -s -sd | FileCheck -check-prefix=ELF %s + +; ELF: Section { +; ELF: Name: .text +; ELF: SHF_ALLOC (0x2) +; ELF: SHF_EXECINSTR (0x4) +; ELF: AddressAlignment: 32 +; ELF: } + +; HSA: .globl simple_align16 +; HSA: .p2align 5 +define void @simple_align16(i32 addrspace(1)* addrspace(2)* %ptr.out) align 32 { +entry: + %out = load i32 
addrspace(1)*, i32 addrspace(1)* addrspace(2)* %ptr.out + store i32 0, i32 addrspace(1)* %out + ret void +} diff --git a/test/CodeGen/AMDGPU/hsa-func.ll b/test/CodeGen/AMDGPU/hsa-func.ll index b4cdd4030d86a..d96b796d44950 100644 --- a/test/CodeGen/AMDGPU/hsa-func.ll +++ b/test/CodeGen/AMDGPU/hsa-func.ll @@ -14,6 +14,7 @@ ; ELF: Flags [ (0x6) ; ELF: SHF_ALLOC (0x2) ; ELF: SHF_EXECINSTR (0x4) +; ELF: AddressAlignment: 4 ; ELF: } ; ELF: SHT_NOTE @@ -26,7 +27,7 @@ ; ELF: Symbol { ; ELF: Name: simple -; ELF: Size: 292 +; ELF: Size: 44 ; ELF: Type: Function (0x2) ; ELF: } @@ -36,12 +37,13 @@ ; HSA-VI: .hsa_code_object_isa 8,0,1,"AMD","AMDGPU" ; HSA-NOT: .amdgpu_hsa_kernel simple +; HSA: .globl simple +; HSA: .p2align 2 ; HSA: {{^}}simple: -; HSA: .amd_kernel_code_t -; HSA: enable_sgpr_private_segment_buffer = 1 -; HSA: enable_sgpr_kernarg_segment_ptr = 1 -; HSA: .end_amd_kernel_code_t -; HSA: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0 +; HSA-NOT: amd_kernel_code_t + +; FIXME: Check this isn't a kernarg load when calling convention implemented. +; XHSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0 ; Make sure we are setting the ATC bit: ; HSA-CI: s_mov_b32 s[[HI:[0-9]]], 0x100f000 @@ -52,9 +54,20 @@ ; HSA: .Lfunc_end0: ; HSA: .size simple, .Lfunc_end0-simple - +; HSA: ; Function info: +; HSA-NOT: COMPUTE_PGM_RSRC2 define void @simple(i32 addrspace(1)* %out) { entry: store i32 0, i32 addrspace(1)* %out ret void } + +; Ignore explicit alignment that is too low. 
+; HSA: .globl simple_align2 +; HSA: .p2align 2 +define void @simple_align2(i32 addrspace(1)* addrspace(2)* %ptr.out) align 2 { +entry: + %out = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %ptr.out + store i32 0, i32 addrspace(1)* %out + ret void +} diff --git a/test/CodeGen/AMDGPU/loop_break.ll b/test/CodeGen/AMDGPU/loop_break.ll index b9df2cb779ad0..84c42e8bd1e06 100644 --- a/test/CodeGen/AMDGPU/loop_break.ll +++ b/test/CodeGen/AMDGPU/loop_break.ll @@ -10,7 +10,7 @@ ; OPT: bb4: ; OPT: load volatile -; OPT: xor i1 %cmp1 +; OPT: %cmp1 = icmp sge i32 %tmp, %load ; OPT: call i64 @llvm.amdgcn.if.break( ; OPT: br label %Flow diff --git a/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll b/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll index 9d0b6b395996b..4bd8bff4809af 100644 --- a/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll +++ b/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll @@ -9,18 +9,19 @@ ; StructurizeCFG. ; IR-LABEL: @multi_divergent_region_exit_ret_ret( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) -; IR: %2 = extractvalue { i1, i64 } %1, 0 -; IR: %3 = extractvalue { i1, i64 } %1, 1 -; IR: br i1 %2, label %LeafBlock1, label %Flow +; IR: %Pivot = icmp sge i32 %tmp16, 2 +; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %Pivot) +; IR: %1 = extractvalue { i1, i64 } %0, 0 +; IR: %2 = extractvalue { i1, i64 } %0, 1 +; IR: br i1 %1, label %LeafBlock1, label %Flow ; IR: Flow: -; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] -; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) -; IR: %7 = extractvalue { i1, i64 } %6, 0 -; IR: %8 = extractvalue { i1, i64 } %6, 1 -; IR: br i1 %7, label %LeafBlock, label %Flow1 +; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] +; IR: %4 = phi i1 [ %SwitchLeaf2, %LeafBlock1 ], [ false, %entry ] +; IR: %5 = call { i1, i64 } @llvm.amdgcn.else(i64 %2) +; IR: %6 = extractvalue { i1, i64 } %5, 0 +; IR: %7 = extractvalue { i1, 
i64 } %5, 1 +; IR: br i1 %6, label %LeafBlock, label %Flow1 ; IR: LeafBlock: ; IR: br label %Flow1 @@ -29,32 +30,32 @@ ; IR: br label %Flow{{$}} ; IR: Flow2: -; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) -; IR: %13 = extractvalue { i1, i64 } %12, 0 -; IR: %14 = extractvalue { i1, i64 } %12, 1 -; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock +; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %16) +; IR: [[IF:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if(i1 %8) +; IR: %10 = extractvalue { i1, i64 } [[IF]], 0 +; IR: %11 = extractvalue { i1, i64 } [[IF]], 1 +; IR: br i1 %10, label %exit0, label %UnifiedReturnBlock ; IR: exit0: ; IR: store volatile i32 9, i32 addrspace(1)* undef ; IR: br label %UnifiedReturnBlock ; IR: Flow1: -; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ] -; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ] -; IR: call void @llvm.amdgcn.end.cf(i64 %8) -; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16) -; IR: %18 = extractvalue { i1, i64 } %17, 0 -; IR: %19 = extractvalue { i1, i64 } %17, 1 -; IR: br i1 %18, label %exit1, label %Flow2 +; IR: %12 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %3, %Flow ] +; IR: %13 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ] +; IR: call void @llvm.amdgcn.end.cf(i64 %7) +; IR: %14 = call { i1, i64 } @llvm.amdgcn.if(i1 %13) +; IR: %15 = extractvalue { i1, i64 } %14, 0 +; IR: %16 = extractvalue { i1, i64 } %14, 1 +; IR: br i1 %15, label %exit1, label %Flow2 ; IR: exit1: ; IR: store volatile i32 17, i32 addrspace(3)* undef ; IR: br label %Flow2 ; IR: UnifiedReturnBlock: -; IR: call void @llvm.amdgcn.end.cf(i64 %14) +; IR: call void @llvm.amdgcn.end.cf(i64 %11) ; IR: ret void @@ -64,11 +65,9 @@ ; GCN: s_xor_b64 -; FIXME: Why is this compare essentially repeated? 
-; GCN: v_cmp_eq_u32_e32 vcc, 1, [[REG:v[0-9]+]] -; GCN-NEXT: v_cmp_ne_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1, [[REG]] +; GCN: ; %LeafBlock +; GCN: v_cmp_ne_u32_e32 vcc, 1, [[REG:v[0-9]+]] ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, vcc -; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1 ; GCN: ; %Flow1 ; GCN-NEXT: s_or_b64 exec, exec @@ -126,14 +125,15 @@ exit1: ; preds = %LeafBlock, %LeafBlock1 } ; IR-LABEL: @multi_divergent_region_exit_unreachable_unreachable( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) +; IR: %Pivot = icmp sge i32 %tmp16, 2 +; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %Pivot) -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %5 = call { i1, i64 } @llvm.amdgcn.else(i64 %2) -; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) -; IR: br i1 %13, label %exit0, label %UnifiedUnreachableBlock +; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %16) +; IR: %9 = call { i1, i64 } @llvm.amdgcn.if(i1 %8) +; IR: br i1 %10, label %exit0, label %UnifiedUnreachableBlock ; IR: UnifiedUnreachableBlock: @@ -181,51 +181,49 @@ exit1: ; preds = %LeafBlock, %LeafBlock1 } ; IR-LABEL: @multi_exit_region_divergent_ret_uniform_ret( -; IR: %divergent.cond0 = icmp slt i32 %tmp16, 2 +; IR: %divergent.cond0 = icmp sge i32 %tmp16, 2 ; IR: llvm.amdgcn.if ; IR: br i1 ; IR: {{^}}Flow: -; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] -; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) -; IR: br i1 %7, label %LeafBlock, label %Flow1 +; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] +; IR: %4 = phi i1 [ %uniform.cond0, %LeafBlock1 ], [ false, %entry ] +; IR: %5 = call { i1, i64 } @llvm.amdgcn.else(i64 %2) +; IR: br i1 %6, label %LeafBlock, label %Flow1 ; IR: {{^}}LeafBlock: -; IR: %divergent.cond1 = icmp eq i32 %tmp16, 1 -; IR: %9 = xor i1 
%divergent.cond1, true +; IR: %divergent.cond1 = icmp ne i32 %tmp16, 1 ; IR: br label %Flow1 ; IR: LeafBlock1: -; IR: %uniform.cond0 = icmp eq i32 %arg3, 2 -; IR: %10 = xor i1 %uniform.cond0, true +; IR: %uniform.cond0 = icmp ne i32 %arg3, 2 ; IR: br label %Flow ; IR: Flow2: -; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) -; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock +; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %16) +; IR: %9 = call { i1, i64 } @llvm.amdgcn.if(i1 %8) +; IR: br i1 %10, label %exit0, label %UnifiedReturnBlock ; IR: exit0: ; IR: store volatile i32 9, i32 addrspace(1)* undef ; IR: br label %UnifiedReturnBlock ; IR: {{^}}Flow1: -; IR: %15 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %4, %Flow ] -; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ] -; IR: call void @llvm.amdgcn.end.cf(i64 %8) -; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16) -; IR: %18 = extractvalue { i1, i64 } %17, 0 -; IR: %19 = extractvalue { i1, i64 } %17, 1 -; IR: br i1 %18, label %exit1, label %Flow2 +; IR: %12 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %3, %Flow ] +; IR: %13 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %4, %Flow ] +; IR: call void @llvm.amdgcn.end.cf(i64 %7) +; IR: %14 = call { i1, i64 } @llvm.amdgcn.if(i1 %13) +; IR: %15 = extractvalue { i1, i64 } %14, 0 +; IR: %16 = extractvalue { i1, i64 } %14, 1 +; IR: br i1 %15, label %exit1, label %Flow2 ; IR: exit1: ; IR: store volatile i32 17, i32 addrspace(3)* undef ; IR: br label %Flow2 ; IR: UnifiedReturnBlock: -; IR: call void @llvm.amdgcn.end.cf(i64 %14) +; IR: call void @llvm.amdgcn.end.cf(i64 %11) ; IR: ret void define amdgpu_kernel void @multi_exit_region_divergent_ret_uniform_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 { entry: @@ -264,17 +262,18 @@ exit1: ; preds 
= %LeafBlock, %LeafBlock1 } ; IR-LABEL: @multi_exit_region_uniform_ret_divergent_ret( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) -; IR: br i1 %2, label %LeafBlock1, label %Flow +; IR: %Pivot = icmp sge i32 %tmp16, 2 +; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %Pivot) +; IR: br i1 %1, label %LeafBlock1, label %Flow ; IR: Flow: -; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] -; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] +; IR: %4 = phi i1 [ %SwitchLeaf2, %LeafBlock1 ], [ false, %entry ] +; IR: %5 = call { i1, i64 } @llvm.amdgcn.else(i64 %2) -; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) +; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %16) +; IR: %9 = call { i1, i64 } @llvm.amdgcn.if(i1 %8) define amdgpu_kernel void @multi_exit_region_uniform_ret_divergent_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 { entry: @@ -314,13 +313,13 @@ exit1: ; preds = %LeafBlock, %LeafBlock1 ; IR-LABEL: @multi_divergent_region_exit_ret_ret_return_value( ; IR: Flow2: -; IR: %11 = phi float [ 2.000000e+00, %exit1 ], [ undef, %Flow1 ] -; IR: %12 = phi i1 [ false, %exit1 ], [ %16, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %20) +; IR: %8 = phi float [ 2.000000e+00, %exit1 ], [ undef, %Flow1 ] +; IR: %9 = phi i1 [ false, %exit1 ], [ %13, %Flow1 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %17) ; IR: UnifiedReturnBlock: -; IR: %UnifiedRetVal = phi float [ %11, %Flow2 ], [ 1.000000e+00, %exit0 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %15) +; IR: %UnifiedRetVal = phi float [ %8, %Flow2 ], [ 1.000000e+00, %exit0 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %12) ; IR: ret float %UnifiedRetVal define amdgpu_ps 
float @multi_divergent_region_exit_ret_ret_return_value(i32 %vgpr) #0 { entry: @@ -387,31 +386,32 @@ exit1: ; preds = %LeafBlock, %LeafBlock1 } ; IR-LABEL: @multi_divergent_region_exit_ret_unreachable( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) +; IR: %Pivot = icmp sge i32 %tmp16, 2 +; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %Pivot) ; IR: Flow: -; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] -; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] +; IR: %4 = phi i1 [ %SwitchLeaf2, %LeafBlock1 ], [ false, %entry ] +; IR: %5 = call { i1, i64 } @llvm.amdgcn.else(i64 %2) ; IR: Flow2: -; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) -; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock +; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ] +; IR: call void @llvm.amdgcn.end.cf(i64 %16) +; IR: %9 = call { i1, i64 } @llvm.amdgcn.if(i1 %8) +; IR: br i1 %10, label %exit0, label %UnifiedReturnBlock ; IR: exit0: ; IR-NEXT: store volatile i32 17, i32 addrspace(3)* undef ; IR-NEXT: br label %UnifiedReturnBlock ; IR: Flow1: -; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ] -; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ] -; IR: call void @llvm.amdgcn.end.cf(i64 %8) -; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16) -; IR: %18 = extractvalue { i1, i64 } %17, 0 -; IR: %19 = extractvalue { i1, i64 } %17, 1 -; IR: br i1 %18, label %exit1, label %Flow2 +; IR: %12 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %3, %Flow ] +; IR: %13 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ] +; IR: call void @llvm.amdgcn.end.cf(i64 %7) +; IR: %14 = call { i1, i64 } @llvm.amdgcn.if(i1 %13) +; IR: %15 = extractvalue { i1, i64 } %14, 0 +; IR: %16 = extractvalue { i1, i64 } %14, 1 +; IR: br i1 %15, label %exit1, label %Flow2 ; IR: 
exit1: ; IR-NEXT: store volatile i32 9, i32 addrspace(1)* undef @@ -419,7 +419,7 @@ exit1: ; preds = %LeafBlock, %LeafBlock1 ; IR-NEXT: br label %Flow2 ; IR: UnifiedReturnBlock: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14) +; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %11) ; IR-NEXT: ret void define amdgpu_kernel void @multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 { entry: @@ -475,7 +475,7 @@ exit1: ; preds = %LeafBlock, %LeafBlock1 ; IR-NEXT: br label %Flow2 ; IR: UnifiedReturnBlock: ; preds = %exit0, %Flow2 -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14) +; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %11) ; IR-NEXT: ret void define amdgpu_kernel void @indirect_multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 { entry: @@ -622,15 +622,15 @@ uniform.ret: ; IR-LABEL: @uniform_complex_multi_ret_nest_in_divergent_triangle( ; IR: Flow1: ; preds = %uniform.ret1, %uniform.multi.exit.region -; IR: %8 = phi i1 [ false, %uniform.ret1 ], [ true, %uniform.multi.exit.region ] -; IR: br i1 %8, label %uniform.if, label %Flow2 +; IR: %6 = phi i1 [ false, %uniform.ret1 ], [ true, %uniform.multi.exit.region ] +; IR: br i1 %6, label %uniform.if, label %Flow2 ; IR: Flow: ; preds = %uniform.then, %uniform.if -; IR: %11 = phi i1 [ %10, %uniform.then ], [ %9, %uniform.if ] -; IR: br i1 %11, label %uniform.endif, label %uniform.ret0 +; IR: %7 = phi i1 [ %uniform.cond2, %uniform.then ], [ %uniform.cond1, %uniform.if ] +; IR: br i1 %7, label %uniform.endif, label %uniform.ret0 ; IR: UnifiedReturnBlock: ; preds = %Flow3, %Flow2 -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %6) +; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %5) ; IR-NEXT: ret void define amdgpu_kernel void @uniform_complex_multi_ret_nest_in_divergent_triangle(i32 %arg0) #0 { entry: diff --git 
a/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/test/CodeGen/AMDGPU/nested-loop-conditions.ll index 672549c8ea636..c0b4eaff60aac 100644 --- a/test/CodeGen/AMDGPU/nested-loop-conditions.ll +++ b/test/CodeGen/AMDGPU/nested-loop-conditions.ll @@ -133,9 +133,9 @@ bb23: ; preds = %bb10 ; IR: Flow1: ; IR-NEXT: %loop.phi = phi i64 [ %loop.phi9, %Flow6 ], [ %phi.broken, %bb14 ] -; IR-NEXT: %13 = phi <4 x i32> [ %29, %Flow6 ], [ undef, %bb14 ] -; IR-NEXT: %14 = phi i32 [ %30, %Flow6 ], [ undef, %bb14 ] -; IR-NEXT: %15 = phi i1 [ %31, %Flow6 ], [ false, %bb14 ] +; IR-NEXT: %13 = phi <4 x i32> [ %28, %Flow6 ], [ undef, %bb14 ] +; IR-NEXT: %14 = phi i32 [ %29, %Flow6 ], [ undef, %bb14 ] +; IR-NEXT: %15 = phi i1 [ %30, %Flow6 ], [ false, %bb14 ] ; IR-NEXT: %16 = phi i1 [ false, %Flow6 ], [ %8, %bb14 ] ; IR-NEXT: %17 = call i64 @llvm.amdgcn.else.break(i64 %11, i64 %loop.phi) ; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %11) @@ -144,9 +144,9 @@ bb23: ; preds = %bb10 ; IR: Flow2: ; IR-NEXT: %loop.phi10 = phi i64 [ %loop.phi11, %Flow5 ], [ %12, %bb16 ] -; IR-NEXT: %19 = phi <4 x i32> [ %29, %Flow5 ], [ undef, %bb16 ] -; IR-NEXT: %20 = phi i32 [ %30, %Flow5 ], [ undef, %bb16 ] -; IR-NEXT: %21 = phi i1 [ %31, %Flow5 ], [ false, %bb16 ] +; IR-NEXT: %19 = phi <4 x i32> [ %28, %Flow5 ], [ undef, %bb16 ] +; IR-NEXT: %20 = phi i32 [ %29, %Flow5 ], [ undef, %bb16 ] +; IR-NEXT: %21 = phi i1 [ %30, %Flow5 ], [ false, %bb16 ] ; IR-NEXT: %22 = phi i1 [ false, %Flow5 ], [ false, %bb16 ] ; IR-NEXT: %23 = phi i1 [ false, %Flow5 ], [ %8, %bb16 ] ; IR-NEXT: %24 = call { i1, i64 } @llvm.amdgcn.if(i1 %23) @@ -156,16 +156,15 @@ bb23: ; preds = %bb10 ; IR: bb21: ; IR: %tmp12 = icmp slt i32 %tmp11, 9 -; IR-NEXT: %27 = xor i1 %tmp12, true -; IR-NEXT: %28 = call i64 @llvm.amdgcn.if.break(i1 %27, i64 %phi.broken) +; IR-NEXT: %27 = call i64 @llvm.amdgcn.if.break(i1 %tmp12, i64 %phi.broken) ; IR-NEXT: br label %Flow3 ; IR: Flow3: ; IR-NEXT: %loop.phi11 = phi i64 [ %phi.broken, %bb21 ], [ %phi.broken, 
%Flow2 ] -; IR-NEXT: %loop.phi9 = phi i64 [ %28, %bb21 ], [ %loop.phi10, %Flow2 ] -; IR-NEXT: %29 = phi <4 x i32> [ %tmp9, %bb21 ], [ %19, %Flow2 ] -; IR-NEXT: %30 = phi i32 [ %tmp10, %bb21 ], [ %20, %Flow2 ] -; IR-NEXT: %31 = phi i1 [ %27, %bb21 ], [ %21, %Flow2 ] +; IR-NEXT: %loop.phi9 = phi i64 [ %27, %bb21 ], [ %loop.phi10, %Flow2 ] +; IR-NEXT: %28 = phi <4 x i32> [ %tmp9, %bb21 ], [ %19, %Flow2 ] +; IR-NEXT: %29 = phi i32 [ %tmp10, %bb21 ], [ %20, %Flow2 ] +; IR-NEXT: %30 = phi i1 [ %tmp12, %bb21 ], [ %21, %Flow2 ] ; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %26) ; IR-NEXT: br i1 %22, label %bb31.loopexit, label %Flow4 diff --git a/test/CodeGen/AMDGPU/ret_jump.ll b/test/CodeGen/AMDGPU/ret_jump.ll index f2fbacbab82e7..748f98a12c591 100644 --- a/test/CodeGen/AMDGPU/ret_jump.ll +++ b/test/CodeGen/AMDGPU/ret_jump.ll @@ -56,7 +56,7 @@ ret.bb: ; preds = %else, %main_body } ; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable: -; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]] +; GCN: s_cbranch_scc1 [[RET_BB:BB[0-9]+_[0-9]+]] ; GCN: ; BB#{{[0-9]+}}: ; %else ; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc diff --git a/test/CodeGen/AMDGPU/select-vectors.ll b/test/CodeGen/AMDGPU/select-vectors.ll index 8710fc8c7307b..4b00a48211ecf 100644 --- a/test/CodeGen/AMDGPU/select-vectors.ll +++ b/test/CodeGen/AMDGPU/select-vectors.ll @@ -1,69 +1,186 @@ -; RUN: llc -verify-machineinstrs -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s -; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -verify-machineinstrs -march=amdgcn < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s +; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s +; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck 
-check-prefix=GCN -check-prefix=GFX9 %s ; Test expansion of scalar selects on vectors. ; Evergreen not enabled since it seems to be having problems with doubles. +; GCN-LABEL: {{^}}v_select_v2i8: +; SI: v_cndmask_b32 +; SI-NOT: cndmask -; FUNC-LABEL: {{^}}select_v4i8: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) nounwind { +; GFX9: v_cndmask_b32 +; GFX9-NOT: cndmask + +; This is worse when i16 is legal and packed is not because +; SelectionDAGBuilder for some reason changes the select type. +; VI: v_cndmask_b32 +; VI: v_cndmask_b32 +define amdgpu_kernel void @v_select_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %a.ptr, <2 x i8> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <2 x i8>, <2 x i8> addrspace(1)* %a.ptr, align 2 + %b = load <2 x i8>, <2 x i8> addrspace(1)* %b.ptr, align 2 + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <2 x i8> %a, <2 x i8> %b + store <2 x i8> %select, <2 x i8> addrspace(1)* %out, align 2 + ret void +} + +; GCN-LABEL: {{^}}v_select_v4i8: +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %a.ptr, <4 x i8> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <4 x i8>, <4 x i8> addrspace(1)* %a.ptr + %b = load <4 x i8>, <4 x i8> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <4 x i8> %a, <4 x i8> %b + store <4 x i8> %select, <4 x i8> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_select_v8i8: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(1)* %a.ptr, <8 x i8> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <8 x i8>, <8 x i8> addrspace(1)* %a.ptr + %b = load <8 x i8>, <8 x i8> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + 
%select = select i1 %cmp, <8 x i8> %a, <8 x i8> %b + store <8 x i8> %select, <8 x i8> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_select_v16i8: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(1)* %a.ptr, <16 x i8> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <16 x i8>, <16 x i8> addrspace(1)* %a.ptr + %b = load <16 x i8>, <16 x i8> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <16 x i8> %a, <16 x i8> %b + store <16 x i8> %select, <16 x i8> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}select_v4i8: +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) #0 { %cmp = icmp eq i8 %c, 0 %select = select i1 %cmp, <4 x i8> %a, <4 x i8> %b store <4 x i8> %select, <4 x i8> addrspace(1)* %out, align 4 ret void } -; FUNC-LABEL: {{^}}select_v4i16: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 +; GCN-LABEL: {{^}}select_v2i16: +; GCN: v_cndmask_b32_e32 +; GCN-NOT: v_cndmask_b32 +define amdgpu_kernel void @select_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b, i32 %c) #0 { + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <2 x i16> %a, <2 x i16> %b + store <2 x i16> %select, <2 x i16> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_select_v2i16: +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %a.ptr, <2 x i16> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <2 x i16>, <2 x i16> addrspace(1)* %a.ptr + %b = load <2 x i16>, <2 x i16> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <2 x i16> %a, <2 x i16> %b + store <2 x i16> %select, <2 x i16> addrspace(1)* %out, align 4 + 
ret void +} + +; GCN-LABEL: {{^}}v_select_v3i16: ; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b, i32 %c) nounwind { +; SI: cndmask +; SI-NOT: cndmask + +; GFX9: v_cndmask_b32_e32 +; GFX9: cndmask +; GFX9-NOT: cndmask + +; VI: v_cndmask_b32 +; VI: v_cndmask_b32 +; VI: v_cndmask_b32 +define amdgpu_kernel void @v_select_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %a.ptr, <3 x i16> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <3 x i16>, <3 x i16> addrspace(1)* %a.ptr + %b = load <3 x i16>, <3 x i16> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <3 x i16> %a, <3 x i16> %b + store <3 x i16> %select, <3 x i16> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_select_v4i16: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %a.ptr, <4 x i16> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <4 x i16>, <4 x i16> addrspace(1)* %a.ptr + %b = load <4 x i16>, <4 x i16> addrspace(1)* %b.ptr %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <4 x i16> %a, <4 x i16> %b store <4 x i16> %select, <4 x i16> addrspace(1)* %out, align 4 ret void } +; GCN-LABEL: {{^}}v_select_v8i16: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %a.ptr, <8 x i16> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <8 x i16>, <8 x i16> addrspace(1)* %a.ptr + %b = load <8 x i16>, <8 x i16> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <8 x i16> %a, <8 x i16> %b + store <8 x i16> %select, <8 x i16> addrspace(1)* %out, align 4 + ret void +} + ; FIXME: Expansion with bitwise operations may be better if doing a ; vector select with SGPR inputs. 
-; FUNC-LABEL: {{^}}s_select_v2i32: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: buffer_store_dwordx2 -define amdgpu_kernel void @s_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}s_select_v2i32: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: buffer_store_dwordx2 +define amdgpu_kernel void @s_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <2 x i32> %a, <2 x i32> %b store <2 x i32> %select, <2 x i32> addrspace(1)* %out, align 8 ret void } -; FUNC-LABEL: {{^}}s_select_v4i32: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: buffer_store_dwordx4 -define amdgpu_kernel void @s_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}s_select_v4i32: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: buffer_store_dwordx4 +define amdgpu_kernel void @s_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <4 x i32> %a, <4 x i32> %b store <4 x i32> %select, <4 x i32> addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}v_select_v4i32: -; SI: buffer_load_dwordx4 -; SI: v_cmp_lt_u32_e64 vcc, s{{[0-9]+}}, 32 -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}v_select_v4i32: +; GCN: buffer_load_dwordx4 +; GCN: v_cmp_lt_u32_e64 vcc, s{{[0-9]+}}, 32 +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, 
v{{[0-9]+}} +; GCN: buffer_store_dwordx4 define amdgpu_kernel void @v_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %cond) #0 { bb: %tmp2 = icmp ult i32 %cond, 32 @@ -73,68 +190,68 @@ bb: ret void } -; FUNC-LABEL: {{^}}select_v8i32: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}select_v8i32: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +define amdgpu_kernel void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <8 x i32> %a, <8 x i32> %b store <8 x i32> %select, <8 x i32> addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}s_select_v2f32: -; SI-DAG: s_load_dwordx2 s{{\[}}[[ALO:[0-9]+]]:[[AHI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}} -; SI-DAG: s_load_dwordx2 s{{\[}}[[BLO:[0-9]+]]:[[BHI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0xd|0x34}} +; GCN-LABEL: {{^}}s_select_v2f32: +; GCN-DAG: s_load_dwordx2 s{{\[}}[[ALO:[0-9]+]]:[[AHI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}} +; GCN-DAG: s_load_dwordx2 s{{\[}}[[BLO:[0-9]+]]:[[BHI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0xd|0x34}} -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[AHI]] -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[BHI]] -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[ALO]] -; SI-DAG: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[AHI]] +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[BHI]] +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[ALO]] +; GCN-DAG: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}} -; SI: 
v_cndmask_b32_e32 -; SI: v_mov_b32_e32 v{{[0-9]+}}, s[[BLO]] -; SI: v_cndmask_b32_e32 -; SI: buffer_store_dwordx2 -define amdgpu_kernel void @s_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) nounwind { +; GCN: v_cndmask_b32_e32 +; GCN: v_mov_b32_e32 v{{[0-9]+}}, s[[BLO]] +; GCN: v_cndmask_b32_e32 +; GCN: buffer_store_dwordx2 +define amdgpu_kernel void @s_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <2 x float> %a, <2 x float> %b store <2 x float> %select, <2 x float> addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}s_select_v4f32: -; SI: s_load_dwordx4 -; SI: s_load_dwordx4 -; SI: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}} +; GCN-LABEL: {{^}}s_select_v4f32: +; GCN: s_load_dwordx4 +; GCN: s_load_dwordx4 +; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}} -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 -; SI: buffer_store_dwordx4 -define amdgpu_kernel void @s_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) nounwind { +; GCN: buffer_store_dwordx4 +define amdgpu_kernel void @s_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <4 x float> %a, <4 x float> %b store <4 x float> %select, <4 x float> addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}v_select_v4f32: -; SI: buffer_load_dwordx4 -; SI: v_cmp_lt_u32_e64 vcc, s{{[0-9]+}}, 32 -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; SI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}v_select_v4f32: +; GCN: buffer_load_dwordx4 +; GCN: 
v_cmp_lt_u32_e64 vcc, s{{[0-9]+}}, 32 +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} +; GCN: buffer_store_dwordx4 define amdgpu_kernel void @v_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %cond) #0 { bb: %tmp2 = icmp ult i32 %cond, 32 @@ -144,74 +261,112 @@ bb: ret void } -; FUNC-LABEL: {{^}}select_v8f32: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}select_v8f32: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +define amdgpu_kernel void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <8 x float> %a, <8 x float> %b store <8 x float> %select, <8 x float> addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}select_v2f64: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}select_v2f64: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +define amdgpu_kernel void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <2 x double> %a, <2 x double> %b store <2 x double> %select, <2 x double> 
addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}select_v4f64: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}select_v4f64: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +define amdgpu_kernel void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <4 x double> %a, <4 x double> %b store <4 x double> %select, <4 x double> addrspace(1)* %out, align 16 ret void } -; FUNC-LABEL: {{^}}select_v8f64: -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -; SI: v_cndmask_b32_e32 -define amdgpu_kernel void @select_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b, i32 %c) nounwind { +; GCN-LABEL: {{^}}select_v8f64: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +define amdgpu_kernel void @select_v8f64(<8 x double> 
addrspace(1)* %out, <8 x double> %a, <8 x double> %b, i32 %c) #0 { %cmp = icmp eq i32 %c, 0 %select = select i1 %cmp, <8 x double> %a, <8 x double> %b store <8 x double> %select, <8 x double> addrspace(1)* %out, align 16 ret void } +; GCN-LABEL: {{^}}v_select_v2f16: +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %a.ptr, <2 x half> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <2 x half>, <2 x half> addrspace(1)* %a.ptr + %b = load <2 x half>, <2 x half> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <2 x half> %a, <2 x half> %b + store <2 x half> %select, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_select_v3f16: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %a.ptr, <3 x half> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <3 x half>, <3 x half> addrspace(1)* %a.ptr + %b = load <3 x half>, <3 x half> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <3 x half> %a, <3 x half> %b + store <3 x half> %select, <3 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_select_v4f16: +; GCN: v_cndmask_b32_e32 +; GCN: v_cndmask_b32_e32 +; GCN-NOT: cndmask +define amdgpu_kernel void @v_select_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %a.ptr, <4 x half> addrspace(1)* %b.ptr, i32 %c) #0 { + %a = load <4 x half>, <4 x half> addrspace(1)* %a.ptr + %b = load <4 x half>, <4 x half> addrspace(1)* %b.ptr + %cmp = icmp eq i32 %c, 0 + %select = select i1 %cmp, <4 x half> %a, <4 x half> %b + store <4 x half> %select, <4 x half> addrspace(1)* %out, align 4 + ret void +} + ; Function Attrs: nounwind readnone declare i32 @llvm.amdgcn.workitem.id.x() #1 diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir 
b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir index 66d9033a6d7cb..21c774133f896 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir +++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir @@ -12,6 +12,15 @@ define void @test_fadd_s32() #0 { ret void } define void @test_fadd_s64() #0 { ret void } + define void @test_sub_s8() { ret void } + define void @test_sub_s16() { ret void } + define void @test_sub_s32() { ret void } + + define void @test_mul_s8() #1 { ret void } + define void @test_mul_s16() #1 { ret void } + define void @test_mul_s32() #1 { ret void } + define void @test_mulv5_s32() { ret void } + define void @test_load_from_stack() { ret void } define void @test_load_f32() #0 { ret void } define void @test_load_f64() #0 { ret void } @@ -24,6 +33,7 @@ define void @test_soft_fp_double() #0 { ret void } attributes #0 = { "target-features"="+vfp2,-neonfp" } + attributes #1 = { "target-features"="+v6" } ... --- name: test_zext_s1 @@ -297,6 +307,237 @@ body: | ; CHECK: BX_RET 14, _, implicit %d0 ... --- +name: test_sub_s8 +# CHECK-LABEL: name: test_sub_s8 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK-DAG: id: 0, class: gpr +# CHECK-DAG: id: 1, class: gpr +# CHECK-DAG: id: 2, class: gpr +body: | + bb.0: + liveins: %r0, %r1 + + %0(s8) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s8) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s8) = G_SUB %0, %1 + ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s8) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... 
+--- +name: test_sub_s16 +# CHECK-LABEL: name: test_sub_s16 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK-DAG: id: 0, class: gpr +# CHECK-DAG: id: 1, class: gpr +# CHECK-DAG: id: 2, class: gpr +body: | + bb.0: + liveins: %r0, %r1 + + %0(s16) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s16) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s16) = G_SUB %0, %1 + ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s16) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... +--- +name: test_sub_s32 +# CHECK-LABEL: name: test_sub_s32 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK: id: 0, class: gpr +# CHECK: id: 1, class: gpr +# CHECK: id: 2, class: gpr +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s32) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s32) = G_SUB %0, %1 + ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s32) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... 
+--- +name: test_mul_s8 +# CHECK-LABEL: name: test_mul_s8 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK-DAG: id: 0, class: gprnopc +# CHECK-DAG: id: 1, class: gprnopc +# CHECK-DAG: id: 2, class: gprnopc +body: | + bb.0: + liveins: %r0, %r1 + + %0(s8) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s8) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s8) = G_MUL %0, %1 + ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s8) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... +--- +name: test_mul_s16 +# CHECK-LABEL: name: test_mul_s16 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK-DAG: id: 0, class: gprnopc +# CHECK-DAG: id: 1, class: gprnopc +# CHECK-DAG: id: 2, class: gprnopc +body: | + bb.0: + liveins: %r0, %r1 + + %0(s16) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s16) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s16) = G_MUL %0, %1 + ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s16) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... 
+--- +name: test_mul_s32 +# CHECK-LABEL: name: test_mul_s32 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK: id: 0, class: gprnopc +# CHECK: id: 1, class: gprnopc +# CHECK: id: 2, class: gprnopc +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s32) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s32) = G_MUL %0, %1 + ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s32) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... +--- +name: test_mulv5_s32 +# CHECK-LABEL: name: test_mulv5_s32 +legalized: true +regBankSelected: true +selected: false +# CHECK: selected: true +registers: + - { id: 0, class: gprb } + - { id: 1, class: gprb } + - { id: 2, class: gprb } +# CHECK: id: 0, class: gprnopc +# CHECK: id: 1, class: gprnopc +# CHECK: id: 2, class: gprnopc +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0 + + %1(s32) = COPY %r1 + ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1 + + %2(s32) = G_MUL %0, %1 + ; CHECK: early-clobber [[VREGRES:%[0-9]+]] = MULv5 [[VREGX]], [[VREGY]], 14, _, _ + + %r0 = COPY %2(s32) + ; CHECK: %r0 = COPY [[VREGRES]] + + BX_RET 14, _, implicit %r0 + ; CHECK: BX_RET 14, _, implicit %r0 +... 
+--- name: test_load_from_stack # CHECK-LABEL: name: test_load_from_stack legalized: true diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll index a7f5ec33bee3c..cf77ce352074d 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll +++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll @@ -35,6 +35,19 @@ entry: ret i8 %sum } +define i8 @test_sub_i8(i8 %x, i8 %y) { +; CHECK-LABEL: name: test_sub_i8 +; CHECK: liveins: %r0, %r1 +; CHECK-DAG: [[VREGX:%[0-9]+]](s8) = COPY %r0 +; CHECK-DAG: [[VREGY:%[0-9]+]](s8) = COPY %r1 +; CHECK: [[RES:%[0-9]+]](s8) = G_SUB [[VREGX]], [[VREGY]] +; CHECK: %r0 = COPY [[RES]](s8) +; CHECK: BX_RET 14, _, implicit %r0 +entry: + %res = sub i8 %x, %y + ret i8 %res +} + define signext i8 @test_return_sext_i8(i8 %x) { ; CHECK-LABEL: name: test_return_sext_i8 ; CHECK: liveins: %r0 @@ -59,6 +72,19 @@ entry: ret i16 %sum } +define i16 @test_sub_i16(i16 %x, i16 %y) { +; CHECK-LABEL: name: test_sub_i16 +; CHECK: liveins: %r0, %r1 +; CHECK-DAG: [[VREGX:%[0-9]+]](s16) = COPY %r0 +; CHECK-DAG: [[VREGY:%[0-9]+]](s16) = COPY %r1 +; CHECK: [[RES:%[0-9]+]](s16) = G_SUB [[VREGX]], [[VREGY]] +; CHECK: %r0 = COPY [[RES]](s16) +; CHECK: BX_RET 14, _, implicit %r0 +entry: + %res = sub i16 %x, %y + ret i16 %res +} + define zeroext i16 @test_return_zext_i16(i16 %x) { ; CHECK-LABEL: name: test_return_zext_i16 ; CHECK: liveins: %r0 @@ -83,6 +109,19 @@ entry: ret i32 %sum } +define i32 @test_sub_i32(i32 %x, i32 %y) { +; CHECK-LABEL: name: test_sub_i32 +; CHECK: liveins: %r0, %r1 +; CHECK-DAG: [[VREGX:%[0-9]+]](s32) = COPY %r0 +; CHECK-DAG: [[VREGY:%[0-9]+]](s32) = COPY %r1 +; CHECK: [[RES:%[0-9]+]](s32) = G_SUB [[VREGX]], [[VREGY]] +; CHECK: %r0 = COPY [[RES]](s32) +; CHECK: BX_RET 14, _, implicit %r0 +entry: + %res = sub i32 %x, %y + ret i32 %res +} + define i32 @test_stack_args(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) { ; CHECK-LABEL: name: test_stack_args ; CHECK: fixedStack: diff --git 
a/test/CodeGen/ARM/GlobalISel/arm-isel.ll b/test/CodeGen/ARM/GlobalISel/arm-isel.ll index 236dcbeb84c52..f3ca2915f306e 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-isel.ll +++ b/test/CodeGen/ARM/GlobalISel/arm-isel.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple arm-unknown -mattr=+vfp2 -global-isel %s -o - | FileCheck %s +; RUN: llc -mtriple arm-unknown -mattr=+vfp2,+v6 -global-isel %s -o - | FileCheck %s define void @test_void_return() { ; CHECK-LABEL: test_void_return: @@ -67,6 +67,60 @@ entry: ret i32 %sum } +define i8 @test_sub_i8(i8 %x, i8 %y) { +; CHECK-LABEL: test_sub_i8: +; CHECK: sub r0, r0, r1 +; CHECK: bx lr +entry: + %sum = sub i8 %x, %y + ret i8 %sum +} + +define i16 @test_sub_i16(i16 %x, i16 %y) { +; CHECK-LABEL: test_sub_i16: +; CHECK: sub r0, r0, r1 +; CHECK: bx lr +entry: + %sum = sub i16 %x, %y + ret i16 %sum +} + +define i32 @test_sub_i32(i32 %x, i32 %y) { +; CHECK-LABEL: test_sub_i32: +; CHECK: sub r0, r0, r1 +; CHECK: bx lr +entry: + %sum = sub i32 %x, %y + ret i32 %sum +} + +define i8 @test_mul_i8(i8 %x, i8 %y) { +; CHECK-LABEL: test_mul_i8: +; CHECK: mul r0, r0, r1 +; CHECK: bx lr +entry: + %sum = mul i8 %x, %y + ret i8 %sum +} + +define i16 @test_mul_i16(i16 %x, i16 %y) { +; CHECK-LABEL: test_mul_i16: +; CHECK: mul r0, r0, r1 +; CHECK: bx lr +entry: + %sum = mul i16 %x, %y + ret i16 %sum +} + +define i32 @test_mul_i32(i32 %x, i32 %y) { +; CHECK-LABEL: test_mul_i32: +; CHECK: mul r0, r0, r1 +; CHECK: bx lr +entry: + %sum = mul i32 %x, %y + ret i32 %sum +} + define i32 @test_stack_args_i32(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) { ; CHECK-LABEL: test_stack_args_i32: ; CHECK: add [[P5ADDR:r[0-9]+]], sp, #4 diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir index cbff7e12fb77c..625d35acf17b9 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir +++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir @@ -7,6 +7,14 @@ define void @test_add_s16() { ret void } define void 
@test_add_s32() { ret void } + define void @test_sub_s8() { ret void } + define void @test_sub_s16() { ret void } + define void @test_sub_s32() { ret void } + + define void @test_mul_s8() { ret void } + define void @test_mul_s16() { ret void } + define void @test_mul_s32() { ret void } + define void @test_load_from_stack() { ret void } define void @test_legal_loads() #0 { ret void } define void @test_legal_stores() #0 { ret void } @@ -139,6 +147,154 @@ body: | ... --- +name: test_sub_s8 +# CHECK-LABEL: name: test_sub_s8 +legalized: false +# CHECK: legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s8) = COPY %r0 + %1(s8) = COPY %r1 + %2(s8) = G_SUB %0, %1 + ; G_SUB with s8 is legal, so we should find it unchanged in the output + ; CHECK: {{%[0-9]+}}(s8) = G_SUB {{%[0-9]+, %[0-9]+}} + %r0 = COPY %2(s8) + BX_RET 14, _, implicit %r0 +... +--- +name: test_sub_s16 +# CHECK-LABEL: name: test_sub_s16 +legalized: false +# CHECK: legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s16) = COPY %r0 + %1(s16) = COPY %r1 + %2(s16) = G_SUB %0, %1 + ; G_SUB with s16 is legal, so we should find it unchanged in the output + ; CHECK: {{%[0-9]+}}(s16) = G_SUB {{%[0-9]+, %[0-9]+}} + %r0 = COPY %2(s16) + BX_RET 14, _, implicit %r0 + +... 
+--- +name: test_sub_s32 +# CHECK-LABEL: name: test_sub_s32 +legalized: false +# CHECK: legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + %1(s32) = COPY %r1 + %2(s32) = G_SUB %0, %1 + ; G_SUB with s32 is legal, so we should find it unchanged in the output + ; CHECK: {{%[0-9]+}}(s32) = G_SUB {{%[0-9]+, %[0-9]+}} + %r0 = COPY %2(s32) + BX_RET 14, _, implicit %r0 + +... +--- +name: test_mul_s8 +# CHECK-LABEL: name: test_mul_s8 +legalized: false +# CHECK: legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s8) = COPY %r0 + %1(s8) = COPY %r1 + %2(s8) = G_MUL %0, %1 + ; G_MUL with s8 is legal, so we should find it unchanged in the output + ; CHECK: {{%[0-9]+}}(s8) = G_MUL {{%[0-9]+, %[0-9]+}} + %r0 = COPY %2(s8) + BX_RET 14, _, implicit %r0 +... +--- +name: test_mul_s16 +# CHECK-LABEL: name: test_mul_s16 +legalized: false +# CHECK: legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s16) = COPY %r0 + %1(s16) = COPY %r1 + %2(s16) = G_MUL %0, %1 + ; G_MUL with s16 is legal, so we should find it unchanged in the output + ; CHECK: {{%[0-9]+}}(s16) = G_MUL {{%[0-9]+, %[0-9]+}} + %r0 = COPY %2(s16) + BX_RET 14, _, implicit %r0 + +... 
+--- +name: test_mul_s32 +# CHECK-LABEL: name: test_mul_s32 +legalized: false +# CHECK: legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + %1(s32) = COPY %r1 + %2(s32) = G_MUL %0, %1 + ; G_MUL with s32 is legal, so we should find it unchanged in the output + ; CHECK: {{%[0-9]+}}(s32) = G_MUL {{%[0-9]+, %[0-9]+}} + %r0 = COPY %2(s32) + BX_RET 14, _, implicit %r0 + +... +--- name: test_load_from_stack # CHECK-LABEL: name: test_load_from_stack legalized: false diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir index fbf8d81322f8f..e7935832f98a8 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir +++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir @@ -5,6 +5,14 @@ define void @test_add_s8() { ret void } define void @test_add_s1() { ret void } + define void @test_sub_s32() { ret void } + define void @test_sub_s16() { ret void } + define void @test_sub_s8() { ret void } + + define void @test_mul_s32() { ret void } + define void @test_mul_s16() { ret void } + define void @test_mul_s8() { ret void } + define void @test_loads() #0 { ret void } define void @test_stores() #0 { ret void } @@ -126,6 +134,162 @@ body: | ... --- +name: test_sub_s32 +# CHECK-LABEL: name: test_sub_s32 +legalized: true +regBankSelected: false +selected: false +# CHECK: registers: +# CHECK: - { id: 0, class: gprb } +# CHECK: - { id: 1, class: gprb } +# CHECK: - { id: 2, class: gprb } + +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + %1(s32) = COPY %r1 + %2(s32) = G_SUB %0, %1 + %r0 = COPY %2(s32) + BX_RET 14, _, implicit %r0 + +... 
+--- +name: test_sub_s16 +# CHECK-LABEL: name: test_sub_s16 +legalized: true +regBankSelected: false +selected: false +# CHECK: registers: +# CHECK: - { id: 0, class: gprb } +# CHECK: - { id: 1, class: gprb } +# CHECK: - { id: 2, class: gprb } + +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s16) = COPY %r0 + %1(s16) = COPY %r1 + %2(s16) = G_SUB %0, %1 + %r0 = COPY %2(s16) + BX_RET 14, _, implicit %r0 + +... +--- +name: test_sub_s8 +# CHECK-LABEL: name: test_sub_s8 +legalized: true +regBankSelected: false +selected: false +# CHECK: registers: +# CHECK: - { id: 0, class: gprb } +# CHECK: - { id: 1, class: gprb } +# CHECK: - { id: 2, class: gprb } + +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s8) = COPY %r0 + %1(s8) = COPY %r1 + %2(s8) = G_SUB %0, %1 + %r0 = COPY %2(s8) + BX_RET 14, _, implicit %r0 + +... +--- +name: test_mul_s32 +# CHECK-LABEL: name: test_mul_s32 +legalized: true +regBankSelected: false +selected: false +# CHECK: registers: +# CHECK: - { id: 0, class: gprb } +# CHECK: - { id: 1, class: gprb } +# CHECK: - { id: 2, class: gprb } + +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s32) = COPY %r0 + %1(s32) = COPY %r1 + %2(s32) = G_MUL %0, %1 + %r0 = COPY %2(s32) + BX_RET 14, _, implicit %r0 + +... +--- +name: test_mul_s16 +# CHECK-LABEL: name: test_mul_s16 +legalized: true +regBankSelected: false +selected: false +# CHECK: registers: +# CHECK: - { id: 0, class: gprb } +# CHECK: - { id: 1, class: gprb } +# CHECK: - { id: 2, class: gprb } + +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s16) = COPY %r0 + %1(s16) = COPY %r1 + %2(s16) = G_MUL %0, %1 + %r0 = COPY %2(s16) + BX_RET 14, _, implicit %r0 + +... 
+--- +name: test_mul_s8 +# CHECK-LABEL: name: test_mul_s8 +legalized: true +regBankSelected: false +selected: false +# CHECK: registers: +# CHECK: - { id: 0, class: gprb } +# CHECK: - { id: 1, class: gprb } +# CHECK: - { id: 2, class: gprb } + +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.0: + liveins: %r0, %r1 + + %0(s8) = COPY %r0 + %1(s8) = COPY %r1 + %2(s8) = G_MUL %0, %1 + %r0 = COPY %2(s8) + BX_RET 14, _, implicit %r0 + +... +--- name: test_loads # CHECK-LABEL: name: test_loads legalized: true diff --git a/test/CodeGen/ARM/alloc-no-stack-realign.ll b/test/CodeGen/ARM/alloc-no-stack-realign.ll index 0e077b3aee5a1..64c279b0f2187 100644 --- a/test/CodeGen/ARM/alloc-no-stack-realign.ll +++ b/test/CodeGen/ARM/alloc-no-stack-realign.ll @@ -7,31 +7,32 @@ define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" { entry: -; CHECK-LABEL: test1 -; CHECK: ldr r[[R1:[0-9]+]], [pc, r1] -; CHECK: add r[[R2:[0-9]+]], r1, #48 -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: mov r[[R2:[0-9]+]], r[[R1]] -; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]! -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32 -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: mov r[[R1:[0-9]+]], sp -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: add r[[R2:[0-9]+]], r[[R1]], #32 -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]! -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]! 
-; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: add r[[R1:[0-9]+]], r0, #48 -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: add r[[R1:[0-9]+]], r0, #32 -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r0:128]! -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r0:128] +; CHECK-LABEL: test1: +; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]] +; CHECK: mov r[[R2:[0-9]+]], r[[R1]] +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]! +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48 +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32 +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128] +; CHECK: mov r[[R1:[0-9]+]], #32 +; CHECK: mov r[[R2:[0-9]+]], sp +; CHECK: mov r[[R3:[0-9]+]], r[[R2]] +; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128], r[[R1]] +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128] +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]! +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128] +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]! +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128] +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: add r[[R1:[0-9]+]], r0, #48 +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128] +; CHECK: add r[[R1:[0-9]+]], r0, #32 +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128] +; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]! 
+; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128] %retval = alloca <16 x float>, align 16 %0 = load <16 x float>, <16 x float>* @T3_retval, align 16 store <16 x float> %0, <16 x float>* %retval @@ -42,30 +43,32 @@ entry: define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp { entry: -; CHECK: ldr r[[R1:[0-9]+]], [pc, r1] -; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48 -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: mov r[[R2:[0-9]+]], r[[R1]] -; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]! -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32 -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: mov r[[R1:[0-9]+]], sp -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: orr r[[R2:[0-9]+]], r[[R1]], #32 -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]! -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]! -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; CHECK: add r[[R1:[0-9]+]], r0, #48 -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: add r[[R1:[0-9]+]], r0, #32 -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] -; CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r0:128]! -; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r0:128] +; CHECK-LABEL: test2: +; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]] +; CHECK: mov r[[R2:[0-9]+]], r[[R1]] +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]! 
+; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48 +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32 +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128] +; CHECK: mov r[[R1:[0-9]+]], #32 +; CHECK: mov r[[R2:[0-9]+]], sp +; CHECK: mov r[[R3:[0-9]+]], r[[R2]] +; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128], r[[R1]] +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128] +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]! +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128] +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]! +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128] +; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128] +; CHECK: add r[[R1:[0-9]+]], r0, #48 +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128] +; CHECK: add r[[R1:[0-9]+]], r0, #32 +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128] +; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]! +; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128] %retval = alloca <16 x float>, align 16 diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll index fc85a3a2e6834..699ef6e92a4ff 100644 --- a/test/CodeGen/ARM/build-attributes.ll +++ b/test/CodeGen/ARM/build-attributes.ll @@ -231,6 +231,11 @@ ; V6: .eabi_attribute 6, 6 ; V6: .eabi_attribute 8, 1 ;; We assume round-to-nearest by default (matches GCC) +; V6-NOT: .eabi_attribute 27 +; V6-NOT: .eabi_attribute 36 +; V6-NOT: .eabi_attribute 42 +; V6-NOT: .eabi_attribute 44 +; V6-NOT: .eabi_attribute 68 ; V6-NOT: .eabi_attribute 19 ;; The default choice made by llc is for a V6 CPU without an FPU. 
;; This is not an interesting detail, but for such CPUs, the default intention is to use @@ -242,13 +247,8 @@ ; V6: .eabi_attribute 23, 3 ; V6: .eabi_attribute 24, 1 ; V6: .eabi_attribute 25, 1 -; V6-NOT: .eabi_attribute 27 ; V6-NOT: .eabi_attribute 28 -; V6-NOT: .eabi_attribute 36 ; V6: .eabi_attribute 38, 1 -; V6-NOT: .eabi_attribute 42 -; V6-NOT: .eabi_attribute 44 -; V6-NOT: .eabi_attribute 68 ; V6-FAST-NOT: .eabi_attribute 19 ;; Despite the V6 CPU having no FPU by default, we chose to flush to @@ -262,9 +262,14 @@ ;; We emit 6, 12 for both v6-M and v6S-M, technically this is incorrect for ;; V6-M, however we don't model the OS extension so this is fine. ; V6M: .eabi_attribute 6, 12 -; V6M-NOT: .eabi_attribute 7 +; V6M: .eabi_attribute 7, 77 ; V6M: .eabi_attribute 8, 0 ; V6M: .eabi_attribute 9, 1 +; V6M-NOT: .eabi_attribute 27 +; V6M-NOT: .eabi_attribute 36 +; V6M-NOT: .eabi_attribute 42 +; V6M-NOT: .eabi_attribute 44 +; V6M-NOT: .eabi_attribute 68 ; V6M-NOT: .eabi_attribute 19 ;; The default choice made by llc is for a V6M CPU without an FPU. 
;; This is not an interesting detail, but for such CPUs, the default intention is to use @@ -276,13 +281,8 @@ ; V6M: .eabi_attribute 23, 3 ; V6M: .eabi_attribute 24, 1 ; V6M: .eabi_attribute 25, 1 -; V6M-NOT: .eabi_attribute 27 ; V6M-NOT: .eabi_attribute 28 -; V6M-NOT: .eabi_attribute 36 ; V6M: .eabi_attribute 38, 1 -; V6M-NOT: .eabi_attribute 42 -; V6M-NOT: .eabi_attribute 44 -; V6M-NOT: .eabi_attribute 68 ; V6M-FAST-NOT: .eabi_attribute 19 ;; Despite the V6M CPU having no FPU by default, we chose to flush to @@ -298,6 +298,11 @@ ; ARM1156T2F-S: .eabi_attribute 8, 1 ; ARM1156T2F-S: .eabi_attribute 9, 2 ; ARM1156T2F-S: .fpu vfpv2 +; ARM1156T2F-S-NOT: .eabi_attribute 27 +; ARM1156T2F-S-NOT: .eabi_attribute 36 +; ARM1156T2F-S-NOT: .eabi_attribute 42 +; ARM1156T2F-S-NOT: .eabi_attribute 44 +; ARM1156T2F-S-NOT: .eabi_attribute 68 ; ARM1156T2F-S-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; ARM1156T2F-S: .eabi_attribute 20, 1 @@ -306,13 +311,8 @@ ; ARM1156T2F-S: .eabi_attribute 23, 3 ; ARM1156T2F-S: .eabi_attribute 24, 1 ; ARM1156T2F-S: .eabi_attribute 25, 1 -; ARM1156T2F-S-NOT: .eabi_attribute 27 ; ARM1156T2F-S-NOT: .eabi_attribute 28 -; ARM1156T2F-S-NOT: .eabi_attribute 36 ; ARM1156T2F-S: .eabi_attribute 38, 1 -; ARM1156T2F-S-NOT: .eabi_attribute 42 -; ARM1156T2F-S-NOT: .eabi_attribute 44 -; ARM1156T2F-S-NOT: .eabi_attribute 68 ; ARM1156T2F-S-FAST-NOT: .eabi_attribute 19 ;; V6 cores default to flush to positive zero (value 0). Note that value 2 is also equally @@ -327,6 +327,11 @@ ; V7M: .eabi_attribute 7, 77 ; V7M: .eabi_attribute 8, 0 ; V7M: .eabi_attribute 9, 2 +; V7M-NOT: .eabi_attribute 27 +; V7M-NOT: .eabi_attribute 36 +; V7M-NOT: .eabi_attribute 42 +; V7M-NOT: .eabi_attribute 44 +; V7M-NOT: .eabi_attribute 68 ; V7M-NOT: .eabi_attribute 19 ;; The default choice made by llc is for a V7M CPU without an FPU. 
;; This is not an interesting detail, but for such CPUs, the default intention is to use @@ -338,13 +343,8 @@ ; V7M: .eabi_attribute 23, 3 ; V7M: .eabi_attribute 24, 1 ; V7M: .eabi_attribute 25, 1 -; V7M-NOT: .eabi_attribute 27 ; V7M-NOT: .eabi_attribute 28 -; V7M-NOT: .eabi_attribute 36 ; V7M: .eabi_attribute 38, 1 -; V7M-NOT: .eabi_attribute 42 -; V7M-NOT: .eabi_attribute 44 -; V7M-NOT: .eabi_attribute 68 ; V7M-FAST-NOT: .eabi_attribute 19 ;; Despite the V7M CPU having no FPU by default, we chose to flush @@ -357,6 +357,11 @@ ; V7: .syntax unified ; V7: .eabi_attribute 6, 10 +; V7-NOT: .eabi_attribute 27 +; V7-NOT: .eabi_attribute 36 +; V7-NOT: .eabi_attribute 42 +; V7-NOT: .eabi_attribute 44 +; V7-NOT: .eabi_attribute 68 ; V7-NOT: .eabi_attribute 19 ;; In safe-maths mode we default to an IEEE 754 compliant choice. ; V7: .eabi_attribute 20, 1 @@ -365,13 +370,8 @@ ; V7: .eabi_attribute 23, 3 ; V7: .eabi_attribute 24, 1 ; V7: .eabi_attribute 25, 1 -; V7-NOT: .eabi_attribute 27 ; V7-NOT: .eabi_attribute 28 -; V7-NOT: .eabi_attribute 36 ; V7: .eabi_attribute 38, 1 -; V7-NOT: .eabi_attribute 42 -; V7-NOT: .eabi_attribute 44 -; V7-NOT: .eabi_attribute 68 ; V7-FAST-NOT: .eabi_attribute 19 ;; The default CPU does have an FPU and it must be VFPv3 or better, so it flushes @@ -386,6 +386,9 @@ ; V7VE: .eabi_attribute 7, 65 @ Tag_CPU_arch_profile ; V7VE: .eabi_attribute 8, 1 @ Tag_ARM_ISA_use ; V7VE: .eabi_attribute 9, 2 @ Tag_THUMB_ISA_use +; V7VE: .eabi_attribute 42, 1 @ Tag_MPextension_use +; V7VE: .eabi_attribute 44, 2 @ Tag_DIV_use +; V7VE: .eabi_attribute 68, 3 @ Tag_Virtualization_use ; V7VE: .eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use ; V7VE: .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal ; V7VE: .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions @@ -393,19 +396,16 @@ ; V7VE: .eabi_attribute 24, 1 @ Tag_ABI_align_needed ; V7VE: .eabi_attribute 25, 1 @ Tag_ABI_align_preserved ; V7VE: .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format -; V7VE: .eabi_attribute 42, 1 @ 
Tag_MPextension_use -; V7VE: .eabi_attribute 44, 2 @ Tag_DIV_use -; V7VE: .eabi_attribute 68, 3 @ Tag_Virtualization_use ; V8: .syntax unified ; V8: .eabi_attribute 67, "2.09" ; V8: .eabi_attribute 6, 14 +; V8-NOT: .eabi_attribute 44 ; V8-NOT: .eabi_attribute 19 ; V8: .eabi_attribute 20, 1 ; V8: .eabi_attribute 21, 1 ; V8-NOT: .eabi_attribute 22 ; V8: .eabi_attribute 23, 3 -; V8-NOT: .eabi_attribute 44 ; V8-FAST-NOT: .eabi_attribute 19 ;; The default does have an FPU, and for V8-A, it flushes preserving sign. @@ -496,6 +496,30 @@ ; CORTEX-A7-FPUV4: .fpu vfpv4 ; CORTEX-A7-CHECK-NOT: .eabi_attribute 19 + +; Tag_FP_HP_extension +; CORTEX-A7-CHECK: .eabi_attribute 36, 1 +; CORTEX-A7-NOFPU-NOT: .eabi_attribute 36 +; CORTEX-A7-FPUV4: .eabi_attribute 36, 1 + +; Tag_MPextension_use +; CORTEX-A7-CHECK: .eabi_attribute 42, 1 +; CORTEX-A7-NOFPU: .eabi_attribute 42, 1 +; CORTEX-A7-FPUV4: .eabi_attribute 42, 1 + +; Tag_DIV_use +; CORTEX-A7-CHECK: .eabi_attribute 44, 2 +; CORTEX-A7-NOFPU: .eabi_attribute 44, 2 +; CORTEX-A7-FPUV4: .eabi_attribute 44, 2 + +; Tag_DSP_extension +; CORTEX-A7-CHECK-NOT: .eabi_attribute 46 + +; Tag_Virtualization_use +; CORTEX-A7-CHECK: .eabi_attribute 68, 3 +; CORTEX-A7-NOFPU: .eabi_attribute 68, 3 +; CORTEX-A7-FPUV4: .eabi_attribute 68, 3 + ; Tag_ABI_FP_denormal ;; We default to IEEE 754 compliance ; CORTEX-A7-CHECK: .eabi_attribute 20, 1 @@ -535,40 +559,20 @@ ; CORTEX-A7-NOFPU: .eabi_attribute 25, 1 ; CORTEX-A7-FPUV4: .eabi_attribute 25, 1 -; Tag_FP_HP_extension -; CORTEX-A7-CHECK: .eabi_attribute 36, 1 -; CORTEX-A7-NOFPU-NOT: .eabi_attribute 36 -; CORTEX-A7-FPUV4: .eabi_attribute 36, 1 - ; Tag_FP_16bit_format ; CORTEX-A7-CHECK: .eabi_attribute 38, 1 ; CORTEX-A7-NOFPU: .eabi_attribute 38, 1 ; CORTEX-A7-FPUV4: .eabi_attribute 38, 1 -; Tag_MPextension_use -; CORTEX-A7-CHECK: .eabi_attribute 42, 1 -; CORTEX-A7-NOFPU: .eabi_attribute 42, 1 -; CORTEX-A7-FPUV4: .eabi_attribute 42, 1 - -; Tag_DIV_use -; CORTEX-A7-CHECK: .eabi_attribute 44, 2 -; 
CORTEX-A7-NOFPU: .eabi_attribute 44, 2 -; CORTEX-A7-FPUV4: .eabi_attribute 44, 2 - -; Tag_DSP_extension -; CORTEX-A7-CHECK-NOT: .eabi_attribute 46 - -; Tag_Virtualization_use -; CORTEX-A7-CHECK: .eabi_attribute 68, 3 -; CORTEX-A7-NOFPU: .eabi_attribute 68, 3 -; CORTEX-A7-FPUV4: .eabi_attribute 68, 3 - ; CORTEX-A5-DEFAULT: .cpu cortex-a5 ; CORTEX-A5-DEFAULT: .eabi_attribute 6, 10 ; CORTEX-A5-DEFAULT: .eabi_attribute 7, 65 ; CORTEX-A5-DEFAULT: .eabi_attribute 8, 1 ; CORTEX-A5-DEFAULT: .eabi_attribute 9, 2 ; CORTEX-A5-DEFAULT: .fpu neon-vfpv4 +; CORTEX-A5-DEFAULT: .eabi_attribute 42, 1 +; CORTEX-A5-DEFAULT-NOT: .eabi_attribute 44 +; CORTEX-A5-DEFAULT: .eabi_attribute 68, 1 ; CORTEX-A5-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A5-DEFAULT: .eabi_attribute 20, 1 @@ -577,9 +581,6 @@ ; CORTEX-A5-DEFAULT: .eabi_attribute 23, 3 ; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A5-DEFAULT: .eabi_attribute 42, 1 -; CORTEX-A5-DEFAULT-NOT: .eabi_attribute 44 -; CORTEX-A5-DEFAULT: .eabi_attribute 68, 1 ; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 19 ;; The A5 defaults to a VFPv4 FPU, so it flushed preserving the sign when -ffast-math @@ -595,6 +596,8 @@ ; CORTEX-A5-NONEON: .eabi_attribute 8, 1 ; CORTEX-A5-NONEON: .eabi_attribute 9, 2 ; CORTEX-A5-NONEON: .fpu vfpv4-d16 +; CORTEX-A5-NONEON: .eabi_attribute 42, 1 +; CORTEX-A5-NONEON: .eabi_attribute 68, 1 ;; We default to IEEE 754 compliance ; CORTEX-A5-NONEON: .eabi_attribute 20, 1 ; CORTEX-A5-NONEON: .eabi_attribute 21, 1 @@ -602,8 +605,6 @@ ; CORTEX-A5-NONEON: .eabi_attribute 23, 3 ; CORTEX-A5-NONEON: .eabi_attribute 24, 1 ; CORTEX-A5-NONEON: .eabi_attribute 25, 1 -; CORTEX-A5-NONEON: .eabi_attribute 42, 1 -; CORTEX-A5-NONEON: .eabi_attribute 68, 1 ; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 19 ;; The A5 defaults to a VFPv4 FPU, so it flushed preserving sign when -ffast-math @@ -619,6 +620,8 @@ ; CORTEX-A5-NOFPU: .eabi_attribute 8, 1 ; CORTEX-A5-NOFPU: 
.eabi_attribute 9, 2 ; CORTEX-A5-NOFPU-NOT: .fpu +; CORTEX-A5-NOFPU: .eabi_attribute 42, 1 +; CORTEX-A5-NOFPU: .eabi_attribute 68, 1 ; CORTEX-A5-NOFPU-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A5-NOFPU: .eabi_attribute 20, 1 @@ -627,8 +630,6 @@ ; CORTEX-A5-NOFPU: .eabi_attribute 23, 3 ; CORTEX-A5-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A5-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A5-NOFPU: .eabi_attribute 42, 1 -; CORTEX-A5-NOFPU: .eabi_attribute 68, 1 ; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving @@ -645,6 +646,11 @@ ; CORTEX-A8-SOFT: .eabi_attribute 8, 1 ; CORTEX-A8-SOFT: .eabi_attribute 9, 2 ; CORTEX-A8-SOFT: .fpu neon +; CORTEX-A8-SOFT-NOT: .eabi_attribute 27 +; CORTEX-A8-SOFT-NOT: .eabi_attribute 36, 1 +; CORTEX-A8-SOFT-NOT: .eabi_attribute 42, 1 +; CORTEX-A8-SOFT-NOT: .eabi_attribute 44 +; CORTEX-A8-SOFT: .eabi_attribute 68, 1 ; CORTEX-A8-SOFT-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A8-SOFT: .eabi_attribute 20, 1 @@ -653,13 +659,8 @@ ; CORTEX-A8-SOFT: .eabi_attribute 23, 3 ; CORTEX-A8-SOFT: .eabi_attribute 24, 1 ; CORTEX-A8-SOFT: .eabi_attribute 25, 1 -; CORTEX-A8-SOFT-NOT: .eabi_attribute 27 ; CORTEX-A8-SOFT-NOT: .eabi_attribute 28 -; CORTEX-A8-SOFT-NOT: .eabi_attribute 36, 1 ; CORTEX-A8-SOFT: .eabi_attribute 38, 1 -; CORTEX-A8-SOFT-NOT: .eabi_attribute 42, 1 -; CORTEX-A8-SOFT-NOT: .eabi_attribute 44 -; CORTEX-A8-SOFT: .eabi_attribute 68, 1 ; CORTEX-A9-SOFT: .cpu cortex-a9 ; CORTEX-A9-SOFT: .eabi_attribute 6, 10 @@ -667,6 +668,11 @@ ; CORTEX-A9-SOFT: .eabi_attribute 8, 1 ; CORTEX-A9-SOFT: .eabi_attribute 9, 2 ; CORTEX-A9-SOFT: .fpu neon +; CORTEX-A9-SOFT-NOT: .eabi_attribute 27 +; CORTEX-A9-SOFT: .eabi_attribute 36, 1 +; CORTEX-A9-SOFT: .eabi_attribute 42, 1 +; CORTEX-A9-SOFT-NOT: .eabi_attribute 44 +; CORTEX-A9-SOFT: .eabi_attribute 68, 1 ; CORTEX-A9-SOFT-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A9-SOFT: 
.eabi_attribute 20, 1 @@ -675,13 +681,8 @@ ; CORTEX-A9-SOFT: .eabi_attribute 23, 3 ; CORTEX-A9-SOFT: .eabi_attribute 24, 1 ; CORTEX-A9-SOFT: .eabi_attribute 25, 1 -; CORTEX-A9-SOFT-NOT: .eabi_attribute 27 ; CORTEX-A9-SOFT-NOT: .eabi_attribute 28 -; CORTEX-A9-SOFT: .eabi_attribute 36, 1 ; CORTEX-A9-SOFT: .eabi_attribute 38, 1 -; CORTEX-A9-SOFT: .eabi_attribute 42, 1 -; CORTEX-A9-SOFT-NOT: .eabi_attribute 44 -; CORTEX-A9-SOFT: .eabi_attribute 68, 1 ; CORTEX-A8-SOFT-FAST-NOT: .eabi_attribute 19 ; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 19 @@ -699,6 +700,10 @@ ; CORTEX-A8-HARD: .eabi_attribute 8, 1 ; CORTEX-A8-HARD: .eabi_attribute 9, 2 ; CORTEX-A8-HARD: .fpu neon +; CORTEX-A8-HARD-NOT: .eabi_attribute 27 +; CORTEX-A8-HARD-NOT: .eabi_attribute 36, 1 +; CORTEX-A8-HARD-NOT: .eabi_attribute 42, 1 +; CORTEX-A8-HARD: .eabi_attribute 68, 1 ; CORTEX-A8-HARD-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A8-HARD: .eabi_attribute 20, 1 @@ -707,12 +712,8 @@ ; CORTEX-A8-HARD: .eabi_attribute 23, 3 ; CORTEX-A8-HARD: .eabi_attribute 24, 1 ; CORTEX-A8-HARD: .eabi_attribute 25, 1 -; CORTEX-A8-HARD-NOT: .eabi_attribute 27 ; CORTEX-A8-HARD: .eabi_attribute 28, 1 -; CORTEX-A8-HARD-NOT: .eabi_attribute 36, 1 ; CORTEX-A8-HARD: .eabi_attribute 38, 1 -; CORTEX-A8-HARD-NOT: .eabi_attribute 42, 1 -; CORTEX-A8-HARD: .eabi_attribute 68, 1 @@ -722,6 +723,10 @@ ; CORTEX-A9-HARD: .eabi_attribute 8, 1 ; CORTEX-A9-HARD: .eabi_attribute 9, 2 ; CORTEX-A9-HARD: .fpu neon +; CORTEX-A9-HARD-NOT: .eabi_attribute 27 +; CORTEX-A9-HARD: .eabi_attribute 36, 1 +; CORTEX-A9-HARD: .eabi_attribute 42, 1 +; CORTEX-A9-HARD: .eabi_attribute 68, 1 ; CORTEX-A9-HARD-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A9-HARD: .eabi_attribute 20, 1 @@ -730,12 +735,8 @@ ; CORTEX-A9-HARD: .eabi_attribute 23, 3 ; CORTEX-A9-HARD: .eabi_attribute 24, 1 ; CORTEX-A9-HARD: .eabi_attribute 25, 1 -; CORTEX-A9-HARD-NOT: .eabi_attribute 27 ; CORTEX-A9-HARD: .eabi_attribute 28, 1 -; 
CORTEX-A9-HARD: .eabi_attribute 36, 1 ; CORTEX-A9-HARD: .eabi_attribute 38, 1 -; CORTEX-A9-HARD: .eabi_attribute 42, 1 -; CORTEX-A9-HARD: .eabi_attribute 68, 1 ; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 19 ;; The A8 defaults to a VFPv3 FPU, so it flushes preserving the sign when @@ -759,6 +760,9 @@ ; CORTEX-A12-DEFAULT: .eabi_attribute 8, 1 ; CORTEX-A12-DEFAULT: .eabi_attribute 9, 2 ; CORTEX-A12-DEFAULT: .fpu neon-vfpv4 +; CORTEX-A12-DEFAULT: .eabi_attribute 42, 1 +; CORTEX-A12-DEFAULT: .eabi_attribute 44, 2 +; CORTEX-A12-DEFAULT: .eabi_attribute 68, 3 ; CORTEX-A12-DEFAULT-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A12-DEFAULT: .eabi_attribute 20, 1 @@ -767,9 +771,6 @@ ; CORTEX-A12-DEFAULT: .eabi_attribute 23, 3 ; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A12-DEFAULT: .eabi_attribute 42, 1 -; CORTEX-A12-DEFAULT: .eabi_attribute 44, 2 -; CORTEX-A12-DEFAULT: .eabi_attribute 68, 3 ; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 19 ;; The A12 defaults to a VFPv3 FPU, so it flushes preserving the sign when @@ -785,6 +786,9 @@ ; CORTEX-A12-NOFPU: .eabi_attribute 8, 1 ; CORTEX-A12-NOFPU: .eabi_attribute 9, 2 ; CORTEX-A12-NOFPU-NOT: .fpu +; CORTEX-A12-NOFPU: .eabi_attribute 42, 1 +; CORTEX-A12-NOFPU: .eabi_attribute 44, 2 +; CORTEX-A12-NOFPU: .eabi_attribute 68, 3 ; CORTEX-A12-NOFPU-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A12-NOFPU: .eabi_attribute 20, 1 @@ -793,9 +797,6 @@ ; CORTEX-A12-NOFPU: .eabi_attribute 23, 3 ; CORTEX-A12-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A12-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A12-NOFPU: .eabi_attribute 42, 1 -; CORTEX-A12-NOFPU: .eabi_attribute 44, 2 -; CORTEX-A12-NOFPU: .eabi_attribute 68, 3 ; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving @@ -812,6 +813,11 @@ ; CORTEX-A15: .eabi_attribute 8, 1 ; CORTEX-A15: .eabi_attribute 9, 2 ; CORTEX-A15: .fpu neon-vfpv4 +; 
CORTEX-A15-NOT: .eabi_attribute 27 +; CORTEX-A15: .eabi_attribute 36, 1 +; CORTEX-A15: .eabi_attribute 42, 1 +; CORTEX-A15: .eabi_attribute 44, 2 +; CORTEX-A15: .eabi_attribute 68, 3 ; CORTEX-A15-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A15: .eabi_attribute 20, 1 @@ -820,13 +826,8 @@ ; CORTEX-A15: .eabi_attribute 23, 3 ; CORTEX-A15: .eabi_attribute 24, 1 ; CORTEX-A15: .eabi_attribute 25, 1 -; CORTEX-A15-NOT: .eabi_attribute 27 ; CORTEX-A15-NOT: .eabi_attribute 28 -; CORTEX-A15: .eabi_attribute 36, 1 ; CORTEX-A15: .eabi_attribute 38, 1 -; CORTEX-A15: .eabi_attribute 42, 1 -; CORTEX-A15: .eabi_attribute 44, 2 -; CORTEX-A15: .eabi_attribute 68, 3 ; CORTEX-A15-FAST-NOT: .eabi_attribute 19 ;; The A15 defaults to a VFPv3 FPU, so it flushes preserving the sign when @@ -842,6 +843,9 @@ ; CORTEX-A17-DEFAULT: .eabi_attribute 8, 1 ; CORTEX-A17-DEFAULT: .eabi_attribute 9, 2 ; CORTEX-A17-DEFAULT: .fpu neon-vfpv4 +; CORTEX-A17-DEFAULT: .eabi_attribute 42, 1 +; CORTEX-A17-DEFAULT: .eabi_attribute 44, 2 +; CORTEX-A17-DEFAULT: .eabi_attribute 68, 3 ; CORTEX-A17-DEFAULT-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A17-DEFAULT: .eabi_attribute 20, 1 @@ -850,9 +854,6 @@ ; CORTEX-A17-DEFAULT: .eabi_attribute 23, 3 ; CORTEX-A17-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A17-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A17-DEFAULT: .eabi_attribute 42, 1 -; CORTEX-A17-DEFAULT: .eabi_attribute 44, 2 -; CORTEX-A17-DEFAULT: .eabi_attribute 68, 3 ; CORTEX-A17-FAST-NOT: .eabi_attribute 19 ;; The A17 defaults to a VFPv3 FPU, so it flushes preserving the sign when @@ -868,6 +869,9 @@ ; CORTEX-A17-NOFPU: .eabi_attribute 8, 1 ; CORTEX-A17-NOFPU: .eabi_attribute 9, 2 ; CORTEX-A17-NOFPU-NOT: .fpu +; CORTEX-A17-NOFPU: .eabi_attribute 42, 1 +; CORTEX-A17-NOFPU: .eabi_attribute 44, 2 +; CORTEX-A17-NOFPU: .eabi_attribute 68, 3 ; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A17-NOFPU: .eabi_attribute 20, 1 @@ -876,9 
+880,6 @@ ; CORTEX-A17-NOFPU: .eabi_attribute 23, 3 ; CORTEX-A17-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A17-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A17-NOFPU: .eabi_attribute 42, 1 -; CORTEX-A17-NOFPU: .eabi_attribute 44, 2 -; CORTEX-A17-NOFPU: .eabi_attribute 68, 3 ; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving @@ -897,25 +898,25 @@ ; CORTEX-M0: .cpu cortex-m0 ; CORTEX-M0: .eabi_attribute 6, 12 -; CORTEX-M0-NOT: .eabi_attribute 7 +; CORTEX-M0: .eabi_attribute 7, 77 ; CORTEX-M0: .eabi_attribute 8, 0 ; CORTEX-M0: .eabi_attribute 9, 1 +; CORTEX-M0-NOT: .eabi_attribute 27 +; CORTEX-M0-NOT: .eabi_attribute 36 +; CORTEX-M0: .eabi_attribute 34, 0 +; CORTEX-M0-NOT: .eabi_attribute 42 +; CORTEX-M0-NOT: .eabi_attribute 44 +; CORTEX-M0-NOT: .eabi_attribute 68 ; CORTEX-M0-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-M0: .eabi_attribute 20, 1 ; CORTEX-M0: .eabi_attribute 21, 1 ; CORTEX-M0-NOT: .eabi_attribute 22 ; CORTEX-M0: .eabi_attribute 23, 3 -; CORTEX-M0: .eabi_attribute 34, 0 ; CORTEX-M0: .eabi_attribute 24, 1 ; CORTEX-M0: .eabi_attribute 25, 1 -; CORTEX-M0-NOT: .eabi_attribute 27 ; CORTEX-M0-NOT: .eabi_attribute 28 -; CORTEX-M0-NOT: .eabi_attribute 36 ; CORTEX-M0: .eabi_attribute 38, 1 -; CORTEX-M0-NOT: .eabi_attribute 42 -; CORTEX-M0-NOT: .eabi_attribute 44 -; CORTEX-M0-NOT: .eabi_attribute 68 ; CORTEX-M0-FAST-NOT: .eabi_attribute 19 ;; Despite the M0 CPU having no FPU in this scenario, we chose to @@ -930,9 +931,14 @@ ; CORTEX-M0PLUS: .cpu cortex-m0plus ; CORTEX-M0PLUS: .eabi_attribute 6, 12 -; CORTEX-M0PLUS-NOT: .eabi_attribute 7 +; CORTEX-M0PLUS: .eabi_attribute 7, 77 ; CORTEX-M0PLUS: .eabi_attribute 8, 0 ; CORTEX-M0PLUS: .eabi_attribute 9, 1 +; CORTEX-M0PLUS-NOT: .eabi_attribute 27 +; CORTEX-M0PLUS-NOT: .eabi_attribute 36 +; CORTEX-M0PLUS-NOT: .eabi_attribute 42 +; CORTEX-M0PLUS-NOT: .eabi_attribute 44 +; CORTEX-M0PLUS-NOT: .eabi_attribute 68 ; CORTEX-M0PLUS-NOT: .eabi_attribute 
19 ;; We default to IEEE 754 compliance ; CORTEX-M0PLUS: .eabi_attribute 20, 1 @@ -941,13 +947,8 @@ ; CORTEX-M0PLUS: .eabi_attribute 23, 3 ; CORTEX-M0PLUS: .eabi_attribute 24, 1 ; CORTEX-M0PLUS: .eabi_attribute 25, 1 -; CORTEX-M0PLUS-NOT: .eabi_attribute 27 ; CORTEX-M0PLUS-NOT: .eabi_attribute 28 -; CORTEX-M0PLUS-NOT: .eabi_attribute 36 ; CORTEX-M0PLUS: .eabi_attribute 38, 1 -; CORTEX-M0PLUS-NOT: .eabi_attribute 42 -; CORTEX-M0PLUS-NOT: .eabi_attribute 44 -; CORTEX-M0PLUS-NOT: .eabi_attribute 68 ; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 19 ;; Despite the M0+ CPU having no FPU in this scenario, we chose to @@ -962,9 +963,14 @@ ; CORTEX-M1: .cpu cortex-m1 ; CORTEX-M1: .eabi_attribute 6, 12 -; CORTEX-M1-NOT: .eabi_attribute 7 +; CORTEX-M1: .eabi_attribute 7, 77 ; CORTEX-M1: .eabi_attribute 8, 0 ; CORTEX-M1: .eabi_attribute 9, 1 +; CORTEX-M1-NOT: .eabi_attribute 27 +; CORTEX-M1-NOT: .eabi_attribute 36 +; CORTEX-M1-NOT: .eabi_attribute 42 +; CORTEX-M1-NOT: .eabi_attribute 44 +; CORTEX-M1-NOT: .eabi_attribute 68 ; CORTEX-M1-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-M1: .eabi_attribute 20, 1 @@ -973,13 +979,8 @@ ; CORTEX-M1: .eabi_attribute 23, 3 ; CORTEX-M1: .eabi_attribute 24, 1 ; CORTEX-M1: .eabi_attribute 25, 1 -; CORTEX-M1-NOT: .eabi_attribute 27 ; CORTEX-M1-NOT: .eabi_attribute 28 -; CORTEX-M1-NOT: .eabi_attribute 36 ; CORTEX-M1: .eabi_attribute 38, 1 -; CORTEX-M1-NOT: .eabi_attribute 42 -; CORTEX-M1-NOT: .eabi_attribute 44 -; CORTEX-M1-NOT: .eabi_attribute 68 ; CORTEX-M1-FAST-NOT: .eabi_attribute 19 ;; Despite the M1 CPU having no FPU in this scenario, we chose to @@ -994,9 +995,13 @@ ; SC000: .cpu sc000 ; SC000: .eabi_attribute 6, 12 -; SC000-NOT: .eabi_attribute 7 +; SC000: .eabi_attribute 7, 77 ; SC000: .eabi_attribute 8, 0 ; SC000: .eabi_attribute 9, 1 +; SC000-NOT: .eabi_attribute 27 +; SC000-NOT: .eabi_attribute 42 +; SC000-NOT: .eabi_attribute 44 +; SC000-NOT: .eabi_attribute 68 ; SC000-NOT: .eabi_attribute 19 ;; We default to 
IEEE 754 compliance ; SC000: .eabi_attribute 20, 1 @@ -1005,13 +1010,8 @@ ; SC000: .eabi_attribute 23, 3 ; SC000: .eabi_attribute 24, 1 ; SC000: .eabi_attribute 25, 1 -; SC000-NOT: .eabi_attribute 27 ; SC000-NOT: .eabi_attribute 28 -; SC000-NOT: .eabi_attribute 36 ; SC000: .eabi_attribute 38, 1 -; SC000-NOT: .eabi_attribute 42 -; SC000-NOT: .eabi_attribute 44 -; SC000-NOT: .eabi_attribute 68 ; SC000-FAST-NOT: .eabi_attribute 19 ;; Despite the SC000 CPU having no FPU in this scenario, we chose to @@ -1029,6 +1029,11 @@ ; CORTEX-M3: .eabi_attribute 7, 77 ; CORTEX-M3: .eabi_attribute 8, 0 ; CORTEX-M3: .eabi_attribute 9, 2 +; CORTEX-M3-NOT: .eabi_attribute 27 +; CORTEX-M3-NOT: .eabi_attribute 36 +; CORTEX-M3-NOT: .eabi_attribute 42 +; CORTEX-M3-NOT: .eabi_attribute 44 +; CORTEX-M3-NOT: .eabi_attribute 68 ; CORTEX-M3-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-M3: .eabi_attribute 20, 1 @@ -1037,13 +1042,8 @@ ; CORTEX-M3: .eabi_attribute 23, 3 ; CORTEX-M3: .eabi_attribute 24, 1 ; CORTEX-M3: .eabi_attribute 25, 1 -; CORTEX-M3-NOT: .eabi_attribute 27 ; CORTEX-M3-NOT: .eabi_attribute 28 -; CORTEX-M3-NOT: .eabi_attribute 36 ; CORTEX-M3: .eabi_attribute 38, 1 -; CORTEX-M3-NOT: .eabi_attribute 42 -; CORTEX-M3-NOT: .eabi_attribute 44 -; CORTEX-M3-NOT: .eabi_attribute 68 ; CORTEX-M3-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving @@ -1059,6 +1059,11 @@ ; SC300: .eabi_attribute 7, 77 ; SC300: .eabi_attribute 8, 0 ; SC300: .eabi_attribute 9, 2 +; SC300-NOT: .eabi_attribute 27 +; SC300-NOT: .eabi_attribute 36 +; SC300-NOT: .eabi_attribute 42 +; SC300-NOT: .eabi_attribute 44 +; SC300-NOT: .eabi_attribute 68 ; SC300-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; SC300: .eabi_attribute 20, 1 @@ -1067,13 +1072,8 @@ ; SC300: .eabi_attribute 23, 3 ; SC300: .eabi_attribute 24, 1 ; SC300: .eabi_attribute 25, 1 -; SC300-NOT: .eabi_attribute 27 ; SC300-NOT: .eabi_attribute 28 -; SC300-NOT: 
.eabi_attribute 36 ; SC300: .eabi_attribute 38, 1 -; SC300-NOT: .eabi_attribute 42 -; SC300-NOT: .eabi_attribute 44 -; SC300-NOT: .eabi_attribute 68 ; SC300-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving @@ -1090,6 +1090,11 @@ ; CORTEX-M4-SOFT: .eabi_attribute 8, 0 ; CORTEX-M4-SOFT: .eabi_attribute 9, 2 ; CORTEX-M4-SOFT: .fpu fpv4-sp-d16 +; CORTEX-M4-SOFT: .eabi_attribute 27, 1 +; CORTEX-M4-SOFT: .eabi_attribute 36, 1 +; CORTEX-M4-SOFT-NOT: .eabi_attribute 42 +; CORTEX-M4-SOFT-NOT: .eabi_attribute 44 +; CORTEX-M4-SOFT-NOT: .eabi_attribute 68 ; CORTEX-M4-SOFT-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-M4-SOFT: .eabi_attribute 20, 1 @@ -1098,13 +1103,8 @@ ; CORTEX-M4-SOFT: .eabi_attribute 23, 3 ; CORTEX-M4-SOFT: .eabi_attribute 24, 1 ; CORTEX-M4-SOFT: .eabi_attribute 25, 1 -; CORTEX-M4-SOFT: .eabi_attribute 27, 1 ; CORTEX-M4-SOFT-NOT: .eabi_attribute 28 -; CORTEX-M4-SOFT: .eabi_attribute 36, 1 ; CORTEX-M4-SOFT: .eabi_attribute 38, 1 -; CORTEX-M4-SOFT-NOT: .eabi_attribute 42 -; CORTEX-M4-SOFT-NOT: .eabi_attribute 44 -; CORTEX-M4-SOFT-NOT: .eabi_attribute 68 ; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 19 ;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when @@ -1120,6 +1120,11 @@ ; CORTEX-M4-HARD: .eabi_attribute 8, 0 ; CORTEX-M4-HARD: .eabi_attribute 9, 2 ; CORTEX-M4-HARD: .fpu fpv4-sp-d16 +; CORTEX-M4-HARD: .eabi_attribute 27, 1 +; CORTEX-M4-HARD: .eabi_attribute 36, 1 +; CORTEX-M4-HARD-NOT: .eabi_attribute 42 +; CORTEX-M4-HARD-NOT: .eabi_attribute 44 +; CORTEX-M4-HARD-NOT: .eabi_attribute 68 ; CORTEX-M4-HARD-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-M4-HARD: .eabi_attribute 20, 1 @@ -1128,13 +1133,8 @@ ; CORTEX-M4-HARD: .eabi_attribute 23, 3 ; CORTEX-M4-HARD: .eabi_attribute 24, 1 ; CORTEX-M4-HARD: .eabi_attribute 25, 1 -; CORTEX-M4-HARD: .eabi_attribute 27, 1 ; CORTEX-M4-HARD: .eabi_attribute 28, 1 -; CORTEX-M4-HARD: .eabi_attribute 36, 1 ; 
CORTEX-M4-HARD: .eabi_attribute 38, 1 -; CORTEX-M4-HARD-NOT: .eabi_attribute 42 -; CORTEX-M4-HARD-NOT: .eabi_attribute 44 -; CORTEX-M4-HARD-NOT: .eabi_attribute 68 ; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 19 ;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when @@ -1152,6 +1152,11 @@ ; CORTEX-M7-SOFT-NOT: .fpu ; CORTEX-M7-SINGLE: .fpu fpv5-sp-d16 ; CORTEX-M7-DOUBLE: .fpu fpv5-d16 +; CORTEX-M7-SOFT-NOT: .eabi_attribute 27 +; CORTEX-M7-SINGLE: .eabi_attribute 27, 1 +; CORTEX-M7-DOUBLE-NOT: .eabi_attribute 27 +; CORTEX-M7: .eabi_attribute 36, 1 +; CORTEX-M7-NOT: .eabi_attribute 44 ; CORTEX-M7: .eabi_attribute 17, 1 ; CORTEX-M7-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance @@ -1161,12 +1166,7 @@ ; CORTEX-M7: .eabi_attribute 23, 3 ; CORTEX-M7: .eabi_attribute 24, 1 ; CORTEX-M7: .eabi_attribute 25, 1 -; CORTEX-M7-SOFT-NOT: .eabi_attribute 27 -; CORTEX-M7-SINGLE: .eabi_attribute 27, 1 -; CORTEX-M7-DOUBLE-NOT: .eabi_attribute 27 -; CORTEX-M7: .eabi_attribute 36, 1 ; CORTEX-M7: .eabi_attribute 38, 1 -; CORTEX-M7-NOT: .eabi_attribute 44 ; CORTEX-M7: .eabi_attribute 14, 0 ; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 19 @@ -1186,6 +1186,10 @@ ; CORTEX-R4: .eabi_attribute 8, 1 ; CORTEX-R4: .eabi_attribute 9, 2 ; CORTEX-R4-NOT: .fpu vfpv3-d16 +; CORTEX-R4-NOT: .eabi_attribute 36 +; CORTEX-R4-NOT: .eabi_attribute 42 +; CORTEX-R4-NOT: .eabi_attribute 44 +; CORTEX-R4-NOT: .eabi_attribute 68 ; CORTEX-R4-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-R4: .eabi_attribute 20, 1 @@ -1195,11 +1199,7 @@ ; CORTEX-R4: .eabi_attribute 24, 1 ; CORTEX-R4: .eabi_attribute 25, 1 ; CORTEX-R4-NOT: .eabi_attribute 28 -; CORTEX-R4-NOT: .eabi_attribute 36 ; CORTEX-R4: .eabi_attribute 38, 1 -; CORTEX-R4-NOT: .eabi_attribute 42 -; CORTEX-R4-NOT: .eabi_attribute 44 -; CORTEX-R4-NOT: .eabi_attribute 68 ; CORTEX-R4F: .cpu cortex-r4f ; CORTEX-R4F: .eabi_attribute 6, 10 @@ -1207,6 +1207,11 @@ ; CORTEX-R4F: .eabi_attribute 8, 1 ; CORTEX-R4F: 
.eabi_attribute 9, 2 ; CORTEX-R4F: .fpu vfpv3-d16 +; CORTEX-R4F-NOT: .eabi_attribute 27, 1 +; CORTEX-R4F-NOT: .eabi_attribute 36 +; CORTEX-R4F-NOT: .eabi_attribute 42 +; CORTEX-R4F-NOT: .eabi_attribute 44 +; CORTEX-R4F-NOT: .eabi_attribute 68 ; CORTEX-R4F-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-R4F: .eabi_attribute 20, 1 @@ -1215,13 +1220,8 @@ ; CORTEX-R4F: .eabi_attribute 23, 3 ; CORTEX-R4F: .eabi_attribute 24, 1 ; CORTEX-R4F: .eabi_attribute 25, 1 -; CORTEX-R4F-NOT: .eabi_attribute 27, 1 ; CORTEX-R4F-NOT: .eabi_attribute 28 -; CORTEX-R4F-NOT: .eabi_attribute 36 ; CORTEX-R4F: .eabi_attribute 38, 1 -; CORTEX-R4F-NOT: .eabi_attribute 42 -; CORTEX-R4F-NOT: .eabi_attribute 44 -; CORTEX-R4F-NOT: .eabi_attribute 68 ; CORTEX-R5: .cpu cortex-r5 ; CORTEX-R5: .eabi_attribute 6, 10 @@ -1229,6 +1229,11 @@ ; CORTEX-R5: .eabi_attribute 8, 1 ; CORTEX-R5: .eabi_attribute 9, 2 ; CORTEX-R5: .fpu vfpv3-d16 +; CORTEX-R5-NOT: .eabi_attribute 27, 1 +; CORTEX-R5-NOT: .eabi_attribute 36 +; CORTEX-R5: .eabi_attribute 44, 2 +; CORTEX-R5-NOT: .eabi_attribute 42 +; CORTEX-R5-NOT: .eabi_attribute 68 ; CORTEX-R5-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-R5: .eabi_attribute 20, 1 @@ -1237,13 +1242,8 @@ ; CORTEX-R5: .eabi_attribute 23, 3 ; CORTEX-R5: .eabi_attribute 24, 1 ; CORTEX-R5: .eabi_attribute 25, 1 -; CORTEX-R5-NOT: .eabi_attribute 27, 1 ; CORTEX-R5-NOT: .eabi_attribute 28 -; CORTEX-R5-NOT: .eabi_attribute 36 ; CORTEX-R5: .eabi_attribute 38, 1 -; CORTEX-R5-NOT: .eabi_attribute 42 -; CORTEX-R5: .eabi_attribute 44, 2 -; CORTEX-R5-NOT: .eabi_attribute 68 ; CORTEX-R5-FAST-NOT: .eabi_attribute 19 ;; The R5 has the VFPv3 FP unit, which always flushes preserving sign. 
@@ -1258,6 +1258,10 @@ ; CORTEX-R7: .eabi_attribute 8, 1 ; CORTEX-R7: .eabi_attribute 9, 2 ; CORTEX-R7: .fpu vfpv3-d16-fp16 +; CORTEX-R7: .eabi_attribute 36, 1 +; CORTEX-R7: .eabi_attribute 42, 1 +; CORTEX-R7: .eabi_attribute 44, 2 +; CORTEX-R7-NOT: .eabi_attribute 68 ; CORTEX-R7-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-R7: .eabi_attribute 20, 1 @@ -1267,11 +1271,7 @@ ; CORTEX-R7: .eabi_attribute 24, 1 ; CORTEX-R7: .eabi_attribute 25, 1 ; CORTEX-R7-NOT: .eabi_attribute 28 -; CORTEX-R7: .eabi_attribute 36, 1 ; CORTEX-R7: .eabi_attribute 38, 1 -; CORTEX-R7: .eabi_attribute 42, 1 -; CORTEX-R7: .eabi_attribute 44, 2 -; CORTEX-R7-NOT: .eabi_attribute 68 ; CORTEX-R7-FAST-NOT: .eabi_attribute 19 ;; The R7 has the VFPv3 FP unit, which always flushes preserving sign. @@ -1286,6 +1286,10 @@ ; CORTEX-R8: .eabi_attribute 8, 1 ; CORTEX-R8: .eabi_attribute 9, 2 ; CORTEX-R8: .fpu vfpv3-d16-fp16 +; CORTEX-R8: .eabi_attribute 36, 1 +; CORTEX-R8: .eabi_attribute 42, 1 +; CORTEX-R8: .eabi_attribute 44, 2 +; CORTEX-R8-NOT: .eabi_attribute 68 ; CORTEX-R8-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-R8: .eabi_attribute 20, 1 @@ -1295,11 +1299,7 @@ ; CORTEX-R8: .eabi_attribute 24, 1 ; CORTEX-R8: .eabi_attribute 25, 1 ; CORTEX-R8-NOT: .eabi_attribute 28 -; CORTEX-R8: .eabi_attribute 36, 1 ; CORTEX-R8: .eabi_attribute 38, 1 -; CORTEX-R8: .eabi_attribute 42, 1 -; CORTEX-R8: .eabi_attribute 44, 2 -; CORTEX-R8-NOT: .eabi_attribute 68 ; CORTEX-R8-FAST-NOT: .eabi_attribute 19 ;; The R8 has the VFPv3 FP unit, which always flushes preserving sign. 
@@ -1315,6 +1315,11 @@ ; CORTEX-A32: .eabi_attribute 9, 2 ; CORTEX-A32: .fpu crypto-neon-fp-armv8 ; CORTEX-A32: .eabi_attribute 12, 3 +; CORTEX-A32-NOT: .eabi_attribute 27 +; CORTEX-A32: .eabi_attribute 36, 1 +; CORTEX-A32: .eabi_attribute 42, 1 +; CORTEX-A32-NOT: .eabi_attribute 44 +; CORTEX-A32: .eabi_attribute 68, 3 ; CORTEX-A32-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A32: .eabi_attribute 20, 1 @@ -1323,13 +1328,8 @@ ; CORTEX-A32: .eabi_attribute 23, 3 ; CORTEX-A32: .eabi_attribute 24, 1 ; CORTEX-A32: .eabi_attribute 25, 1 -; CORTEX-A32-NOT: .eabi_attribute 27 ; CORTEX-A32-NOT: .eabi_attribute 28 -; CORTEX-A32: .eabi_attribute 36, 1 ; CORTEX-A32: .eabi_attribute 38, 1 -; CORTEX-A32: .eabi_attribute 42, 1 -; CORTEX-A32-NOT: .eabi_attribute 44 -; CORTEX-A32: .eabi_attribute 68, 3 ; CORTEX-A32-FAST-NOT: .eabi_attribute 19 ;; The A32 has the ARMv8 FP unit, which always flushes preserving sign. @@ -1343,20 +1343,20 @@ ; CORTEX-M23: .eabi_attribute 7, 77 ; CORTEX-M23: .eabi_attribute 8, 0 ; CORTEX-M23: .eabi_attribute 9, 3 +; CORTEX-M23-NOT: .eabi_attribute 27 +; CORTEX-M23: .eabi_attribute 34, 1 +; CORTEX-M23-NOT: .eabi_attribute 44 ; CORTEX-M23: .eabi_attribute 17, 1 ;; We default to IEEE 754 compliance ; CORTEX-M23-NOT: .eabi_attribute 19 ; CORTEX-M23: .eabi_attribute 20, 1 ; CORTEX-M23: .eabi_attribute 21, 1 ; CORTEX-M23: .eabi_attribute 23, 3 -; CORTEX-M23: .eabi_attribute 34, 1 ; CORTEX-M23: .eabi_attribute 24, 1 -; CORTEX-M23-NOT: .eabi_attribute 27 ; CORTEX-M23-NOT: .eabi_attribute 28 ; CORTEX-M23: .eabi_attribute 25, 1 ; CORTEX-M23: .eabi_attribute 38, 1 ; CORTEX-M23: .eabi_attribute 14, 0 -; CORTEX-M23-NOT: .eabi_attribute 44 ; CORTEX-M33: .cpu cortex-m33 ; CORTEX-M33: .eabi_attribute 6, 17 @@ -1364,21 +1364,21 @@ ; CORTEX-M33: .eabi_attribute 8, 0 ; CORTEX-M33: .eabi_attribute 9, 3 ; CORTEX-M33: .fpu fpv5-sp-d16 +; CORTEX-M33: .eabi_attribute 27, 1 +; CORTEX-M33: .eabi_attribute 36, 1 +; CORTEX-M33-NOT: .eabi_attribute 44 +; 
CORTEX-M33: .eabi_attribute 46, 1 +; CORTEX-M33: .eabi_attribute 34, 1 ; CORTEX-M33: .eabi_attribute 17, 1 ;; We default to IEEE 754 compliance ; CORTEX-M23-NOT: .eabi_attribute 19 ; CORTEX-M33: .eabi_attribute 20, 1 ; CORTEX-M33: .eabi_attribute 21, 1 ; CORTEX-M33: .eabi_attribute 23, 3 -; CORTEX-M33: .eabi_attribute 34, 1 ; CORTEX-M33: .eabi_attribute 24, 1 ; CORTEX-M33: .eabi_attribute 25, 1 -; CORTEX-M33: .eabi_attribute 27, 1 ; CORTEX-M33-NOT: .eabi_attribute 28 -; CORTEX-M33: .eabi_attribute 36, 1 ; CORTEX-M33: .eabi_attribute 38, 1 -; CORTEX-M33: .eabi_attribute 46, 1 -; CORTEX-M33-NOT: .eabi_attribute 44 ; CORTEX-M33: .eabi_attribute 14, 0 ; CORTEX-M33-FAST-NOT: .eabi_attribute 19 @@ -1394,6 +1394,11 @@ ; CORTEX-A35: .eabi_attribute 9, 2 ; CORTEX-A35: .fpu crypto-neon-fp-armv8 ; CORTEX-A35: .eabi_attribute 12, 3 +; CORTEX-A35-NOT: .eabi_attribute 27 +; CORTEX-A35: .eabi_attribute 36, 1 +; CORTEX-A35: .eabi_attribute 42, 1 +; CORTEX-A35-NOT: .eabi_attribute 44 +; CORTEX-A35: .eabi_attribute 68, 3 ; CORTEX-A35-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A35: .eabi_attribute 20, 1 @@ -1402,13 +1407,8 @@ ; CORTEX-A35: .eabi_attribute 23, 3 ; CORTEX-A35: .eabi_attribute 24, 1 ; CORTEX-A35: .eabi_attribute 25, 1 -; CORTEX-A35-NOT: .eabi_attribute 27 ; CORTEX-A35-NOT: .eabi_attribute 28 -; CORTEX-A35: .eabi_attribute 36, 1 ; CORTEX-A35: .eabi_attribute 38, 1 -; CORTEX-A35: .eabi_attribute 42, 1 -; CORTEX-A35-NOT: .eabi_attribute 44 -; CORTEX-A35: .eabi_attribute 68, 3 ; CORTEX-A35-FAST-NOT: .eabi_attribute 19 ;; The A35 has the ARMv8 FP unit, which always flushes preserving sign. 
@@ -1424,6 +1424,11 @@ ; CORTEX-A53: .eabi_attribute 9, 2 ; CORTEX-A53: .fpu crypto-neon-fp-armv8 ; CORTEX-A53: .eabi_attribute 12, 3 +; CORTEX-A53-NOT: .eabi_attribute 27 +; CORTEX-A53: .eabi_attribute 36, 1 +; CORTEX-A53: .eabi_attribute 42, 1 +; CORTEX-A53-NOT: .eabi_attribute 44 +; CORTEX-A53: .eabi_attribute 68, 3 ; CORTEX-A53-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A53: .eabi_attribute 20, 1 @@ -1432,13 +1437,8 @@ ; CORTEX-A53: .eabi_attribute 23, 3 ; CORTEX-A53: .eabi_attribute 24, 1 ; CORTEX-A53: .eabi_attribute 25, 1 -; CORTEX-A53-NOT: .eabi_attribute 27 ; CORTEX-A53-NOT: .eabi_attribute 28 -; CORTEX-A53: .eabi_attribute 36, 1 ; CORTEX-A53: .eabi_attribute 38, 1 -; CORTEX-A53: .eabi_attribute 42, 1 -; CORTEX-A53-NOT: .eabi_attribute 44 -; CORTEX-A53: .eabi_attribute 68, 3 ; CORTEX-A53-FAST-NOT: .eabi_attribute 19 ;; The A53 has the ARMv8 FP unit, which always flushes preserving sign. @@ -1454,6 +1454,11 @@ ; CORTEX-A57: .eabi_attribute 9, 2 ; CORTEX-A57: .fpu crypto-neon-fp-armv8 ; CORTEX-A57: .eabi_attribute 12, 3 +; CORTEX-A57-NOT: .eabi_attribute 27 +; CORTEX-A57: .eabi_attribute 36, 1 +; CORTEX-A57: .eabi_attribute 42, 1 +; CORTEX-A57-NOT: .eabi_attribute 44 +; CORTEX-A57: .eabi_attribute 68, 3 ; CORTEX-A57-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A57: .eabi_attribute 20, 1 @@ -1462,13 +1467,8 @@ ; CORTEX-A57: .eabi_attribute 23, 3 ; CORTEX-A57: .eabi_attribute 24, 1 ; CORTEX-A57: .eabi_attribute 25, 1 -; CORTEX-A57-NOT: .eabi_attribute 27 ; CORTEX-A57-NOT: .eabi_attribute 28 -; CORTEX-A57: .eabi_attribute 36, 1 ; CORTEX-A57: .eabi_attribute 38, 1 -; CORTEX-A57: .eabi_attribute 42, 1 -; CORTEX-A57-NOT: .eabi_attribute 44 -; CORTEX-A57: .eabi_attribute 68, 3 ; CORTEX-A57-FAST-NOT: .eabi_attribute 19 ;; The A57 has the ARMv8 FP unit, which always flushes preserving sign. 
@@ -1484,6 +1484,11 @@ ; CORTEX-A72: .eabi_attribute 9, 2 ; CORTEX-A72: .fpu crypto-neon-fp-armv8 ; CORTEX-A72: .eabi_attribute 12, 3 +; CORTEX-A72-NOT: .eabi_attribute 27 +; CORTEX-A72: .eabi_attribute 36, 1 +; CORTEX-A72: .eabi_attribute 42, 1 +; CORTEX-A72-NOT: .eabi_attribute 44 +; CORTEX-A72: .eabi_attribute 68, 3 ; CORTEX-A72-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A72: .eabi_attribute 20, 1 @@ -1492,13 +1497,8 @@ ; CORTEX-A72: .eabi_attribute 23, 3 ; CORTEX-A72: .eabi_attribute 24, 1 ; CORTEX-A72: .eabi_attribute 25, 1 -; CORTEX-A72-NOT: .eabi_attribute 27 ; CORTEX-A72-NOT: .eabi_attribute 28 -; CORTEX-A72: .eabi_attribute 36, 1 ; CORTEX-A72: .eabi_attribute 38, 1 -; CORTEX-A72: .eabi_attribute 42, 1 -; CORTEX-A72-NOT: .eabi_attribute 44 -; CORTEX-A72: .eabi_attribute 68, 3 ; CORTEX-A72-FAST-NOT: .eabi_attribute 19 ;; The A72 has the ARMv8 FP unit, which always flushes preserving sign. @@ -1514,6 +1514,11 @@ ; CORTEX-A73: .eabi_attribute 9, 2 ; CORTEX-A73: .fpu crypto-neon-fp-armv8 ; CORTEX-A73: .eabi_attribute 12, 3 +; CORTEX-A73-NOT: .eabi_attribute 27 +; CORTEX-A73: .eabi_attribute 36, 1 +; CORTEX-A73: .eabi_attribute 42, 1 +; CORTEX-A73-NOT: .eabi_attribute 44 +; CORTEX-A73: .eabi_attribute 68, 3 ; CORTEX-A73-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; CORTEX-A73: .eabi_attribute 20, 1 @@ -1522,14 +1527,9 @@ ; CORTEX-A73: .eabi_attribute 23, 3 ; CORTEX-A73: .eabi_attribute 24, 1 ; CORTEX-A73: .eabi_attribute 25, 1 -; CORTEX-A73-NOT: .eabi_attribute 27 ; CORTEX-A73-NOT: .eabi_attribute 28 -; CORTEX-A73: .eabi_attribute 36, 1 ; CORTEX-A73: .eabi_attribute 38, 1 -; CORTEX-A73: .eabi_attribute 42, 1 -; CORTEX-A73-NOT: .eabi_attribute 44 ; CORTEX-A73: .eabi_attribute 14, 0 -; CORTEX-A73: .eabi_attribute 68, 3 ; EXYNOS-M1: .cpu exynos-m1 ; EXYNOS-M1: .eabi_attribute 6, 14 @@ -1538,6 +1538,11 @@ ; EXYNOS-M1: .eabi_attribute 9, 2 ; EXYNOS-M1: .fpu crypto-neon-fp-armv8 ; EXYNOS-M1: .eabi_attribute 12, 3 +; 
EXYNOS-M1-NOT: .eabi_attribute 27 +; EXYNOS-M1: .eabi_attribute 36, 1 +; EXYNOS-M1: .eabi_attribute 42, 1 +; EXYNOS-M1-NOT: .eabi_attribute 44 +; EXYNOS-M1: .eabi_attribute 68, 3 ; EXYNOS-M1-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; EXYNOS-M1: .eabi_attribute 20, 1 @@ -1546,13 +1551,8 @@ ; EXYNOS-M1: .eabi_attribute 23, 3 ; EXYNOS-M1: .eabi_attribute 24, 1 ; EXYNOS-M1: .eabi_attribute 25, 1 -; EXYNOS-M1-NOT: .eabi_attribute 27 ; EXYNOS-M1-NOT: .eabi_attribute 28 -; EXYNOS-M1: .eabi_attribute 36, 1 ; EXYNOS-M1: .eabi_attribute 38, 1 -; EXYNOS-M1: .eabi_attribute 42, 1 -; EXYNOS-M1-NOT: .eabi_attribute 44 -; EXYNOS-M1: .eabi_attribute 68, 3 ; EXYNOS-M1-FAST-NOT: .eabi_attribute 19 ;; The exynos-m1 has the ARMv8 FP unit, which always flushes preserving sign. @@ -1568,6 +1568,11 @@ ; EXYNOS-M2: .eabi_attribute 9, 2 ; EXYNOS-M2: .fpu crypto-neon-fp-armv8 ; EXYNOS-M2: .eabi_attribute 12, 3 +; EXYNOS-M2-NOT: .eabi_attribute 27 +; EXYNOS-M2: .eabi_attribute 36, 1 +; EXYNOS-M2: .eabi_attribute 42, 1 +; EXYNOS-M2-NOT: .eabi_attribute 44 +; EXYNOS-M2: .eabi_attribute 68, 3 ; EXYNOS-M2-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; EXYNOS-M2: .eabi_attribute 20, 1 @@ -1576,13 +1581,8 @@ ; EXYNOS-M2: .eabi_attribute 23, 3 ; EXYNOS-M2: .eabi_attribute 24, 1 ; EXYNOS-M2: .eabi_attribute 25, 1 -; EXYNOS-M2-NOT: .eabi_attribute 27 ; EXYNOS-M2-NOT: .eabi_attribute 28 -; EXYNOS-M2: .eabi_attribute 36, 1 ; EXYNOS-M2: .eabi_attribute 38, 1 -; EXYNOS-M2: .eabi_attribute 42, 1 -; EXYNOS-M2-NOT: .eabi_attribute 44 -; EXYNOS-M2: .eabi_attribute 68, 3 ; EXYNOS-M3: .cpu exynos-m3 ; EXYNOS-M3: .eabi_attribute 6, 14 @@ -1591,6 +1591,11 @@ ; EXYNOS-M3: .eabi_attribute 9, 2 ; EXYNOS-M3: .fpu crypto-neon-fp-armv8 ; EXYNOS-M3: .eabi_attribute 12, 3 +; EXYNOS-M3-NOT: .eabi_attribute 27 +; EXYNOS-M3: .eabi_attribute 36, 1 +; EXYNOS-M3: .eabi_attribute 42, 1 +; EXYNOS-M3-NOT: .eabi_attribute 44 +; EXYNOS-M3: .eabi_attribute 68, 3 ; EXYNOS-M3-NOT: .eabi_attribute 
19 ;; We default to IEEE 754 compliance ; EXYNOS-M3: .eabi_attribute 20, 1 @@ -1599,13 +1604,8 @@ ; EXYNOS-M3: .eabi_attribute 23, 3 ; EXYNOS-M3: .eabi_attribute 24, 1 ; EXYNOS-M3: .eabi_attribute 25, 1 -; EXYNOS-M3-NOT: .eabi_attribute 27 ; EXYNOS-M3-NOT: .eabi_attribute 28 -; EXYNOS-M3: .eabi_attribute 36, 1 ; EXYNOS-M3: .eabi_attribute 38, 1 -; EXYNOS-M3: .eabi_attribute 42, 1 -; EXYNOS-M3-NOT: .eabi_attribute 44 -; EXYNOS-M3: .eabi_attribute 68, 3 ; GENERIC-FPU-VFPV3-FP16: .fpu vfpv3-fp16 ; GENERIC-FPU-VFPV3-D16-FP16: .fpu vfpv3-d16-fp16 @@ -1619,6 +1619,11 @@ ; GENERIC-ARMV8_1-A: .eabi_attribute 9, 2 ; GENERIC-ARMV8_1-A: .fpu crypto-neon-fp-armv8 ; GENERIC-ARMV8_1-A: .eabi_attribute 12, 4 +; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 27 +; GENERIC-ARMV8_1-A: .eabi_attribute 36, 1 +; GENERIC-ARMV8_1-A: .eabi_attribute 42, 1 +; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 44 +; GENERIC-ARMV8_1-A: .eabi_attribute 68, 3 ; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 19 ;; We default to IEEE 754 compliance ; GENERIC-ARMV8_1-A: .eabi_attribute 20, 1 @@ -1627,13 +1632,8 @@ ; GENERIC-ARMV8_1-A: .eabi_attribute 23, 3 ; GENERIC-ARMV8_1-A: .eabi_attribute 24, 1 ; GENERIC-ARMV8_1-A: .eabi_attribute 25, 1 -; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 27 ; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 28 -; GENERIC-ARMV8_1-A: .eabi_attribute 36, 1 ; GENERIC-ARMV8_1-A: .eabi_attribute 38, 1 -; GENERIC-ARMV8_1-A: .eabi_attribute 42, 1 -; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 44 -; GENERIC-ARMV8_1-A: .eabi_attribute 68, 3 ; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 19 ;; GENERIC-ARMV8_1-A has the ARMv8 FP unit, which always flushes preserving sign. 
@@ -1670,23 +1670,16 @@ ; ARMv8R-SP-NOT: .eabi_attribute 12 ; ARMv8R-NEON: .fpu neon-fp-armv8 ; ARMv8R-NEON: .eabi_attribute 12, 3 @ Tag_Advanced_SIMD_arch -; ARMv8R: .eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use -; ARMv8R: .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal -; ARMv8R: .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions -; ARMv8R: .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model -; ARMv8R: .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access -; ARMv8R: .eabi_attribute 24, 1 @ Tag_ABI_align_needed -; ARMv8R: .eabi_attribute 25, 1 @ Tag_ABI_align_preserved ; ARMv8R-NOFPU-NOT: .eabi_attribute 27 ; ARMv8R-SP: .eabi_attribute 27, 1 @ Tag_ABI_HardFP_use ; ARMv8R-NEON-NOT: .eabi_attribute 27 ; ARMv8R-NOFPU-NOT: .eabi_attribute 36 ; ARMv8R-SP: .eabi_attribute 36, 1 @ Tag_FP_HP_extension ; ARMv8R-NEON: .eabi_attribute 36, 1 @ Tag_FP_HP_extension -; ARMv8R: .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format ; ARMv8R: .eabi_attribute 42, 1 @ Tag_MPextension_use -; ARMv8R: .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use ; ARMv8R: .eabi_attribute 68, 2 @ Tag_Virtualization_use +; ARMv8R: .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format +; ARMv8R: .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use define i32 @f(i64 %z) { ret i32 0 diff --git a/test/CodeGen/ARM/darwin-tls-preserved.ll b/test/CodeGen/ARM/darwin-tls-preserved.ll new file mode 100644 index 0000000000000..4969fabfd9b3c --- /dev/null +++ b/test/CodeGen/ARM/darwin-tls-preserved.ll @@ -0,0 +1,24 @@ +; RUN: llc -mtriple=thumbv7k-apple-watchos2.0 -arm-atomic-cfg-tidy=0 -o - %s | FileCheck %s + +@tls_var = thread_local global i32 0 + +; r9 and r12 can be live across the asm, but those get clobbered by the TLS +; access (in a different BB to order it). 
+define i32 @test_regs_preserved(i32* %ptr1, i32* %ptr2, i1 %tst1) { +; CHECK-LABEL: test_regs_preserved: +; CHECK: str {{.*}}, [sp +; CHECK: mov {{.*}}, r12 +entry: + call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r10},~{r11},~{r13},~{lr}"() + br i1 %tst1, label %get_tls, label %done + +get_tls: + %val = load i32, i32* @tls_var + br label %done + +done: + %res = phi i32 [%val, %get_tls], [0, %entry] + store i32 42, i32* %ptr1 + store i32 42, i32* %ptr2 + ret i32 %res +} diff --git a/test/CodeGen/ARM/divmod-hwdiv.ll b/test/CodeGen/ARM/divmod-hwdiv.ll new file mode 100644 index 0000000000000..4cc316ffa3ea6 --- /dev/null +++ b/test/CodeGen/ARM/divmod-hwdiv.ll @@ -0,0 +1,37 @@ +; The hwdiv subtarget feature should only influence thumb, not arm. +; RUN: llc < %s -mtriple=arm-gnueabi -mattr=+hwdiv | FileCheck %s -check-prefixes=ALL,AEABI-NOHWDIV +; RUN: llc < %s -mtriple=arm-gnueabi -mattr=-hwdiv | FileCheck %s -check-prefixes=ALL,AEABI-NOHWDIV +; RUN: llc < %s -mtriple=thumbv7-gnueabi -mattr=+hwdiv | FileCheck %s -check-prefixes=ALL,THUMB-HWDIV +; RUN: llc < %s -mtriple=thumbv7-gnueabi -mattr=-hwdiv | FileCheck %s -check-prefixes=ALL,AEABI-NOHWDIV + +; The hwdiv-arm subtarget feature should only influence arm, not thumb. 
+; RUN: llc < %s -mtriple=arm-gnueabi -mattr=+hwdiv-arm | FileCheck %s -check-prefixes=ALL,ARM-HWDIV +; RUN: llc < %s -mtriple=arm-gnueabi -mattr=-hwdiv-arm | FileCheck %s -check-prefixes=ALL,AEABI-NOHWDIV +; RUN: llc < %s -mtriple=thumbv7-gnueabi -mattr=+hwdiv-arm | FileCheck %s -check-prefixes=ALL,AEABI-NOHWDIV +; RUN: llc < %s -mtriple=thumbv7-gnueabi -mattr=-hwdiv-arm | FileCheck %s -check-prefixes=ALL,AEABI-NOHWDIV + +define arm_aapcscc i32 @test_i32_srem(i32 %x, i32 %y) { +; ALL-LABEL: test_i32_srem: +; ARM-HWDIV: sdiv [[Q:r[0-9]+]], r0, r1 +; ARM-HWDIV: mul [[P:r[0-9]+]], [[Q]], r1 +; ARM-HWDIV: sub r0, r0, [[P]] +; THUMB-HWDIV: sdiv [[Q:r[0-9]+]], r0, r1 +; THUMB-HWDIV: mls r0, [[Q]], r1, r0 +; AEABI-NOHWDIV: bl __aeabi_idivmod +; AEABI-NOHWDIV: mov r0, r1 + %r = srem i32 %x, %y + ret i32 %r +} + +define arm_aapcscc i32 @test_i32_urem(i32 %x, i32 %y) { +; ALL-LABEL: test_i32_urem: +; ARM-HWDIV: udiv [[Q:r[0-9]+]], r0, r1 +; ARM-HWDIV: mul [[P:r[0-9]+]], [[Q]], r1 +; ARM-HWDIV: sub r0, r0, [[P]] +; THUMB-HWDIV: udiv [[Q:r[0-9]+]], r0, r1 +; THUMB-HWDIV: mls r0, [[Q]], r1, r0 +; AEABI-NOHWDIV: bl __aeabi_uidivmod +; AEABI-NOHWDIV: mov r0, r1 + %r = urem i32 %x, %y + ret i32 %r +} diff --git a/test/CodeGen/ARM/fpoffset_overflow.mir b/test/CodeGen/ARM/fpoffset_overflow.mir new file mode 100644 index 0000000000000..9c6cd931b1532 --- /dev/null +++ b/test/CodeGen/ARM/fpoffset_overflow.mir @@ -0,0 +1,94 @@ +# RUN: llc -o - %s -mtriple=thumbv7-- -run-pass=stack-protector -run-pass=prologepilog | FileCheck %s +--- +# This should trigger an emergency spill in the register scavenger because the +# frame offset into the large argument is too large. 
+# CHECK-LABEL: name: func0 +# CHECK: t2STRi12 killed %r7, %sp, 0, 14, _ :: (store 4 into %stack.0) +# CHECK: %r7 = t2ADDri killed %sp, 4096, 14, _, _ +# CHECK: %r11 = t2LDRi12 killed %r7, 36, 14, _ :: (load 4) +# CHECK: %r7 = t2LDRi12 %sp, 0, 14, _ :: (load 4 from %stack.0) +name: func0 +tracksRegLiveness: true +fixedStack: + - { id: 0, offset: 4084, size: 4, alignment: 4, isImmutable: true, + isAliased: false } + - { id: 1, offset: -12, size: 4096, alignment: 4, isImmutable: false, + isAliased: false } +body: | + bb.0: + %r0 = IMPLICIT_DEF + %r1 = IMPLICIT_DEF + %r2 = IMPLICIT_DEF + %r3 = IMPLICIT_DEF + %r4 = IMPLICIT_DEF + %r5 = IMPLICIT_DEF + %r6 = IMPLICIT_DEF + %r8 = IMPLICIT_DEF + %r9 = IMPLICIT_DEF + %r10 = IMPLICIT_DEF + %r11 = IMPLICIT_DEF + %r12 = IMPLICIT_DEF + %lr = IMPLICIT_DEF + + %r11 = t2LDRi12 %fixed-stack.0, 0, 14, _ :: (load 4) + + KILL %r0 + KILL %r1 + KILL %r2 + KILL %r3 + KILL %r4 + KILL %r5 + KILL %r6 + KILL %r8 + KILL %r9 + KILL %r10 + KILL %r11 + KILL %r12 + KILL %lr +... +--- +# This should not trigger an emergency spill yet. +# CHECK-LABEL: name: func1 +# CHECK-NOT: t2STRi12 +# CHECK-NOT: t2ADDri +# CHECK: %r11 = t2LDRi12 %sp, 4092, 14, _ :: (load 4) +# CHECK-NOT: t2LDRi12 +name: func1 +tracksRegLiveness: true +fixedStack: + - { id: 0, offset: 4044, size: 4, alignment: 4, isImmutable: true, + isAliased: false } + - { id: 1, offset: -12, size: 4056, alignment: 4, isImmutable: false, + isAliased: false } +body: | + bb.0: + %r0 = IMPLICIT_DEF + %r1 = IMPLICIT_DEF + %r2 = IMPLICIT_DEF + %r3 = IMPLICIT_DEF + %r4 = IMPLICIT_DEF + %r5 = IMPLICIT_DEF + %r6 = IMPLICIT_DEF + %r8 = IMPLICIT_DEF + %r9 = IMPLICIT_DEF + %r10 = IMPLICIT_DEF + %r11 = IMPLICIT_DEF + %r12 = IMPLICIT_DEF + %lr = IMPLICIT_DEF + + %r11 = t2LDRi12 %fixed-stack.0, 0, 14, _ :: (load 4) + + KILL %r0 + KILL %r1 + KILL %r2 + KILL %r3 + KILL %r4 + KILL %r5 + KILL %r6 + KILL %r8 + KILL %r9 + KILL %r10 + KILL %r11 + KILL %r12 + KILL %lr +... 
diff --git a/test/CodeGen/ARM/memcpy-inline.ll b/test/CodeGen/ARM/memcpy-inline.ll index d874884dcb393..fb204debf6127 100644 --- a/test/CodeGen/ARM/memcpy-inline.ll +++ b/test/CodeGen/ARM/memcpy-inline.ll @@ -30,10 +30,9 @@ entry: define void @t1(i8* nocapture %C) nounwind { entry: ; CHECK-LABEL: t1: -; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1] -; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0] -; CHECK: adds r0, #15 -; CHECK: adds r1, #15 +; CHECK: movs [[INC:r[0-9]+]], #15 +; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1], [[INC]] +; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0], [[INC]] ; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1] ; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0] tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i32 1, i1 false) @@ -43,13 +42,15 @@ entry: define void @t2(i8* nocapture %C) nounwind { entry: ; CHECK-LABEL: t2: +; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]! +; CHECK: movs [[INC:r[0-9]+]], #32 +; CHECK: add.w r3, r0, #16 +; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0], [[INC]] ; CHECK: movw [[REG2:r[0-9]+]], #16716 ; CHECK: movt [[REG2:r[0-9]+]], #72 -; CHECK: str [[REG2]], [r0, #32] -; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]! -; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]! 
+; CHECK: str [[REG2]], [r0] ; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1] -; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0] +; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r3] tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false) ret void } diff --git a/test/CodeGen/ARM/memset-inline.ll b/test/CodeGen/ARM/memset-inline.ll index f6f8d5623509e..b86874692acad 100644 --- a/test/CodeGen/ARM/memset-inline.ll +++ b/test/CodeGen/ARM/memset-inline.ll @@ -13,10 +13,10 @@ entry: define void @t2() nounwind ssp { entry: ; CHECK-LABEL: t2: -; CHECK: add.w r1, r0, #10 ; CHECK: vmov.i32 {{q[0-9]+}}, #0x0 -; CHECK: vst1.16 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1] -; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0] +; CHECK: movs r1, #10 +; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r2], r1 +; CHECK: vst1.16 {d{{[0-9]+}}, d{{[0-9]+}}}, [r2] %buf = alloca [26 x i8], align 1 %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0 call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false) diff --git a/test/CodeGen/ARM/vbits.ll b/test/CodeGen/ARM/vbits.ll index db9bc6ccdd0c8..0a7f7698fa88c 100644 --- a/test/CodeGen/ARM/vbits.ll +++ b/test/CodeGen/ARM/vbits.ll @@ -1,8 +1,14 @@ -; RUN: llc -mtriple=arm-eabi -mattr=+neon -mcpu=cortex-a8 %s -o - | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=arm-eabi -mattr=+neon -mcpu=cortex-a8 | FileCheck %s define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK-LABEL: v_andi8: -;CHECK: vand +; CHECK-LABEL: v_andi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vand d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = and <8 x i8> %tmp1, %tmp2 @@ -10,8 +16,13 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) 
nounwind { } define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK-LABEL: v_andi16: -;CHECK: vand +; CHECK-LABEL: v_andi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vand d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = and <4 x i16> %tmp1, %tmp2 @@ -19,8 +30,13 @@ define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { } define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK-LABEL: v_andi32: -;CHECK: vand +; CHECK-LABEL: v_andi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vand d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = and <2 x i32> %tmp1, %tmp2 @@ -28,8 +44,13 @@ define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { } define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind { -;CHECK-LABEL: v_andi64: -;CHECK: vand +; CHECK-LABEL: v_andi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vand d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <1 x i64>, <1 x i64>* %A %tmp2 = load <1 x i64>, <1 x i64>* %B %tmp3 = and <1 x i64> %tmp1, %tmp2 @@ -37,8 +58,14 @@ define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind { } define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK-LABEL: v_andQi8: -;CHECK: vand +; CHECK-LABEL: v_andQi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vand q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = and <16 x i8> %tmp1, %tmp2 @@ -46,8 +73,14 @@ define <16 x 
i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK-LABEL: v_andQi16: -;CHECK: vand +; CHECK-LABEL: v_andQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vand q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = and <8 x i16> %tmp1, %tmp2 @@ -55,8 +88,14 @@ define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { } define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK-LABEL: v_andQi32: -;CHECK: vand +; CHECK-LABEL: v_andQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vand q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = and <4 x i32> %tmp1, %tmp2 @@ -64,8 +103,14 @@ define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { } define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { -;CHECK-LABEL: v_andQi64: -;CHECK: vand +; CHECK-LABEL: v_andQi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vand q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i64>, <2 x i64>* %A %tmp2 = load <2 x i64>, <2 x i64>* %B %tmp3 = and <2 x i64> %tmp1, %tmp2 @@ -73,8 +118,13 @@ define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { } define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK-LABEL: v_bici8: -;CHECK: vbic +; CHECK-LABEL: v_bici8: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vbic d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: 
bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > @@ -83,8 +133,13 @@ define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind { } define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK-LABEL: v_bici16: -;CHECK: vbic +; CHECK-LABEL: v_bici16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vbic d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = xor <4 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1 > @@ -93,8 +148,13 @@ define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind { } define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK-LABEL: v_bici32: -;CHECK: vbic +; CHECK-LABEL: v_bici32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vbic d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = xor <2 x i32> %tmp2, < i32 -1, i32 -1 > @@ -103,8 +163,13 @@ define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind { } define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind { -;CHECK-LABEL: v_bici64: -;CHECK: vbic +; CHECK-LABEL: v_bici64: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vbic d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <1 x i64>, <1 x i64>* %A %tmp2 = load <1 x i64>, <1 x i64>* %B %tmp3 = xor <1 x i64> %tmp2, < i64 -1 > @@ -113,8 +178,14 @@ define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind { } define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK-LABEL: v_bicQi8: -;CHECK: vbic +; CHECK-LABEL: v_bicQi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; 
CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vbic q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > @@ -123,8 +194,14 @@ define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK-LABEL: v_bicQi16: -;CHECK: vbic +; CHECK-LABEL: v_bicQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vbic q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = xor <8 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > @@ -133,8 +210,14 @@ define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { } define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK-LABEL: v_bicQi32: -;CHECK: vbic +; CHECK-LABEL: v_bicQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vbic q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = xor <4 x i32> %tmp2, < i32 -1, i32 -1, i32 -1, i32 -1 > @@ -143,8 +226,14 @@ define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { } define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { -;CHECK-LABEL: v_bicQi64: -;CHECK: vbic +; CHECK-LABEL: v_bicQi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vbic q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; 
CHECK-NEXT: bx lr %tmp1 = load <2 x i64>, <2 x i64>* %A %tmp2 = load <2 x i64>, <2 x i64>* %B %tmp3 = xor <2 x i64> %tmp2, < i64 -1, i64 -1 > @@ -153,8 +242,13 @@ define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { } define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK-LABEL: v_eori8: -;CHECK: veor +; CHECK-LABEL: v_eori8: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: veor d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = xor <8 x i8> %tmp1, %tmp2 @@ -162,8 +256,13 @@ define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind { } define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK-LABEL: v_eori16: -;CHECK: veor +; CHECK-LABEL: v_eori16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: veor d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = xor <4 x i16> %tmp1, %tmp2 @@ -171,8 +270,13 @@ define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind { } define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK-LABEL: v_eori32: -;CHECK: veor +; CHECK-LABEL: v_eori32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: veor d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = xor <2 x i32> %tmp1, %tmp2 @@ -180,8 +284,13 @@ define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind { } define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind { -;CHECK-LABEL: v_eori64: -;CHECK: veor +; CHECK-LABEL: v_eori64: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: veor d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; 
CHECK-NEXT: bx lr %tmp1 = load <1 x i64>, <1 x i64>* %A %tmp2 = load <1 x i64>, <1 x i64>* %B %tmp3 = xor <1 x i64> %tmp1, %tmp2 @@ -189,8 +298,14 @@ define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind { } define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK-LABEL: v_eorQi8: -;CHECK: veor +; CHECK-LABEL: v_eorQi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: veor q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = xor <16 x i8> %tmp1, %tmp2 @@ -198,8 +313,14 @@ define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK-LABEL: v_eorQi16: -;CHECK: veor +; CHECK-LABEL: v_eorQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: veor q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = xor <8 x i16> %tmp1, %tmp2 @@ -207,8 +328,14 @@ define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { } define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK-LABEL: v_eorQi32: -;CHECK: veor +; CHECK-LABEL: v_eorQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: veor q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = xor <4 x i32> %tmp1, %tmp2 @@ -216,8 +343,14 @@ define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { } define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { -;CHECK-LABEL: v_eorQi64: -;CHECK: veor +; CHECK-LABEL: 
v_eorQi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: veor q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i64>, <2 x i64>* %A %tmp2 = load <2 x i64>, <2 x i64>* %B %tmp3 = xor <2 x i64> %tmp1, %tmp2 @@ -225,72 +358,113 @@ define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { } define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind { -;CHECK-LABEL: v_mvni8: -;CHECK: vmvn +; CHECK-LABEL: v_mvni8: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vmvn d16, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > ret <8 x i8> %tmp2 } define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind { -;CHECK-LABEL: v_mvni16: -;CHECK: vmvn +; CHECK-LABEL: v_mvni16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vmvn d16, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 > ret <4 x i16> %tmp2 } define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind { -;CHECK-LABEL: v_mvni32: -;CHECK: vmvn +; CHECK-LABEL: v_mvni32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vmvn d16, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 > ret <2 x i32> %tmp2 } define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind { -;CHECK-LABEL: v_mvni64: -;CHECK: vmvn +; CHECK-LABEL: v_mvni64: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vmvn d16, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <1 x i64>, <1 x i64>* %A %tmp2 = xor <1 x i64> %tmp1, < i64 -1 > ret <1 x i64> %tmp2 } define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind { -;CHECK-LABEL: v_mvnQi8: -;CHECK: vmvn +; 
CHECK-LABEL: v_mvnQi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vmvn q8, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp2 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > ret <16 x i8> %tmp2 } define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind { -;CHECK-LABEL: v_mvnQi16: -;CHECK: vmvn +; CHECK-LABEL: v_mvnQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vmvn q8, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > ret <8 x i16> %tmp2 } define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind { -;CHECK-LABEL: v_mvnQi32: -;CHECK: vmvn +; CHECK-LABEL: v_mvnQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vmvn q8, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 > ret <4 x i32> %tmp2 } define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind { -;CHECK-LABEL: v_mvnQi64: -;CHECK: vmvn +; CHECK-LABEL: v_mvnQi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vmvn q8, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i64>, <2 x i64>* %A %tmp2 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 > ret <2 x i64> %tmp2 } define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK-LABEL: v_orri8: -;CHECK: vorr +; CHECK-LABEL: v_orri8: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorr d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load 
<8 x i8>, <8 x i8>* %B %tmp3 = or <8 x i8> %tmp1, %tmp2 @@ -298,8 +472,13 @@ define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind { } define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK-LABEL: v_orri16: -;CHECK: vorr +; CHECK-LABEL: v_orri16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorr d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = or <4 x i16> %tmp1, %tmp2 @@ -307,8 +486,13 @@ define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind { } define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK-LABEL: v_orri32: -;CHECK: vorr +; CHECK-LABEL: v_orri32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorr d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = or <2 x i32> %tmp1, %tmp2 @@ -316,8 +500,13 @@ define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind { } define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind { -;CHECK-LABEL: v_orri64: -;CHECK: vorr +; CHECK-LABEL: v_orri64: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorr d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <1 x i64>, <1 x i64>* %A %tmp2 = load <1 x i64>, <1 x i64>* %B %tmp3 = or <1 x i64> %tmp1, %tmp2 @@ -325,8 +514,14 @@ define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind { } define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK-LABEL: v_orrQi8: -;CHECK: vorr +; CHECK-LABEL: v_orrQi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorr q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load 
<16 x i8>, <16 x i8>* %A %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = or <16 x i8> %tmp1, %tmp2 @@ -334,8 +529,14 @@ define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK-LABEL: v_orrQi16: -;CHECK: vorr +; CHECK-LABEL: v_orrQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorr q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = or <8 x i16> %tmp1, %tmp2 @@ -343,8 +544,14 @@ define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { } define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK-LABEL: v_orrQi32: -;CHECK: vorr +; CHECK-LABEL: v_orrQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorr q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = or <4 x i32> %tmp1, %tmp2 @@ -352,8 +559,14 @@ define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { } define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { -;CHECK-LABEL: v_orrQi64: -;CHECK: vorr +; CHECK-LABEL: v_orrQi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorr q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i64>, <2 x i64>* %A %tmp2 = load <2 x i64>, <2 x i64>* %B %tmp3 = or <2 x i64> %tmp1, %tmp2 @@ -361,8 +574,13 @@ define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { } define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK-LABEL: v_orni8: -;CHECK: vorn +; CHECK-LABEL: v_orni8: +; CHECK: @ BB#0: +; 
CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorn d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > @@ -371,8 +589,13 @@ define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { } define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK-LABEL: v_orni16: -;CHECK: vorn +; CHECK-LABEL: v_orni16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorn d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = xor <4 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1 > @@ -381,8 +604,13 @@ define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { } define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK-LABEL: v_orni32: -;CHECK: vorn +; CHECK-LABEL: v_orni32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorn d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = xor <2 x i32> %tmp2, < i32 -1, i32 -1 > @@ -391,8 +619,13 @@ define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { } define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind { -;CHECK-LABEL: v_orni64: -;CHECK: vorn +; CHECK-LABEL: v_orni64: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vorn d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <1 x i64>, <1 x i64>* %A %tmp2 = load <1 x i64>, <1 x i64>* %B %tmp3 = xor <1 x i64> %tmp2, < i64 -1 > @@ -401,8 +634,14 @@ define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind { } define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) 
nounwind { -;CHECK-LABEL: v_ornQi8: -;CHECK: vorn +; CHECK-LABEL: v_ornQi8: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorn q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > @@ -411,8 +650,14 @@ define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK-LABEL: v_ornQi16: -;CHECK: vorn +; CHECK-LABEL: v_ornQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorn q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = xor <8 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 > @@ -421,8 +666,14 @@ define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { } define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK-LABEL: v_ornQi32: -;CHECK: vorn +; CHECK-LABEL: v_ornQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorn q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = xor <4 x i32> %tmp2, < i32 -1, i32 -1, i32 -1, i32 -1 > @@ -431,8 +682,14 @@ define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { } define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { -;CHECK-LABEL: v_ornQi64: -;CHECK: vorn +; CHECK-LABEL: v_ornQi64: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; 
CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vorn q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i64>, <2 x i64>* %A %tmp2 = load <2 x i64>, <2 x i64>* %B %tmp3 = xor <2 x i64> %tmp2, < i64 -1, i64 -1 > @@ -441,8 +698,13 @@ define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { } define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK-LABEL: vtsti8: -;CHECK: vtst.8 +; CHECK-LABEL: vtsti8: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vtst.8 d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = and <8 x i8> %tmp1, %tmp2 @@ -452,8 +714,13 @@ define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind { } define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK-LABEL: vtsti16: -;CHECK: vtst.16 +; CHECK-LABEL: vtsti16: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vtst.16 d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = and <4 x i16> %tmp1, %tmp2 @@ -463,8 +730,13 @@ define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind { } define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK-LABEL: vtsti32: -;CHECK: vtst.32 +; CHECK-LABEL: vtsti32: +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r1] +; CHECK-NEXT: vldr d17, [r0] +; CHECK-NEXT: vtst.32 d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = and <2 x i32> %tmp1, %tmp2 @@ -474,8 +746,14 @@ define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind { } define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK-LABEL: vtstQi8: -;CHECK: vtst.8 +; CHECK-LABEL: vtstQi8: +; CHECK: 
@ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vtst.8 q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = and <16 x i8> %tmp1, %tmp2 @@ -485,8 +763,14 @@ define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK-LABEL: vtstQi16: -;CHECK: vtst.16 +; CHECK-LABEL: vtstQi16: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vtst.16 q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i16>, <8 x i16>* %A %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = and <8 x i16> %tmp1, %tmp2 @@ -496,8 +780,14 @@ define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { } define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK-LABEL: vtstQi32: -;CHECK: vtst.32 +; CHECK-LABEL: vtstQi32: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r1] +; CHECK-NEXT: vld1.64 {d18, d19}, [r0] +; CHECK-NEXT: vtst.32 q8, q9, q8 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <4 x i32>, <4 x i32>* %A %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = and <4 x i32> %tmp1, %tmp2 @@ -508,19 +798,24 @@ define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_orrimm: -; CHECK-NOT: vmov -; CHECK-NOT: vmvn -; CHECK: vorr +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vorr.i32 d16, #0x1000000 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp3 = or <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1> ret <8 x i8> %tmp3 } define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind { -; CHECK: v_orrimmQ -; 
CHECK-NOT: vmov -; CHECK-NOT: vmvn -; CHECK: vorr +; CHECK-LABEL: v_orrimmQ: +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vorr.i32 q8, #0x1000000 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp3 = or <16 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1> ret <16 x i8> %tmp3 @@ -528,9 +823,11 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind { define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_bicimm: -; CHECK-NOT: vmov -; CHECK-NOT: vmvn -; CHECK: vbic +; CHECK: @ BB#0: +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vbic.i32 d16, #0xff000000 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: bx lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp3 = and <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 > ret <8 x i8> %tmp3 @@ -538,10 +835,29 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind { define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_bicimmQ: -; CHECK-NOT: vmov -; CHECK-NOT: vmvn -; CHECK: vbic +; CHECK: @ BB#0: +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vbic.i32 q8, #0xff000000 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr %tmp1 = load <16 x i8>, <16 x i8>* %A %tmp3 = and <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 > ret <16 x i8> %tmp3 } + +define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) nounwind { +; CHECK-LABEL: hidden_not_v4i32: +; CHECK: @ BB#0: +; CHECK-NEXT: vmov d19, r2, r3 +; CHECK-NEXT: vmov.i32 q8, #0x6 +; CHECK-NEXT: vmov d18, r0, r1 +; CHECK-NEXT: vbic q8, q8, q9 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: bx lr + %xor = xor <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15> + %and = and <4 x i32> %xor, <i32 6, i32 6, i32 6, i32 6> + ret <4 x i32> %and +} + diff --git 
a/test/CodeGen/ARM/vector-load.ll b/test/CodeGen/ARM/vector-load.ll index ed734723a86d3..4f7ebc938d4c7 100644 --- a/test/CodeGen/ARM/vector-load.ll +++ b/test/CodeGen/ARM/vector-load.ll @@ -253,11 +253,22 @@ define <4 x i32> @zextload_v8i8tov8i32_fake_update(<4 x i8>** %ptr) { } ; CHECK-LABEL: test_silly_load: -; CHECK: ldr {{r[0-9]+}}, [r0, #24] -; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0:128]! -; CHECK: vldr d{{[0-9]+}}, [r0] +; CHECK: vldr d{{[0-9]+}}, [r0, #16] +; CHECK: movs r1, #24 +; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0:128], r1 +; CHECK: ldr {{r[0-9]+}}, [r0] define void @test_silly_load(<28 x i8>* %addr) { load volatile <28 x i8>, <28 x i8>* %addr ret void } + +define <4 x i32>* @test_vld1_immoffset(<4 x i32>* %ptr.in, <4 x i32>* %ptr.out) { +; CHECK-LABEL: test_vld1_immoffset: +; CHECK: movs [[INC:r[0-9]+]], #32 +; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0], [[INC]] + %val = load <4 x i32>, <4 x i32>* %ptr.in + store <4 x i32> %val, <4 x i32>* %ptr.out + %next = getelementptr <4 x i32>, <4 x i32>* %ptr.in, i32 2 + ret <4 x i32>* %next +} diff --git a/test/CodeGen/ARM/vector-store.ll b/test/CodeGen/ARM/vector-store.ll index 161bbf1d0fde8..e8c1a78a9113b 100644 --- a/test/CodeGen/ARM/vector-store.ll +++ b/test/CodeGen/ARM/vector-store.ll @@ -256,3 +256,13 @@ define void @truncstore_v4i32tov4i8_fake_update(<4 x i8>** %ptr, <4 x i32> %val) store <4 x i8>* %inc, <4 x i8>** %ptr ret void } + +define <4 x i32>* @test_vst1_1reg(<4 x i32>* %ptr.in, <4 x i32>* %ptr.out) { +; CHECK-LABEL: test_vst1_1reg: +; CHECK: movs [[INC:r[0-9]+]], #32 +; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r1], [[INC]] + %val = load <4 x i32>, <4 x i32>* %ptr.in + store <4 x i32> %val, <4 x i32>* %ptr.out + %next = getelementptr <4 x i32>, <4 x i32>* %ptr.out, i32 2 + ret <4 x i32>* %next +} diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll index c6d5747f35093..71ca0f7915242 100644 --- a/test/CodeGen/ARM/vlddup.ll +++ b/test/CodeGen/ARM/vlddup.ll @@ 
-310,6 +310,23 @@ define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind { ret <4 x i16> %tmp5 } +define <4 x i16> @vld2dupi16_odd_update(i16** %ptr) nounwind { +;CHECK-LABEL: vld2dupi16_odd_update: +;CHECK: mov [[INC:r[0-9]+]], #6 +;CHECK: vld2.16 {d16[], d17[]}, [r1], [[INC]] + %A = load i16*, i16** %ptr + %A2 = bitcast i16* %A to i8* + %tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2) + %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0 + %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer + %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1 + %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer + %tmp5 = add <4 x i16> %tmp2, %tmp4 + %tmp6 = getelementptr i16, i16* %A, i32 3 + store i16* %tmp6, i16** %ptr + ret <4 x i16> %tmp5 +} + define <2 x i32> @vld2dupi32(i8* %A) nounwind { ;CHECK-LABEL: vld2dupi32: ;Check the alignment value. 
Max for this instruction is 64 bits: diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll index 2c14bc2d8f4eb..866641f3fbbd9 100644 --- a/test/CodeGen/ARM/vldlane.ll +++ b/test/CodeGen/ARM/vldlane.ll @@ -150,6 +150,22 @@ define <2 x i32> @vld2lanei32_update(i32** %ptr, <2 x i32>* %B) nounwind { ret <2 x i32> %tmp5 } +define <2 x i32> @vld2lanei32_odd_update(i32** %ptr, <2 x i32>* %B) nounwind { +;CHECK-LABEL: vld2lanei32_odd_update: +;CHECK: mov [[INC:r[0-9]+]], #12 +;CHECK: vld2.32 {d16[1], d17[1]}, [{{r[0-9]+}}], [[INC]] + %A = load i32*, i32** %ptr + %tmp0 = bitcast i32* %A to i8* + %tmp1 = load <2 x i32>, <2 x i32>* %B + %tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32.p0i8(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1) + %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0 + %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1 + %tmp5 = add <2 x i32> %tmp3, %tmp4 + %tmp6 = getelementptr i32, i32* %A, i32 3 + store i32* %tmp6, i32** %ptr + ret <2 x i32> %tmp5 +} + define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind { ;CHECK-LABEL: vld2lanef: ;CHECK: vld2.32 diff --git a/test/CodeGen/ARM/vtbl.ll b/test/CodeGen/ARM/vtbl.ll index e4dd572a41b4d..2e0718877e96d 100644 --- a/test/CodeGen/ARM/vtbl.ll +++ b/test/CodeGen/ARM/vtbl.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s +; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - -verify-machineinstrs | FileCheck %s %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> } %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> } diff --git a/test/CodeGen/AVR/alloca.ll b/test/CodeGen/AVR/alloca.ll index 579573c0a133d..37c0e62b55fde 100644 --- a/test/CodeGen/AVR/alloca.ll +++ b/test/CodeGen/AVR/alloca.ll @@ -45,14 +45,14 @@ entry: define i16 @alloca_write(i16 %x) { entry: ; CHECK-LABEL: alloca_write: +; Small offset here +; CHECK: std Y+23, {{.*}} +; CHECK: std Y+24, {{.*}} ; Big offset here ; CHECK: adiw r28, 
57 ; CHECK: std Y+62, {{.*}} ; CHECK: std Y+63, {{.*}} ; CHECK: sbiw r28, 57 -; Small offset here -; CHECK: std Y+23, {{.*}} -; CHECK: std Y+24, {{.*}} %p = alloca [15 x i16] %k = alloca [14 x i16] %arrayidx = getelementptr inbounds [15 x i16], [15 x i16]* %p, i16 0, i16 45 diff --git a/test/CodeGen/AVR/call.ll b/test/CodeGen/AVR/call.ll index 58bffd3a67870..bc6cb198a9e5b 100644 --- a/test/CodeGen/AVR/call.ll +++ b/test/CodeGen/AVR/call.ll @@ -30,9 +30,9 @@ define i8 @calli8_reg() { define i8 @calli8_stack() { ; CHECK-LABEL: calli8_stack: -; CHECK: ldi [[REG1:r[0-9]+]], 11 +; CHECK: ldi [[REG1:r[0-9]+]], 10 ; CHECK: push [[REG1]] -; CHECK: ldi [[REG1]], 10 +; CHECK: ldi [[REG1]], 11 ; CHECK: push [[REG1]] ; CHECK: call foo8_3 %result1 = call i8 @foo8_3(i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11) @@ -52,14 +52,14 @@ define i16 @calli16_reg() { define i16 @calli16_stack() { ; CHECK-LABEL: calli16_stack: -; CHECK: ldi [[REG1:r[0-9]+]], 10 -; CHECK: ldi [[REG2:r[0-9]+]], 2 -; CHECK: push [[REG2]] -; CHECK: push [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 9 ; CHECK: ldi [[REG2:r[0-9]+]], 2 ; CHECK: push [[REG2]] ; CHECK: push [[REG1]] +; CHECK: ldi [[REG1:r[0-9]+]], 10 +; CHECK: ldi [[REG2:r[0-9]+]], 2 +; CHECK: push [[REG2]] +; CHECK: push [[REG1]] ; CHECK: call foo16_2 %result1 = call i16 @foo16_2(i16 512, i16 513, i16 514, i16 515, i16 516, i16 517, i16 518, i16 519, i16 520, i16 521, i16 522) ret i16 %result1 @@ -82,14 +82,14 @@ define i32 @calli32_reg() { define i32 @calli32_stack() { ; CHECK-LABEL: calli32_stack: -; CHECK: ldi [[REG1:r[0-9]+]], 15 -; CHECK: ldi [[REG2:r[0-9]+]], 2 -; CHECK: push [[REG2]] -; CHECK: push [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 64 ; CHECK: ldi [[REG2:r[0-9]+]], 66 ; CHECK: push [[REG2]] ; CHECK: push [[REG1]] +; CHECK: ldi [[REG1:r[0-9]+]], 15 +; CHECK: ldi [[REG2:r[0-9]+]], 2 +; CHECK: push [[REG2]] +; CHECK: push [[REG1]] ; CHECK: call foo32_2 %result1 = call i32 @foo32_2(i32 1, i32 2, i32 3, i32 4, i32 
34554432) ret i32 %result1 @@ -112,14 +112,15 @@ define i64 @calli64_reg() { define i64 @calli64_stack() { ; CHECK-LABEL: calli64_stack: -; CHECK: ldi [[REG1:r[0-9]+]], 31 -; CHECK: ldi [[REG2:r[0-9]+]], 242 -; CHECK: push [[REG2]] -; CHECK: push [[REG1]] + ; CHECK: ldi [[REG1:r[0-9]+]], 76 ; CHECK: ldi [[REG2:r[0-9]+]], 73 ; CHECK: push [[REG2]] ; CHECK: push [[REG1]] +; CHECK: ldi [[REG1:r[0-9]+]], 31 +; CHECK: ldi [[REG2:r[0-9]+]], 242 +; CHECK: push [[REG2]] +; CHECK: push [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 155 ; CHECK: ldi [[REG2:r[0-9]+]], 88 ; CHECK: push [[REG2]] diff --git a/test/CodeGen/AVR/directmem.ll b/test/CodeGen/AVR/directmem.ll index a97e712ed625e..032263a9d657e 100644 --- a/test/CodeGen/AVR/directmem.ll +++ b/test/CodeGen/AVR/directmem.ll @@ -33,10 +33,10 @@ define i8 @global8_load() { define void @array8_store() { ; CHECK-LABEL: array8_store: -; CHECK: ldi [[REG1:r[0-9]+]], 1 -; CHECK: sts char.array, [[REG1]] ; CHECK: ldi [[REG2:r[0-9]+]], 2 ; CHECK: sts char.array+1, [[REG2]] +; CHECK: ldi [[REG1:r[0-9]+]], 1 +; CHECK: sts char.array, [[REG1]] ; CHECK: ldi [[REG:r[0-9]+]], 3 ; CHECK: sts char.array+2, [[REG]] store i8 1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @char.array, i32 0, i64 0) @@ -83,14 +83,18 @@ define i16 @global16_load() { define void @array16_store() { ; CHECK-LABEL: array16_store: -; CHECK: ldi [[REG1:r[0-9]+]], 187 -; CHECK: ldi [[REG2:r[0-9]+]], 170 -; CHECK: sts int.array+1, [[REG2]] -; CHECK: sts int.array, [[REG1]] + ; CHECK: ldi [[REG1:r[0-9]+]], 204 ; CHECK: ldi [[REG2:r[0-9]+]], 170 ; CHECK: sts int.array+3, [[REG2]] ; CHECK: sts int.array+2, [[REG1]] + +; CHECK: ldi [[REG1:r[0-9]+]], 187 +; CHECK: ldi [[REG2:r[0-9]+]], 170 +; CHECK: sts int.array+1, [[REG2]] +; CHECK: sts int.array, [[REG1]] + + ; CHECK: ldi [[REG1:r[0-9]+]], 221 ; CHECK: ldi [[REG2:r[0-9]+]], 170 ; CHECK: sts int.array+5, [[REG2]] @@ -148,14 +152,6 @@ define i32 @global32_load() { define void @array32_store() { ; CHECK-LABEL: array32_store: 
-; CHECK: ldi [[REG1:r[0-9]+]], 27 -; CHECK: ldi [[REG2:r[0-9]+]], 172 -; CHECK: sts long.array+3, [[REG2]] -; CHECK: sts long.array+2, [[REG1]] -; CHECK: ldi [[REG1:r[0-9]+]], 68 -; CHECK: ldi [[REG2:r[0-9]+]], 13 -; CHECK: sts long.array+1, [[REG2]] -; CHECK: sts long.array, [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 102 ; CHECK: ldi [[REG2:r[0-9]+]], 85 ; CHECK: sts long.array+7, [[REG2]] @@ -164,6 +160,14 @@ define void @array32_store() { ; CHECK: ldi [[REG2:r[0-9]+]], 119 ; CHECK: sts long.array+5, [[REG2]] ; CHECK: sts long.array+4, [[REG1]] +; CHECK: ldi [[REG1:r[0-9]+]], 27 +; CHECK: ldi [[REG2:r[0-9]+]], 172 +; CHECK: sts long.array+3, [[REG2]] +; CHECK: sts long.array+2, [[REG1]] +; CHECK: ldi [[REG1:r[0-9]+]], 68 +; CHECK: ldi [[REG2:r[0-9]+]], 13 +; CHECK: sts long.array+1, [[REG2]] +; CHECK: sts long.array, [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 170 ; CHECK: ldi [[REG2:r[0-9]+]], 153 ; CHECK: sts long.array+11, [[REG2]] diff --git a/test/CodeGen/AVR/inline-asm/multibyte.ll b/test/CodeGen/AVR/inline-asm/multibyte.ll deleted file mode 100644 index a7c8f6e75f0fb..0000000000000 --- a/test/CodeGen/AVR/inline-asm/multibyte.ll +++ /dev/null @@ -1,135 +0,0 @@ -; RUN: llc < %s -march=avr -no-integrated-as | FileCheck %s -; XFAIL: * - -; Multibyte references - -; CHECK-LABEL: multibyte_i16 -define void @multibyte_i16(i16 %a) { -entry: -; CHECK: instr r24 r25 - call void asm sideeffect "instr ${0:A} ${0:B}", "r"(i16 %a) -; CHECK: instr r25 r24 - call void asm sideeffect "instr ${0:B} ${0:A}", "r"(i16 %a) - ret void -} - -; CHECK-LABEL: multibyte_i32 -define void @multibyte_i32(i32 %a) { -entry: -; CHECK: instr r22 r23 r24 r25 - call void asm sideeffect "instr ${0:A} ${0:B} ${0:C} ${0:D}", "r"(i32 %a) -; CHECK: instr r25 r24 r23 r22 - call void asm sideeffect "instr ${0:D} ${0:C} ${0:B} ${0:A}", "r"(i32 %a) - ret void -} - -; CHECK-LABEL: multibyte_alternative_name -define void @multibyte_alternative_name(i16* %p) { -entry: -; CHECK: instr Z - call void asm 
sideeffect "instr ${0:a}", "e" (i16* %p) - ret void -} - -; CHECK-LABEL: multibyte_a_i32 -define void @multibyte_a_i32() { -entry: - %a = alloca i32 - %0 = load i32, i32* %a -; CHECK: instr r20 r21 r22 r23 - call void asm sideeffect "instr ${0:A} ${0:B} ${0:C} ${0:D}", "a"(i32 %0) - ret void -} - -@c = internal global i32 0 - -; CHECK-LABEL: multibyte_b_i32 -define void @multibyte_b_i32() { -entry: - %0 = load i32, i32* @c -; CHECK: instr r28 r29 r30 r31 - call void asm sideeffect "instr ${0:A} ${0:B} ${0:C} ${0:D}", "b"(i32 %0) - ret void -} - -; CHECK-LABEL: multibyte_d_i32 -define void @multibyte_d_i32() { -entry: - %a = alloca i32 - %0 = load i32, i32* %a -; CHECK: instr r18 r19 r24 r25 - call void asm sideeffect "instr ${0:A} ${0:B} ${0:C} ${0:D}", "d"(i32 %0) - ret void -} - -; CHECK-LABEL: multibyte_e_i32 -define void @multibyte_e_i32() { -entry: - %a = alloca i32 - %0 = load i32, i32* %a -; CHECK: instr r26 r27 r30 r31 - call void asm sideeffect "instr ${0:A} ${0:B} ${0:C} ${0:D}", "e"(i32 %0) - ret void -} - -; CHECK-LABEL: multibyte_l_i32 -define void @multibyte_l_i32() { -entry: - %a = alloca i32 - %0 = load i32, i32* %a -; CHECK: instr r12 r13 r14 r15 - call void asm sideeffect "instr ${0:A} ${0:B} ${0:C} ${0:D}", "l"(i32 %0) - ret void -} - -; CHECK-LABEL: multibyte_a_i16 -define void @multibyte_a_i16() { -entry: - %a = alloca i16 - %0 = load i16, i16* %a -; CHECK: instr r22 r23 - call void asm sideeffect "instr ${0:A} ${0:B}", "a"(i16 %0) - ret void -} - -; CHECK-LABEL: multibyte_b_i16 -define void @multibyte_b_i16() { -entry: - %a = alloca i16 - %0 = load i16, i16* %a -; CHECK: instr r30 r31 - call void asm sideeffect "instr ${0:A} ${0:B}", "b"(i16 %0) - ret void -} - -; CHECK-LABEL: multibyte_d_i16 -define void @multibyte_d_i16() { -entry: - %a = alloca i16 - %0 = load i16, i16* %a -; CHECK: instr r24 r25 - call void asm sideeffect "instr ${0:A} ${0:B}", "d"(i16 %0) - ret void -} - -; CHECK-LABEL: multibyte_e_i16 -define void @multibyte_e_i16() { 
-entry: - %a = alloca i16 - %0 = load i16, i16* %a -; CHECK: instr r30 r31 - call void asm sideeffect "instr ${0:A} ${0:B}", "e"(i16 %0) - ret void -} - -; CHECK-LABEL: multibyte_l_i16 -define void @multibyte_l_i16() { -entry: - %a = alloca i16 - %0 = load i16, i16* %a -; CHECK: instr r14 r15 - call void asm sideeffect "instr ${0:A} ${0:B}", "l"(i16 %0) - ret void -} - - diff --git a/test/CodeGen/AVR/varargs.ll b/test/CodeGen/AVR/varargs.ll index b35ce4c0f7aef..4959f2d880c8b 100644 --- a/test/CodeGen/AVR/varargs.ll +++ b/test/CodeGen/AVR/varargs.ll @@ -40,14 +40,14 @@ define i16 @varargs2(i8* nocapture %x, ...) { declare void @var1223(i16, ...) define void @varargcall() { ; CHECK-LABEL: varargcall: -; CHECK: ldi [[REG1:r[0-9]+]], 191 -; CHECK: ldi [[REG2:r[0-9]+]], 223 -; CHECK: push [[REG2]] -; CHECK: push [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 189 ; CHECK: ldi [[REG2:r[0-9]+]], 205 ; CHECK: push [[REG2]] ; CHECK: push [[REG1]] +; CHECK: ldi [[REG1:r[0-9]+]], 191 +; CHECK: ldi [[REG2:r[0-9]+]], 223 +; CHECK: push [[REG2]] +; CHECK: push [[REG1]] ; CHECK: ldi [[REG1:r[0-9]+]], 205 ; CHECK: ldi [[REG2:r[0-9]+]], 171 ; CHECK: push [[REG2]] diff --git a/test/CodeGen/Hexagon/addrmode-globoff.mir b/test/CodeGen/Hexagon/addrmode-globoff.mir new file mode 100644 index 0000000000000..fb22959751ac8 --- /dev/null +++ b/test/CodeGen/Hexagon/addrmode-globoff.mir @@ -0,0 +1,25 @@ +# RUN: llc -march=hexagon -run-pass amode-opt %s -o - | FileCheck %s + +--- | + @g0 = external global [16 x i16], align 8 + define void @foo() { + ret void + } +... + +--- +name: foo +tracksRegLiveness: true + +body: | + bb.0: + liveins: %r0 + + ; Make sure that the offset in @g0 is 8. + ; CHECK: S4_storerh_ur killed %r0, 2, @g0 + 8, %r0 + + %r1 = A2_tfrsi @g0+4 + %r2 = S2_addasl_rrri %r1, %r0, 2 + S2_storerh_io %r2, 4, %r0 +... 
+ diff --git a/test/CodeGen/Mips/msa/shift_constant_pool.ll b/test/CodeGen/Mips/msa/shift_constant_pool.ll new file mode 100644 index 0000000000000..73da33361bfa0 --- /dev/null +++ b/test/CodeGen/Mips/msa/shift_constant_pool.ll @@ -0,0 +1,171 @@ +; Test whether the following functions, with vectors featuring negative or values larger than the element +; bit size have their results of operations generated correctly when placed into constant pools + +; RUN: llc -march=mips64 -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64 %s +; RUN: llc -march=mips -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS32 %s +; RUN: llc -march=mips64el -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64 %s +; RUN: llc -march=mipsel -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS32 %s + +@llvm_mips_bclr_w_test_const_vec_res = global <4 x i32> zeroinitializer, align 16 + +define void @llvm_mips_bclr_w_test_const_vec() nounwind { +entry: + %0 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> <i32 2147483649, i32 2147483649, i32 7, i32 7>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>) + store <4 x i32> %0, <4 x i32>* @llvm_mips_bclr_w_test_const_vec_res + ret void +} + +declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind + +; MIPS32: [[LABEL:\$CPI[0-9]+_[0-9]+]]: +; MIPS64: [[LABEL:\.LCPI[0-9]+_[0-9]+]]: +; ALL: .4byte 1 # 0x1 +; ALL: .4byte 1 # 0x1 +; ALL: .4byte 3 # 0x3 +; ALL: .4byte 3 # 0x3 +; ALL-LABEL: llvm_mips_bclr_w_test_const_vec: +; MIPS32: lw $[[R2:[0-9]+]], %got([[LABEL]])($[[R1:[0-9]+]]) +; MIPS32: addiu $[[R2]], $[[R2]], %lo([[LABEL]]) +; MIPS32: lw $[[R3:[0-9]+]], %got(llvm_mips_bclr_w_test_const_vec_res)($[[R1]]) +; MIPS64: ld $[[R2:[0-9]+]], %got_page([[LABEL]])($[[R1:[0-9]+]]) +; MIPS64: daddiu $[[R2]], $[[R2]], %got_ofst([[LABEL]]) +; MIPS64: ld $[[R3:[0-9]+]], %got_disp(llvm_mips_bclr_w_test_const_vec_res)($[[R1]]) +; ALL: ld.w 
$w0, 0($[[R2]]) +; ALL: st.w $w0, 0($[[R3]]) + + +@llvm_mips_bneg_w_test_const_vec_res = global <4 x i32> zeroinitializer, align 16 + +define void @llvm_mips_bneg_w_test_const_vec() nounwind { +entry: + %0 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> <i32 2147483649, i32 2147483649, i32 7, i32 7>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>) + store <4 x i32> %0, <4 x i32>* @llvm_mips_bneg_w_test_const_vec_res + ret void +} + +declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind + +; MIPS32: [[LABEL:\$CPI[0-9]+_[0-9]+]]: +; MIPS64: [[LABEL:\.LCPI[0-9]+_[0-9]+]]: +; ALL: .4byte 1 # 0x1 +; ALL: .4byte 1 # 0x1 +; ALL: .4byte 3 # 0x3 +; ALL: .4byte 3 # 0x3 +; ALL-LABEL: llvm_mips_bneg_w_test_const_vec: +; MIPS32: lw $[[R2:[0-9]+]], %got([[LABEL]])($[[R1:[0-9]+]]) +; MIPS32: addiu $[[R2]], $[[R2]], %lo([[LABEL]]) +; MIPS32: lw $[[R3:[0-9]+]], %got(llvm_mips_bneg_w_test_const_vec_res)($[[R1]]) +; MIPS64: ld $[[R2:[0-9]+]], %got_page([[LABEL]])($[[R1:[0-9]+]]) +; MIPS64: daddiu $[[R2]], $[[R2]], %got_ofst([[LABEL]]) +; MIPS64: ld $[[R3:[0-9]+]], %got_disp(llvm_mips_bneg_w_test_const_vec_res)($[[R1]]) +; ALL: ld.w $w0, 0($[[R2]]) +; ALL: st.w $w0, 0($[[R3]]) + + +@llvm_mips_bset_w_test_const_vec_res = global <4 x i32> zeroinitializer, align 16 + +define void @llvm_mips_bset_w_test_const_vec() nounwind { +entry: + %0 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>) + store <4 x i32> %0, <4 x i32>* @llvm_mips_bset_w_test_const_vec_res + ret void +} + +declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind + +; MIPS32: [[LABEL:\$CPI[0-9]+_[0-9]+]]: +; MIPS64: [[LABEL:\.LCPI[0-9]+_[0-9]+]]: +; ALL: .4byte 2147483648 # 0x80000000 +; ALL: .4byte 2147483648 # 0x80000000 +; ALL: .4byte 4 # 0x4 +; ALL: .4byte 4 # 0x4 +; ALL-LABEL: llvm_mips_bset_w_test_const_vec: +; MIPS32: lw $[[R2:[0-9]+]], %got([[LABEL]])($[[R1:[0-9]+]]) +; MIPS32: addiu $[[R2]], $[[R2]], %lo([[LABEL]]) +; 
MIPS32: lw $[[R3:[0-9]+]], %got(llvm_mips_bset_w_test_const_vec_res)($[[R1]]) +; MIPS64: ld $[[R2:[0-9]+]], %got_page([[LABEL]])($[[R1:[0-9]+]]) +; MIPS64: daddiu $[[R2]], $[[R2]], %got_ofst([[LABEL]]) +; MIPS64: ld $[[R3:[0-9]+]], %got_disp(llvm_mips_bset_w_test_const_vec_res)($[[R1]]) +; ALL: ld.w $w0, 0($[[R2]]) +; ALL: st.w $w0, 0($[[R3]]) + +@llvm_mips_sll_w_test_const_vec_res = global <4 x i32> zeroinitializer, align 16 + +define void @llvm_mips_sll_w_test_const_vec() nounwind { +entry: + %0 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>) + store <4 x i32> %0, <4 x i32>* @llvm_mips_sll_w_test_const_vec_res + ret void +} + +declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind + +; MIPS32: [[LABEL:\$CPI[0-9]+_[0-9]+]]: +; MIPS64: [[LABEL:\.LCPI[0-9]+_[0-9]+]]: +; ALL: .4byte 2147483648 # 0x80000000 +; ALL: .4byte 2147483648 # 0x80000000 +; ALL: .4byte 4 # 0x4 +; ALL: .4byte 4 # 0x4 +; ALL-LABEL: llvm_mips_sll_w_test_const_vec: +; MIPS32: lw $[[R2:[0-9]+]], %got([[LABEL]])($[[R1:[0-9]+]]) +; MIPS32: addiu $[[R2]], $[[R2]], %lo([[LABEL]]) +; MIPS32: lw $[[R3:[0-9]+]], %got(llvm_mips_sll_w_test_const_vec_res)($[[R1]]) +; MIPS64: ld $[[R2:[0-9]+]], %got_page([[LABEL]])($[[R1:[0-9]+]]) +; MIPS64: daddiu $[[R2]], $[[R2]], %got_ofst([[LABEL]]) +; MIPS64: ld $[[R3:[0-9]+]], %got_disp(llvm_mips_sll_w_test_const_vec_res)($[[R1]]) +; ALL: ld.w $w0, 0($[[R2]]) +; ALL: st.w $w0, 0($[[R3]]) + +@llvm_mips_sra_w_test_const_vec_res = global <4 x i32> zeroinitializer, align 16 + +define void @llvm_mips_sra_w_test_const_vec() nounwind { +entry: + %0 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> <i32 -16, i32 16, i32 16, i32 16>, <4 x i32> <i32 2, i32 -30, i32 33, i32 1>) + store <4 x i32> %0, <4 x i32>* @llvm_mips_sra_w_test_const_vec_res + ret void +} + +declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind + +; MIPS32: [[LABEL:\$CPI[0-9]+_[0-9]+]]: +; MIPS64: 
[[LABEL:\.LCPI[0-9]+_[0-9]+]]: +; ALL: .4byte 4294967292 # 0xfffffffc +; ALL: .4byte 4 # 0x4 +; ALL: .4byte 8 # 0x8 +; ALL: .4byte 8 # 0x8 +; ALL-LABEL: llvm_mips_sra_w_test_const_vec: +; MIPS32: lw $[[R2:[0-9]+]], %got([[LABEL]])($[[R1:[0-9]+]]) +; MIPS32: addiu $[[R2]], $[[R2]], %lo([[LABEL]]) +; MIPS32: lw $[[R3:[0-9]+]], %got(llvm_mips_sra_w_test_const_vec_res)($[[R1]]) +; MIPS64: ld $[[R2:[0-9]+]], %got_page([[LABEL]])($[[R1:[0-9]+]]) +; MIPS64: daddiu $[[R2]], $[[R2]], %got_ofst([[LABEL]]) +; MIPS64: ld $[[R3:[0-9]+]], %got_disp(llvm_mips_sra_w_test_const_vec_res)($[[R1]]) +; ALL: ld.w $w0, 0($[[R2]]) +; ALL: st.w $w0, 0($[[R3]]) + +@llvm_mips_srl_w_test_const_vec_res = global <4 x i32> zeroinitializer, align 16 + +define void @llvm_mips_srl_w_test_const_vec() nounwind { +entry: + %0 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> <i32 -16, i32 16, i32 16, i32 16>, <4 x i32> <i32 2, i32 -30, i32 33, i32 1>) + store <4 x i32> %0, <4 x i32>* @llvm_mips_srl_w_test_const_vec_res + ret void +} + +declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind + +; MIPS32: [[LABEL:\$CPI[0-9]+_[0-9]+]]: +; MIPS64: [[LABEL:\.LCPI[0-9]+_[0-9]+]]: +; ALL: .4byte 1073741820 # 0x3ffffffc +; ALL: .4byte 4 # 0x4 +; ALL: .4byte 8 # 0x8 +; ALL: .4byte 8 # 0x8 +; ALL-LABEL: llvm_mips_srl_w_test_const_vec: +; MIPS32: lw $[[R2:[0-9]+]], %got([[LABEL]])($[[R1:[0-9]+]]) +; MIPS32: addiu $[[R2]], $[[R2]], %lo([[LABEL]]) +; MIPS32: lw $[[R3:[0-9]+]], %got(llvm_mips_srl_w_test_const_vec_res)($[[R1]]) +; MIPS64: ld $[[R2:[0-9]+]], %got_page([[LABEL]])($[[R1:[0-9]+]]) +; MIPS64: daddiu $[[R2]], $[[R2]], %got_ofst([[LABEL]]) +; MIPS64: ld $[[R3:[0-9]+]], %got_disp(llvm_mips_srl_w_test_const_vec_res)($[[R1]]) +; ALL: ld.w $w0, 0($[[R2]]) +; ALL: st.w $w0, 0($[[R3]]) diff --git a/test/CodeGen/Mips/msa/shift_no_and.ll b/test/CodeGen/Mips/msa/shift_no_and.ll new file mode 100644 index 0000000000000..c6f90215af9c8 --- /dev/null +++ b/test/CodeGen/Mips/msa/shift_no_and.ll @@ -0,0 +1,460 
@@ +; Test the absence of the andi.b / and.v instructions + +; RUN: llc -march=mips -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s +; RUN: llc -march=mipsel -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s + +@llvm_mips_bclr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_bclr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 +@llvm_mips_bclr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 + +define void @llvm_mips_bclr_b_test() nounwind { +entry: + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2 + %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES + ret void +} + +declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind + +; CHECK-LABEL: llvm_mips_bclr_b_test: +; CHECK-NOT: andi.b +; CHECK: bclr.b + +@llvm_mips_bclr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_bclr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 +@llvm_mips_bclr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 + +define void @llvm_mips_bclr_h_test() nounwind { +entry: + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1) + store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES + ret void +} + +declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind + +; CHECK-LABEL: llvm_mips_bclr_h_test: +; CHECK-NOT: and.v +; CHECK: bclr.h + +@llvm_mips_bclr_w_ARG1 = 
global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_bclr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 +@llvm_mips_bclr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 + +define void @llvm_mips_bclr_w_test() nounwind { +entry: + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES + ret void +} + +declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind + +; CHECK-LABEL: llvm_mips_bclr_w_test: +; CHECK-NOT: and.v +; CHECK: bclr.w + +@llvm_mips_bclr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_bclr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16 +@llvm_mips_bclr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 + +define void @llvm_mips_bclr_d_test() nounwind { +entry: + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES + ret void +} + +declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind + +; CHECK-LABEL: llvm_mips_bclr_d_test: +; CHECK-NOT: and.v +; CHECK: bclr.d + +@llvm_mips_bneg_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_bneg_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 +@llvm_mips_bneg_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 + +define void @llvm_mips_bneg_b_test() nounwind { +entry: + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2 + 
%2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES + ret void +} + +declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind + +; CHECK-LABEL: llvm_mips_bneg_b_test: +; CHECK-NOT: andi.b +; CHECK: bneg.b + +@llvm_mips_bneg_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_bneg_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 +@llvm_mips_bneg_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 + +define void @llvm_mips_bneg_h_test() nounwind { +entry: + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1) + store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES + ret void +} + +declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind + +; CHECK-LABEL: llvm_mips_bneg_h_test: +; CHECK-NOT: and.v +; CHECK: bneg.h + +@llvm_mips_bneg_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_bneg_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 +@llvm_mips_bneg_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 + +define void @llvm_mips_bneg_w_test() nounwind { +entry: + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES + ret void +} + +declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind + +; CHECK-LABEL: llvm_mips_bneg_w_test: +; CHECK-NOT: and.v +; CHECK: bneg.w + +@llvm_mips_bneg_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_bneg_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16 +@llvm_mips_bneg_d_RES = global <2 x i64> <i64 0, i64 0>, 
align 16 + +define void @llvm_mips_bneg_d_test() nounwind { +entry: + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES + ret void +} + +declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind + +; CHECK-LABEL: llvm_mips_bneg_d_test: +; CHECK-NOT: and.v +; CHECK: bneg.d + +@llvm_mips_bset_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_bset_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 +@llvm_mips_bset_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 + +define void @llvm_mips_bset_b_test() nounwind { +entry: + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2 + %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES + ret void +} + +declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind + +; CHECK-LABEL: llvm_mips_bset_b_test: +; CHECK-NOT: andi.b +; CHECK: bset.b + +@llvm_mips_bset_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_bset_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 +@llvm_mips_bset_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 + +define void @llvm_mips_bset_h_test() nounwind { +entry: + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1) + store 
<8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES + ret void +} + +declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind + +; CHECK-LABEL: llvm_mips_bset_h_test: +; CHECK-NOT: and.v +; CHECK: bset.h + +@llvm_mips_bset_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_bset_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 +@llvm_mips_bset_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 + +define void @llvm_mips_bset_w_test() nounwind { +entry: + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES + ret void +} + +declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind + +; CHECK-LABEL: llvm_mips_bset_w_test: +; CHECK-NOT: and.v +; CHECK: bset.w + +@llvm_mips_bset_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_bset_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16 +@llvm_mips_bset_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 + +define void @llvm_mips_bset_d_test() nounwind { +entry: + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES + ret void +} + +declare <2 x i64> @llvm.mips.bset.d(<2 x i64>, <2 x i64>) nounwind + +; CHECK-LABEL: llvm_mips_bset_d_test: +; CHECK-NOT: and.v +; CHECK: bset.d + +@llvm_mips_sll_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_sll_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 +@llvm_mips_sll_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, 
i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 + +define void @llvm_mips_sll_b_test() nounwind { +entry: + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2 + %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES + ret void +} + +declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind + +; CHECK-LABEL: llvm_mips_sll_b_test: +; CHECK-NOT: andi.b +; CHECK: sll.b + +@llvm_mips_sll_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_sll_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 +@llvm_mips_sll_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 + +define void @llvm_mips_sll_h_test() nounwind { +entry: + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1) + store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES + ret void +} + +declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind + +; CHECK-LABEL: llvm_mips_sll_h_test: +; CHECK-NOT: and.v +; CHECK: sll.h + +@llvm_mips_sll_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_sll_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 +@llvm_mips_sll_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 + +define void @llvm_mips_sll_w_test() nounwind { +entry: + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES + ret void +} + +declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind + +; CHECK-LABEL: llvm_mips_sll_w_test: +; CHECK-NOT: and.v +; CHECK: 
sll.w + +@llvm_mips_sll_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_sll_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16 +@llvm_mips_sll_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 + +define void @llvm_mips_sll_d_test() nounwind { +entry: + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES + ret void +} + +declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind + +; CHECK-LABEL: llvm_mips_sll_d_test: +; CHECK-NOT: and.v +; CHECK: sll.d + +@llvm_mips_sra_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_sra_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 +@llvm_mips_sra_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 + +define void @llvm_mips_sra_b_test() nounwind { +entry: + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2 + %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES + ret void +} + +declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind + +; CHECK-LABEL: llvm_mips_sra_b_test: +; CHECK-NOT: andi.b +; CHECK: sra.b + +@llvm_mips_sra_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_sra_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 +@llvm_mips_sra_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 + +define void @llvm_mips_sra_h_test() nounwind { +entry: + %0 = load <8 x 
i16>, <8 x i16>* @llvm_mips_sra_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1) + store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES + ret void +} + +declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind + +; CHECK-LABEL: llvm_mips_sra_h_test: +; CHECK-NOT: and.v +; CHECK: sra.h + +@llvm_mips_sra_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_sra_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 +@llvm_mips_sra_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 + +define void @llvm_mips_sra_w_test() nounwind { +entry: + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES + ret void +} + +declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind + +; CHECK-LABEL: llvm_mips_sra_w_test: +; CHECK-NOT: and.v +; CHECK: sra.w + +@llvm_mips_sra_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_sra_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16 +@llvm_mips_sra_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 + +define void @llvm_mips_sra_d_test() nounwind { +entry: + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES + ret void +} + +declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind + +; CHECK-LABEL: llvm_mips_sra_d_test: +; CHECK-NOT: and.v +; CHECK: sra.d + +@llvm_mips_srl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_srl_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, 
i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 +@llvm_mips_srl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 + +define void @llvm_mips_srl_b_test() nounwind { +entry: + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2 + %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES + ret void +} + +declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind + +; CHECK-LABEL: llvm_mips_srl_b_test: +; CHECK-NOT: andi.b +; CHECK: srl.b + +@llvm_mips_srl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_srl_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 +@llvm_mips_srl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 + +define void @llvm_mips_srl_h_test() nounwind { +entry: + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1) + store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES + ret void +} + +declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind + +; CHECK-LABEL: llvm_mips_srl_h_test: +; CHECK-NOT: and.v +; CHECK: srl.h + +@llvm_mips_srl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_srl_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 +@llvm_mips_srl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 + +define void @llvm_mips_srl_w_test() nounwind { +entry: + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES + ret 
void +} + +declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind + +; CHECK-LABEL: llvm_mips_srl_w_test: +; CHECK-NOT: and.v +; CHECK: srl.w + +@llvm_mips_srl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_srl_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16 +@llvm_mips_srl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 + +define void @llvm_mips_srl_d_test() nounwind { +entry: + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES + ret void +} + +declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind + +; CHECK-LABEL: llvm_mips_srl_d_test: +; CHECK-NOT: and.v +; CHECK: srl.d diff --git a/test/CodeGen/PowerPC/andc.ll b/test/CodeGen/PowerPC/andc.ll index 6135db510ad53..df47bfc1e38ef 100644 --- a/test/CodeGen/PowerPC/andc.ll +++ b/test/CodeGen/PowerPC/andc.ll @@ -1,12 +1,13 @@ -; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-apple-darwin | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-unknown | FileCheck %s define i1 @and_cmp1(i32 %x, i32 %y) { ; CHECK-LABEL: and_cmp1: -; CHECK: andc [[REG1:r[0-9]+]], r4, r3 -; CHECK: cntlzw [[REG2:r[0-9]+]], [[REG1]] -; CHECK: rlwinm r3, [[REG2]], 27, 31, 31 -; CHECK: blr - +; CHECK: # BB#0: +; CHECK-NEXT: andc 3, 4, 3 +; CHECK-NEXT: cntlzw 3, 3 +; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31 +; CHECK-NEXT: blr %and = and i32 %x, %y %cmp = icmp eq i32 %and, %y ret i1 %cmp @@ -14,12 +15,12 @@ define i1 @and_cmp1(i32 %x, i32 %y) { define i1 @and_cmp_const(i32 %x) { ; CHECK-LABEL: and_cmp_const: -; CHECK: li [[REG1:r[0-9]+]], 43 -; CHECK: andc [[REG2:r[0-9]+]], [[REG1]], r3 -; CHECK: cntlzw [[REG3:r[0-9]+]], [[REG2]] -; CHECK: rlwinm r3, [[REG3]], 27, 31, 31 -; CHECK: blr - +; CHECK: # BB#0: +; 
CHECK-NEXT: li 4, 43 +; CHECK-NEXT: andc 3, 4, 3 +; CHECK-NEXT: cntlzw 3, 3 +; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31 +; CHECK-NEXT: blr %and = and i32 %x, 43 %cmp = icmp eq i32 %and, 43 ret i1 %cmp @@ -27,15 +28,26 @@ define i1 @and_cmp_const(i32 %x) { define i1 @foo(i32 %i) { ; CHECK-LABEL: foo: -; CHECK: lis [[REG1:r[0-9]+]], 4660 -; CHECK: ori [[REG2:r[0-9]+]], [[REG1]], 22136 -; CHECK: andc [[REG3:r[0-9]+]], [[REG2]], r3 -; CHECK: cntlzw [[REG4:r[0-9]+]], [[REG3]] -; CHECK: rlwinm r3, [[REG4]], 27, 31, 31 -; CHECK: blr - +; CHECK: # BB#0: +; CHECK-NEXT: lis 4, 4660 +; CHECK-NEXT: ori 4, 4, 22136 +; CHECK-NEXT: andc 3, 4, 3 +; CHECK-NEXT: cntlzw 3, 3 +; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31 +; CHECK-NEXT: blr %and = and i32 %i, 305419896 %cmp = icmp eq i32 %and, 305419896 ret i1 %cmp } +define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) { +; CHECK-LABEL: hidden_not_v4i32: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, 6 +; CHECK-NEXT: xxlandc 34, 35, 34 +; CHECK-NEXT: blr + %xor = xor <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15> + %and = and <4 x i32> %xor, <i32 6, i32 6, i32 6, i32 6> + ret <4 x i32> %and +} + diff --git a/test/CodeGen/WebAssembly/returned.ll b/test/CodeGen/WebAssembly/returned.ll index b059fd8a59879..dfd3fad794f1e 100644 --- a/test/CodeGen/WebAssembly/returned.ll +++ b/test/CodeGen/WebAssembly/returned.ll @@ -47,3 +47,34 @@ define void @test_constant_arg() { ret void } declare i32* @returns_arg(i32* returned) + +; Test that the optimization isn't performed on arguments without the +; "returned" attribute. 
+ +; CHECK-LABEL: test_other_skipped: +; CHECK-NEXT: .param i32, i32, f64{{$}} +; CHECK-NEXT: {{^}} i32.call $drop=, do_something@FUNCTION, $0, $1, $2{{$}} +; CHECK-NEXT: {{^}} call do_something_with_i32@FUNCTION, $1{{$}} +; CHECK-NEXT: {{^}} call do_something_with_double@FUNCTION, $2{{$}} +declare i32 @do_something(i32 returned, i32, double) +declare void @do_something_with_i32(i32) +declare void @do_something_with_double(double) +define void @test_other_skipped(i32 %a, i32 %b, double %c) { + %call = call i32 @do_something(i32 %a, i32 %b, double %c) + call void @do_something_with_i32(i32 %b) + call void @do_something_with_double(double %c) + ret void +} + +; Test that the optimization is performed on arguments other than the first. + +; CHECK-LABEL: test_second_arg: +; CHECK-NEXT: .param i32, i32{{$}} +; CHECK-NEXT: .result i32{{$}} +; CHECK-NEXT: {{^}} i32.call $push0=, do_something_else@FUNCTION, $0, $1{{$}} +; CHECK-NEXT: return $pop0{{$}} +declare i32 @do_something_else(i32, i32 returned) +define i32 @test_second_arg(i32 %a, i32 %b) { + %call = call i32 @do_something_else(i32 %a, i32 %b) + ret i32 %b +} diff --git a/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir b/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir index c4e5fb2d05fc0..8e04239041a87 100644 --- a/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir +++ b/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir @@ -106,6 +106,10 @@ ret void } + define void @trunc_check() { + ret void + } + ... --- name: test_add_i8 @@ -632,3 +636,27 @@ body: | RET 0 ... 
+--- +name: trunc_check +alignment: 4 +legalized: true +# CHECK-LABEL: name: trunc_check +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gpr } +# CHECK-NEXT: - { id: 1, class: gpr } +# CHECK-NEXT: - { id: 2, class: gpr } +# CHECK-NEXT: - { id: 3, class: gpr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } +body: | + bb.0 (%ir-block.0): + %0(s32) = IMPLICIT_DEF + %1(s1) = G_TRUNC %0(s32) + %2(s8) = G_TRUNC %0(s32) + %3(s16) = G_TRUNC %0(s32) + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/binop-isel.ll b/test/CodeGen/X86/GlobalISel/binop.ll index 8499dd958447b..8499dd958447b 100644 --- a/test/CodeGen/X86/GlobalISel/binop-isel.ll +++ b/test/CodeGen/X86/GlobalISel/binop.ll diff --git a/test/CodeGen/X86/GlobalISel/legalize-const.mir b/test/CodeGen/X86/GlobalISel/legalize-constant.mir index 612d33a77fc96..612d33a77fc96 100644 --- a/test/CodeGen/X86/GlobalISel/legalize-const.mir +++ b/test/CodeGen/X86/GlobalISel/legalize-constant.mir diff --git a/test/CodeGen/X86/GlobalISel/legalize-trunc.mir b/test/CodeGen/X86/GlobalISel/legalize-trunc.mir new file mode 100644 index 0000000000000..6b390d990ecfd --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-trunc.mir @@ -0,0 +1,31 @@ +# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +--- | + define void @trunc_check() { + ret void + } + +... 
+--- +name: trunc_check +# ALL-LABEL: name: trunc_check +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } +body: | + bb.1 (%ir-block.0): + %0(s32) = IMPLICIT_DEF + ; ALL: %1(s1) = G_TRUNC %0(s32) + %1(s1) = G_TRUNC %0(s32) + + ; ALL: %2(s8) = G_TRUNC %0(s32) + %2(s8) = G_TRUNC %0(s32) + + ; ALL: %3(s16) = G_TRUNC %0(s32) + %3(s16) = G_TRUNC %0(s32) + RET 0 + +... + diff --git a/test/CodeGen/X86/GlobalISel/memop-isel.ll b/test/CodeGen/X86/GlobalISel/memop.ll index 6fe66436e4a8a..6fe66436e4a8a 100644 --- a/test/CodeGen/X86/GlobalISel/memop-isel.ll +++ b/test/CodeGen/X86/GlobalISel/memop.ll diff --git a/test/CodeGen/X86/GlobalISel/select-add.mir b/test/CodeGen/X86/GlobalISel/select-add.mir new file mode 100644 index 0000000000000..27fcc223d2bbe --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-add.mir @@ -0,0 +1,226 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL + +--- | + define i64 @test_add_i64(i64 %arg1, i64 %arg2) { + %ret = add i64 %arg1, %arg2 + ret i64 %ret + } + + define i32 @test_add_i32(i32 %arg1, i32 %arg2) { + %ret = add i32 %arg1, %arg2 + ret i32 
%ret + } + + define float @test_add_float(float %arg1, float %arg2) { + %ret = fadd float %arg1, %arg2 + ret float %ret + } + + define double @test_add_double(double %arg1, double %arg2) { + %ret = fadd double %arg1, %arg2 + ret double %ret + } + + define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { + %ret = add <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) { + %ret = fadd <4 x float> %arg1, %arg2 + ret <4 x float> %ret + } +... + +--- +name: test_add_i64 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr64 } +# ALL-NEXT: - { id: 1, class: gr64 } +# ALL-NEXT: - { id: 2, class: gr64 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: %0 = COPY %rdi +# ALL-NEXT: %1 = COPY %rsi +# ALL-NEXT: %2 = ADD64rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s64) = COPY %rdi + %1(s64) = COPY %rsi + %2(s64) = G_ADD %0, %1 + %rax = COPY %2(s64) + +... + +--- +name: test_add_i32 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr32 } +# ALL-NEXT: - { id: 1, class: gr32 } +# ALL-NEXT: - { id: 2, class: gr32 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: %0 = COPY %edi +# ALL-NEXT: %1 = COPY %esi +# ALL-NEXT: %2 = ADD32rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s32) = COPY %edi + %1(s32) = COPY %esi + %2(s32) = G_ADD %0, %1 + %rax = COPY %2(s32) + +... 
+--- +name: test_add_float +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512F-NEXT: - { id: 0, class: fr32 } +# NO_AVX512F-NEXT: - { id: 1, class: fr32 } +# NO_AVX512F-NEXT: - { id: 2, class: fr32 } +# AVX512ALL-NEXT: - { id: 0, class: fr32x } +# AVX512ALL-NEXT: - { id: 1, class: fr32x } +# AVX512ALL-NEXT: - { id: 2, class: fr32x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = ADDSSrr %0, %1 +# AVX-NEXT: %2 = VADDSSrr %0, %1 +# AVX512F-NEXT: %2 = VADDSSZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(s32) = COPY %xmm0 + %1(s32) = COPY %xmm1 + %2(s32) = G_FADD %0, %1 + %xmm0 = COPY %2(s32) + RET 0, implicit %xmm0 + +... +--- +name: test_add_double +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512F-NEXT: - { id: 0, class: fr64 } +# NO_AVX512F-NEXT: - { id: 1, class: fr64 } +# NO_AVX512F-NEXT: - { id: 2, class: fr64 } +# AVX512ALL-NEXT: - { id: 0, class: fr64x } +# AVX512ALL-NEXT: - { id: 1, class: fr64x } +# AVX512ALL-NEXT: - { id: 2, class: fr64x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = ADDSDrr %0, %1 +# AVX-NEXT: %2 = VADDSDrr %0, %1 +# AVX512F-NEXT: %2 = VADDSDZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(s64) = COPY %xmm0 + %1(s64) = COPY %xmm1 + %2(s64) = G_FADD %0, %1 + %xmm0 = COPY %2(s64) + RET 0, implicit %xmm0 + +... 
+--- +name: test_add_v4i32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = PADDDrr %0, %1 +# AVX-NEXT: %2 = VPADDDrr %0, %1 +# AVX512F-NEXT: %2 = VPADDDrr %0, %1 +# AVX512VL-NEXT: %2 = VPADDDZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_ADD %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_add_v4f32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = ADDPSrr %0, %1 +# AVX-NEXT: %2 = VADDPSrr %0, %1 +# AVX512F-NEXT: %2 = VADDPSrr %0, %1 +# AVX512VL-NEXT: %2 = VADDPSZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_FADD %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/frameIndex-instructionselect.mir b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir index 2fa9ac23a7afa..2fa9ac23a7afa 100644 --- a/test/CodeGen/X86/GlobalISel/frameIndex-instructionselect.mir +++ b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir diff --git a/test/CodeGen/X86/GlobalISel/x86_64-instructionselect.mir b/test/CodeGen/X86/GlobalISel/select-memop.mir index 17522c3cb45eb..943c9aceb4d19 100644 --- a/test/CodeGen/X86/GlobalISel/x86_64-instructionselect.mir +++ b/test/CodeGen/X86/GlobalISel/select-memop.mir @@ -4,67 +4,7 @@ # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL --- | - define i64 @test_add_i64(i64 %arg1, i64 %arg2) { - %ret = add i64 %arg1, %arg2 - ret i64 %ret - } - - define i32 @test_add_i32(i32 %arg1, i32 %arg2) { - %ret = add i32 %arg1, %arg2 - ret i32 %ret - } - - define i64 @test_sub_i64(i64 %arg1, i64 %arg2) { - %ret = sub i64 %arg1, %arg2 - ret i64 %ret - } - - define i32 @test_sub_i32(i32 %arg1, i32 %arg2) { - %ret = sub i32 %arg1, %arg2 - ret i32 %ret - } - - define float @test_add_float(float %arg1, float %arg2) { - %ret = fadd float %arg1, %arg2 - ret float %ret - } - - define double @test_add_double(double %arg1, double %arg2) { - %ret = fadd double %arg1, %arg2 - ret double %ret - } - - define float @test_sub_float(float %arg1, float %arg2) { - %ret = fsub float %arg1, %arg2 - ret float %ret - } - - define double @test_sub_double(double %arg1, double %arg2) { - %ret = fsub double %arg1, %arg2 - ret double %ret - } - - define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { - %ret = add <4 x i32> %arg1, %arg2 - ret <4 x i32> %ret - } - - define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { - %ret = sub <4 x i32> %arg1, %arg2 - ret <4 x i32> %ret - } - - define <4 x float> @test_add_v4f32(<4 
x float> %arg1, <4 x float> %arg2) { - %ret = fadd <4 x float> %arg1, %arg2 - ret <4 x float> %ret - } - - define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) { - %ret = fsub <4 x float> %arg1, %arg2 - ret <4 x float> %ret - } - - define i8 @test_load_i8(i8* %p1) { + define i8 @test_load_i8(i8* %p1) { %r = load i8, i8* %p1 ret i8 %r } @@ -88,12 +28,12 @@ %r = load float, float* %p1 ret float %r } - + define float @test_load_float_vecreg(float* %p1) { %r = load float, float* %p1 ret float %r } - + define double @test_load_double(double* %p1) { %r = load double, double* %p1 @@ -139,7 +79,7 @@ store double %val, double* %p1 ret double* %p1 } - + define double* @test_store_double_vec(double %val, double* %p1) { store double %val, double* %p1 ret double* %p1 @@ -156,386 +96,6 @@ } ... - ---- -name: test_add_i64 -legalized: true -regBankSelected: true -# ALL: registers: -# ALL-NEXT: - { id: 0, class: gr64 } -# ALL-NEXT: - { id: 1, class: gr64 } -# ALL-NEXT: - { id: 2, class: gr64 } -registers: - - { id: 0, class: gpr } - - { id: 1, class: gpr } - - { id: 2, class: gpr } -# ALL: %0 = COPY %rdi -# ALL-NEXT: %1 = COPY %rsi -# ALL-NEXT: %2 = ADD64rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %edi, %esi - - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi - %2(s64) = G_ADD %0, %1 - %rax = COPY %2(s64) - -... - ---- -name: test_add_i32 -legalized: true -regBankSelected: true -# ALL: registers: -# ALL-NEXT: - { id: 0, class: gr32 } -# ALL-NEXT: - { id: 1, class: gr32 } -# ALL-NEXT: - { id: 2, class: gr32 } -registers: - - { id: 0, class: gpr } - - { id: 1, class: gpr } - - { id: 2, class: gpr } -# ALL: %0 = COPY %edi -# ALL-NEXT: %1 = COPY %esi -# ALL-NEXT: %2 = ADD32rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %edi, %esi - - %0(s32) = COPY %edi - %1(s32) = COPY %esi - %2(s32) = G_ADD %0, %1 - %rax = COPY %2(s32) - -... 
- ---- -name: test_sub_i64 -legalized: true -regBankSelected: true -# ALL: registers: -# ALL-NEXT: - { id: 0, class: gr64 } -# ALL-NEXT: - { id: 1, class: gr64 } -# ALL-NEXT: - { id: 2, class: gr64 } -registers: - - { id: 0, class: gpr } - - { id: 1, class: gpr } - - { id: 2, class: gpr } -# ALL: %0 = COPY %rdi -# ALL-NEXT: %1 = COPY %rsi -# ALL-NEXT: %2 = SUB64rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %edi, %esi - - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi - %2(s64) = G_SUB %0, %1 - %rax = COPY %2(s64) - -... - ---- -name: test_sub_i32 -legalized: true -regBankSelected: true -# ALL: registers: -# ALL-NEXT: - { id: 0, class: gr32 } -# ALL-NEXT: - { id: 1, class: gr32 } -# ALL-NEXT: - { id: 2, class: gr32 } -registers: - - { id: 0, class: gpr } - - { id: 1, class: gpr } - - { id: 2, class: gpr } -# ALL: %0 = COPY %edi -# ALL-NEXT: %1 = COPY %esi -# ALL-NEXT: %2 = SUB32rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %edi, %esi - - %0(s32) = COPY %edi - %1(s32) = COPY %esi - %2(s32) = G_SUB %0, %1 - %rax = COPY %2(s32) - -... - ---- -name: test_add_float -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512F-NEXT: - { id: 0, class: fr32 } -# NO_AVX512F-NEXT: - { id: 1, class: fr32 } -# NO_AVX512F-NEXT: - { id: 2, class: fr32 } -# AVX512ALL-NEXT: - { id: 0, class: fr32x } -# AVX512ALL-NEXT: - { id: 1, class: fr32x } -# AVX512ALL-NEXT: - { id: 2, class: fr32x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = ADDSSrr %0, %1 -# AVX-NEXT: %2 = VADDSSrr %0, %1 -# AVX512F-NEXT: %2 = VADDSSZrr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 - %2(s32) = G_FADD %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 - -... 
---- -name: test_add_double -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512F-NEXT: - { id: 0, class: fr64 } -# NO_AVX512F-NEXT: - { id: 1, class: fr64 } -# NO_AVX512F-NEXT: - { id: 2, class: fr64 } -# AVX512ALL-NEXT: - { id: 0, class: fr64x } -# AVX512ALL-NEXT: - { id: 1, class: fr64x } -# AVX512ALL-NEXT: - { id: 2, class: fr64x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = ADDSDrr %0, %1 -# AVX-NEXT: %2 = VADDSDrr %0, %1 -# AVX512F-NEXT: %2 = VADDSDZrr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 - %2(s64) = G_FADD %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 - -... ---- -name: test_sub_float -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512F-NEXT: - { id: 0, class: fr32 } -# NO_AVX512F-NEXT: - { id: 1, class: fr32 } -# NO_AVX512F-NEXT: - { id: 2, class: fr32 } -# AVX512ALL-NEXT: - { id: 0, class: fr32x } -# AVX512ALL-NEXT: - { id: 1, class: fr32x } -# AVX512ALL-NEXT: - { id: 2, class: fr32x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = SUBSSrr %0, %1 -# AVX-NEXT: %2 = VSUBSSrr %0, %1 -# AVX512F-NEXT: %2 = VSUBSSZrr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 - %2(s32) = G_FSUB %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 - -... 
---- -name: test_sub_double -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512F-NEXT: - { id: 0, class: fr64 } -# NO_AVX512F-NEXT: - { id: 1, class: fr64 } -# NO_AVX512F-NEXT: - { id: 2, class: fr64 } -# AVX512ALL-NEXT: - { id: 0, class: fr64x } -# AVX512ALL-NEXT: - { id: 1, class: fr64x } -# AVX512ALL-NEXT: - { id: 2, class: fr64x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = SUBSDrr %0, %1 -# AVX-NEXT: %2 = VSUBSDrr %0, %1 -# AVX512F-NEXT: %2 = VSUBSDZrr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 - %2(s64) = G_FSUB %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 -... ---- -name: test_add_v4i32 -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } -# AVX512VL-NEXT: - { id: 0, class: vr128x } -# AVX512VL-NEXT: - { id: 1, class: vr128x } -# AVX512VL-NEXT: - { id: 2, class: vr128x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = PADDDrr %0, %1 -# AVX-NEXT: %2 = VPADDDrr %0, %1 -# AVX512F-NEXT: %2 = VPADDDrr %0, %1 -# AVX512VL-NEXT: %2 = VPADDDZ128rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 - %2(<4 x s32>) = G_ADD %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 - -... 
---- -name: test_sub_v4i32 -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } -# AVX512VL-NEXT: - { id: 0, class: vr128x } -# AVX512VL-NEXT: - { id: 1, class: vr128x } -# AVX512VL-NEXT: - { id: 2, class: vr128x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = PSUBDrr %0, %1 -# AVX-NEXT: %2 = VPSUBDrr %0, %1 -# AVX512F-NEXT: %2 = VPSUBDrr %0, %1 -# AVX512VL-NEXT: %2 = VPSUBDZ128rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 - %2(<4 x s32>) = G_SUB %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 - -... ---- -name: test_add_v4f32 -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } -# AVX512VL-NEXT: - { id: 0, class: vr128x } -# AVX512VL-NEXT: - { id: 1, class: vr128x } -# AVX512VL-NEXT: - { id: 2, class: vr128x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = ADDPSrr %0, %1 -# AVX-NEXT: %2 = VADDPSrr %0, %1 -# AVX512F-NEXT: %2 = VADDPSrr %0, %1 -# AVX512VL-NEXT: %2 = VADDPSZ128rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 - %2(<4 x s32>) = G_FADD %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 - -... 
---- -name: test_sub_v4f32 -alignment: 4 -legalized: true -regBankSelected: true -selected: false -tracksRegLiveness: true -# ALL: registers: -# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } -# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } -# AVX512VL-NEXT: - { id: 0, class: vr128x } -# AVX512VL-NEXT: - { id: 1, class: vr128x } -# AVX512VL-NEXT: - { id: 2, class: vr128x } -registers: - - { id: 0, class: vecr } - - { id: 1, class: vecr } - - { id: 2, class: vecr } -# ALL: %0 = COPY %xmm0 -# ALL-NEXT: %1 = COPY %xmm1 -# SSE-NEXT: %2 = SUBPSrr %0, %1 -# AVX-NEXT: %2 = VSUBPSrr %0, %1 -# AVX512F-NEXT: %2 = VSUBPSrr %0, %1 -# AVX512VL-NEXT: %2 = VSUBPSZ128rr %0, %1 -body: | - bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 - - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 - %2(<4 x s32>) = G_FSUB %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 - -... --- # ALL-LABEL: name: test_load_i8 name: test_load_i8 diff --git a/test/CodeGen/X86/GlobalISel/select-sub.mir b/test/CodeGen/X86/GlobalISel/select-sub.mir new file mode 100644 index 0000000000000..d4db6eec6d802 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-sub.mir @@ -0,0 +1,225 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel 
-run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL + +--- | + define i64 @test_sub_i64(i64 %arg1, i64 %arg2) { + %ret = sub i64 %arg1, %arg2 + ret i64 %ret + } + + define i32 @test_sub_i32(i32 %arg1, i32 %arg2) { + %ret = sub i32 %arg1, %arg2 + ret i32 %ret + } + + define float @test_sub_float(float %arg1, float %arg2) { + %ret = fsub float %arg1, %arg2 + ret float %ret + } + + define double @test_sub_double(double %arg1, double %arg2) { + %ret = fsub double %arg1, %arg2 + ret double %ret + } + + define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { + %ret = sub <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) { + %ret = fsub <4 x float> %arg1, %arg2 + ret <4 x float> %ret + } + +... +--- +name: test_sub_i64 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr64 } +# ALL-NEXT: - { id: 1, class: gr64 } +# ALL-NEXT: - { id: 2, class: gr64 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: %0 = COPY %rdi +# ALL-NEXT: %1 = COPY %rsi +# ALL-NEXT: %2 = SUB64rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s64) = COPY %rdi + %1(s64) = COPY %rsi + %2(s64) = G_SUB %0, %1 + %rax = COPY %2(s64) + +... + +--- +name: test_sub_i32 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr32 } +# ALL-NEXT: - { id: 1, class: gr32 } +# ALL-NEXT: - { id: 2, class: gr32 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: %0 = COPY %edi +# ALL-NEXT: %1 = COPY %esi +# ALL-NEXT: %2 = SUB32rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s32) = COPY %edi + %1(s32) = COPY %esi + %2(s32) = G_SUB %0, %1 + %rax = COPY %2(s32) + +... 
+--- +name: test_sub_float +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512F-NEXT: - { id: 0, class: fr32 } +# NO_AVX512F-NEXT: - { id: 1, class: fr32 } +# NO_AVX512F-NEXT: - { id: 2, class: fr32 } +# AVX512ALL-NEXT: - { id: 0, class: fr32x } +# AVX512ALL-NEXT: - { id: 1, class: fr32x } +# AVX512ALL-NEXT: - { id: 2, class: fr32x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = SUBSSrr %0, %1 +# AVX-NEXT: %2 = VSUBSSrr %0, %1 +# AVX512F-NEXT: %2 = VSUBSSZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(s32) = COPY %xmm0 + %1(s32) = COPY %xmm1 + %2(s32) = G_FSUB %0, %1 + %xmm0 = COPY %2(s32) + RET 0, implicit %xmm0 + +... +--- +name: test_sub_double +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512F-NEXT: - { id: 0, class: fr64 } +# NO_AVX512F-NEXT: - { id: 1, class: fr64 } +# NO_AVX512F-NEXT: - { id: 2, class: fr64 } +# AVX512ALL-NEXT: - { id: 0, class: fr64x } +# AVX512ALL-NEXT: - { id: 1, class: fr64x } +# AVX512ALL-NEXT: - { id: 2, class: fr64x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = SUBSDrr %0, %1 +# AVX-NEXT: %2 = VSUBSDrr %0, %1 +# AVX512F-NEXT: %2 = VSUBSDZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(s64) = COPY %xmm0 + %1(s64) = COPY %xmm1 + %2(s64) = G_FSUB %0, %1 + %xmm0 = COPY %2(s64) + RET 0, implicit %xmm0 +... 
+--- +name: test_sub_v4i32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = PSUBDrr %0, %1 +# AVX-NEXT: %2 = VPSUBDrr %0, %1 +# AVX512F-NEXT: %2 = VPSUBDrr %0, %1 +# AVX512VL-NEXT: %2 = VPSUBDZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_SUB %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_sub_v4f32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +tracksRegLiveness: true +# ALL: registers: +# NO_AVX512VL-NEXT: - { id: 0, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 1, class: vr128 } +# NO_AVX512VL-NEXT: - { id: 2, class: vr128 } +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %0 = COPY %xmm0 +# ALL-NEXT: %1 = COPY %xmm1 +# SSE-NEXT: %2 = SUBPSrr %0, %1 +# AVX-NEXT: %2 = VSUBPSrr %0, %1 +# AVX512F-NEXT: %2 = VSUBPSrr %0, %1 +# AVX512VL-NEXT: %2 = VSUBPSZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_FSUB %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/select-trunc.mir b/test/CodeGen/X86/GlobalISel/select-trunc.mir new file mode 100644 index 0000000000000..714340248ff6f --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-trunc.mir @@ -0,0 +1,183 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK +--- | + define i1 @trunc_i32toi1(i32 %a) { + %r = trunc i32 %a to i1 + ret i1 %r + } + + define i8 @trunc_i32toi8(i32 %a) { + %r = trunc i32 %a to i8 + ret i8 %r + } + + define i16 @trunc_i32toi16(i32 %a) { + %r = trunc i32 %a to i16 + ret i16 %r + } + + define i8 @trunc_i64toi8(i64 %a) { + %r = trunc i64 %a to i8 + ret i8 %r + } + + define i16 @trunc_i64toi16(i64 %a) { + %r = trunc i64 %a to i16 + ret i16 %r + } + + define i32 @trunc_i64toi32(i64 %a) { + %r = trunc i64 %a to i32 + ret i32 %r + } + +... +--- +name: trunc_i32toi1 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: trunc_i32toi1 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr32 } +# CHECK-NEXT: - { id: 1, class: gr8 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } +# CHECK: body: +# CHECK: %1 = COPY %0.sub_8 +body: | + bb.1 (%ir-block.0): + liveins: %edi + + %0(s32) = COPY %edi + %1(s1) = G_TRUNC %0(s32) + %al = COPY %1(s1) + RET 0, implicit %al + +... +--- +name: trunc_i32toi8 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: trunc_i32toi8 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr32 } +# CHECK-NEXT: - { id: 1, class: gr8 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } +# CHECK: body: +# CHECK: %1 = COPY %0.sub_8 +body: | + bb.1 (%ir-block.0): + liveins: %edi + + %0(s32) = COPY %edi + %1(s8) = G_TRUNC %0(s32) + %al = COPY %1(s8) + RET 0, implicit %al + +... 
+--- +name: trunc_i32toi16 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: trunc_i32toi16 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr32 } +# CHECK-NEXT: - { id: 1, class: gr16 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } +# CHECK: body: +# CHECK: %1 = COPY %0.sub_16 +body: | + bb.1 (%ir-block.0): + liveins: %edi + + %0(s32) = COPY %edi + %1(s16) = G_TRUNC %0(s32) + %ax = COPY %1(s16) + RET 0, implicit %ax + +... +--- +name: trunc_i64toi8 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: trunc_i64toi8 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr8 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } +# CHECK: body: +# CHECK: %1 = COPY %0.sub_8 +body: | + bb.1 (%ir-block.0): + liveins: %rdi + + %0(s64) = COPY %rdi + %1(s8) = G_TRUNC %0(s64) + %al = COPY %1(s8) + RET 0, implicit %al + +... +--- +name: trunc_i64toi16 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: trunc_i64toi16 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr16 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } +# CHECK: body: +# CHECK: %1 = COPY %0.sub_16 +body: | + bb.1 (%ir-block.0): + liveins: %rdi + + %0(s64) = COPY %rdi + %1(s16) = G_TRUNC %0(s64) + %ax = COPY %1(s16) + RET 0, implicit %ax + +... +--- +name: trunc_i64toi32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: trunc_i64toi32 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr32 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } +# CHECK: body: +# CHECK: %1 = COPY %0.sub_32 +body: | + bb.1 (%ir-block.0): + liveins: %rdi + + %0(s64) = COPY %rdi + %1(s32) = G_TRUNC %0(s64) + %eax = COPY %1(s32) + RET 0, implicit %eax + +... 
diff --git a/test/CodeGen/X86/GlobalISel/trunc.ll b/test/CodeGen/X86/GlobalISel/trunc.ll new file mode 100644 index 0000000000000..a56fc3b5a87f4 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/trunc.ll @@ -0,0 +1,57 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=CHECK + +define i1 @trunc_i32toi1(i32 %a) { +; CHECK-LABEL: trunc_i32toi1: +; CHECK: # BB#0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: retq + %r = trunc i32 %a to i1 + ret i1 %r +} + +define i8 @trunc_i32toi8(i32 %a) { +; CHECK-LABEL: trunc_i32toi8: +; CHECK: # BB#0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: retq + %r = trunc i32 %a to i8 + ret i8 %r +} + +define i16 @trunc_i32toi16(i32 %a) { +; CHECK-LABEL: trunc_i32toi16: +; CHECK: # BB#0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: retq + %r = trunc i32 %a to i16 + ret i16 %r +} + +define i8 @trunc_i64toi8(i64 %a) { +; CHECK-LABEL: trunc_i64toi8: +; CHECK: # BB#0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: retq + %r = trunc i64 %a to i8 + ret i8 %r +} + +define i16 @trunc_i64toi16(i64 %a) { +; CHECK-LABEL: trunc_i64toi16: +; CHECK: # BB#0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: retq + %r = trunc i64 %a to i16 + ret i16 %r +} + +define i32 @trunc_i64toi32(i64 %a) { +; CHECK-LABEL: trunc_i64toi32: +; CHECK: # BB#0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: retq + %r = trunc i64 %a to i32 + ret i32 %r +} + diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll index 4303b62544642..f89f6e1de1abe 100644 --- a/test/CodeGen/X86/MergeConsecutiveStores.ll +++ b/test/CodeGen/X86/MergeConsecutiveStores.ll @@ -582,3 +582,22 @@ define void @merge_vec_element_and_scalar_load([6 x i64]* %array) { ; CHECK-NEXT: movq %rcx, 40(%rdi) ; CHECK-NEXT: retq } + + + +; Don't let a non-consecutive store thwart merging of the last two. 
+define void @almost_consecutive_stores(i8* %p) { + store i8 0, i8* %p + %p1 = getelementptr i8, i8* %p, i64 42 + store i8 1, i8* %p1 + %p2 = getelementptr i8, i8* %p, i64 2 + store i8 2, i8* %p2 + %p3 = getelementptr i8, i8* %p, i64 3 + store i8 3, i8* %p3 + ret void +; CHECK-LABEL: almost_consecutive_stores +; CHECK-DAG: movb $0, (%rdi) +; CHECK-DAG: movb $1, 42(%rdi) +; CHECK-DAG: movw $770, 2(%rdi) +; CHECK: retq +} diff --git a/test/CodeGen/X86/avx-logic.ll b/test/CodeGen/X86/avx-logic.ll index e9e7d5aea2737..89abbabee27ce 100644 --- a/test/CodeGen/X86/avx-logic.ll +++ b/test/CodeGen/X86/avx-logic.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 define <4 x double> @andpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp { ; CHECK-LABEL: andpd256: @@ -271,3 +271,35 @@ entry: ret <2 x i64> %x } +define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind { +; AVX-LABEL: and_xor_splat1_v4i32: +; AVX: # BB#0: +; AVX-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: retq +; +; AVX512-LABEL: and_xor_splat1_v4i32: +; AVX512: # BB#0: +; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm1 +; AVX512-NEXT: vandnps %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: retq + %xor = xor <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1> + %and = and <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1> + ret <4 x i32> %and +} + +define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind { +; AVX-LABEL: and_xor_splat1_v4i64: +; AVX: # BB#0: +; AVX-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0 +; AVX-NEXT: retq +; +; AVX512-LABEL: and_xor_splat1_v4i64: +; 
AVX512: # BB#0: +; AVX512-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1 +; AVX512-NEXT: vandnps %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: retq + %xor = xor <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1> + %and = and <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1> + ret <4 x i64> %and +} + diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll index 796ee83b6fa79..b31b00e54e83a 100644 --- a/test/CodeGen/X86/avx512-ext.ll +++ b/test/CodeGen/X86/avx512-ext.ll @@ -542,7 +542,7 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind re ; KNL: ## BB#0: ; KNL-NEXT: vpslld $31, %xmm0, %xmm0 ; KNL-NEXT: vpsrad $31, %xmm0, %xmm0 -; KNL-NEXT: vpmovsxdq %xmm0, %ymm0 +; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; KNL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero ; KNL-NEXT: vpand %ymm1, %ymm0, %ymm0 ; KNL-NEXT: retq @@ -923,7 +923,7 @@ define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind ; KNL: ## BB#0: ; KNL-NEXT: vpslld $31, %xmm0, %xmm0 ; KNL-NEXT: vpsrad $31, %xmm0, %xmm0 -; KNL-NEXT: vpmovsxdq %xmm0, %ymm0 +; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; KNL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero ; KNL-NEXT: vpand %ymm1, %ymm0, %ymm0 ; KNL-NEXT: retq @@ -1110,7 +1110,7 @@ define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind ; KNL: ## BB#0: ; KNL-NEXT: vpslld $31, %xmm0, %xmm0 ; KNL-NEXT: vpsrad $31, %xmm0, %xmm0 -; KNL-NEXT: vpmovsxdq %xmm0, %ymm0 +; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; KNL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero ; KNL-NEXT: vpand %ymm1, %ymm0, %ymm0 ; KNL-NEXT: retq 
@@ -1173,7 +1173,7 @@ define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind ; KNL: ## BB#0: ; KNL-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL-NEXT: vpsrad $31, %xmm1, %xmm1 -; KNL-NEXT: vpmovsxdq %xmm1, %ymm1 +; KNL-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; KNL-NEXT: vpand %ymm0, %ymm1, %ymm0 ; KNL-NEXT: retq diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll index aec1339d653da..7103efe050a49 100644 --- a/test/CodeGen/X86/avx512-mask-op.ll +++ b/test/CodeGen/X86/avx512-mask-op.ll @@ -1430,7 +1430,8 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) { define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) { ; KNL-LABEL: store_v2i1: ; KNL: ## BB#0: -; KNL-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; KNL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; KNL-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vpsllq $63, %zmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax @@ -1447,7 +1448,8 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) { ; ; AVX512BW-LABEL: store_v2i1: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0 ; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax @@ -1457,7 +1459,8 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) { ; ; AVX512DQ-LABEL: store_v2i1: ; AVX512DQ: ## BB#0: -; AVX512DQ-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; AVX512DQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512DQ-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0 ; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0 ; AVX512DQ-NEXT: kmovb %k0, (%rdi) @@ -1471,7 +1474,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) { define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) { 
; KNL-LABEL: store_v4i1: ; KNL: ## BB#0: -; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 +; KNL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vpslld $31, %ymm0, %ymm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 @@ -1489,7 +1492,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) { ; ; AVX512BW-LABEL: store_v4i1: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 +; AVX512BW-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0 ; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0 @@ -1500,7 +1503,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) { ; ; AVX512DQ-LABEL: store_v4i1: ; AVX512DQ: ## BB#0: -; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 +; AVX512DQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; AVX512DQ-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0 ; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0 diff --git a/test/CodeGen/X86/bswap_tree.ll b/test/CodeGen/X86/bswap_tree.ll new file mode 100644 index 0000000000000..35a28af855796 --- /dev/null +++ b/test/CodeGen/X86/bswap_tree.ll @@ -0,0 +1,105 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK64 + +; Check reconstructing bswap from shifted masks and tree of ORs + +; Match a 32-bit packed halfword bswap. 
That is +; ((x & 0x000000ff) << 8) | +; ((x & 0x0000ff00) >> 8) | +; ((x & 0x00ff0000) << 8) | +; ((x & 0xff000000) >> 8) +; => (rotl (bswap x), 16) +define i32 @test1(i32 %x) nounwind { +; CHECK-LABEL: test1: +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: andl $16711680, %edx # imm = 0xFF0000 +; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: andl $-16777216, %eax # imm = 0xFF000000 +; CHECK-NEXT: shll $8, %edx +; CHECK-NEXT: shrl $8, %eax +; CHECK-NEXT: bswapl %ecx +; CHECK-NEXT: shrl $16, %ecx +; CHECK-NEXT: orl %edx, %eax +; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: retl +; +; CHECK64-LABEL: test1: +; CHECK64: # BB#0: +; CHECK64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> +; CHECK64-NEXT: movl %edi, %eax +; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000 +; CHECK64-NEXT: movl %edi, %ecx +; CHECK64-NEXT: andl $-16777216, %ecx # imm = 0xFF000000 +; CHECK64-NEXT: shll $8, %eax +; CHECK64-NEXT: shrl $8, %ecx +; CHECK64-NEXT: bswapl %edi +; CHECK64-NEXT: shrl $16, %edi +; CHECK64-NEXT: orl %eax, %ecx +; CHECK64-NEXT: leal (%rcx,%rdi), %eax +; CHECK64-NEXT: retq + %byte0 = and i32 %x, 255 ; 0x000000ff + %byte1 = and i32 %x, 65280 ; 0x0000ff00 + %byte2 = and i32 %x, 16711680 ; 0x00ff0000 + %byte3 = and i32 %x, 4278190080 ; 0xff000000 + %tmp0 = shl i32 %byte0, 8 + %tmp1 = lshr i32 %byte1, 8 + %tmp2 = shl i32 %byte2, 8 + %tmp3 = lshr i32 %byte3, 8 + %or0 = or i32 %tmp0, %tmp1 + %or1 = or i32 %tmp2, %tmp3 + %result = or i32 %or0, %or1 + ret i32 %result +} + +; the same as test1, just shifts before the "and" +; ((x << 8) & 0x0000ff00) | +; ((x >> 8) & 0x000000ff) | +; ((x << 8) & 0xff000000) | +; ((x >> 8) & 0x00ff0000) +define i32 @test2(i32 %x) nounwind { +; CHECK-LABEL: test2: +; CHECK: # BB#0: +; CHECK-NEXT: pushl %esi +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl %eax, %ecx +; CHECK-NEXT: shll $8, %ecx +; CHECK-NEXT: shrl $8, %eax +; CHECK-NEXT: movzwl %cx, %edx +; CHECK-NEXT: movzbl %al, 
%esi +; CHECK-NEXT: andl $-16777216, %ecx # imm = 0xFF000000 +; CHECK-NEXT: andl $16711680, %eax # imm = 0xFF0000 +; CHECK-NEXT: orl %edx, %esi +; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: orl %esi, %eax +; CHECK-NEXT: popl %esi +; CHECK-NEXT: retl +; +; CHECK64-LABEL: test2: +; CHECK64: # BB#0: +; CHECK64-NEXT: movl %edi, %ecx +; CHECK64-NEXT: shll $8, %ecx +; CHECK64-NEXT: shrl $8, %edi +; CHECK64-NEXT: movzwl %cx, %edx +; CHECK64-NEXT: movzbl %dil, %eax +; CHECK64-NEXT: andl $-16777216, %ecx # imm = 0xFF000000 +; CHECK64-NEXT: andl $16711680, %edi # imm = 0xFF0000 +; CHECK64-NEXT: orl %edx, %eax +; CHECK64-NEXT: orl %ecx, %edi +; CHECK64-NEXT: orl %edi, %eax +; CHECK64-NEXT: retq + %byte1 = shl i32 %x, 8 + %byte0 = lshr i32 %x, 8 + %byte3 = shl i32 %x, 8 + %byte2 = lshr i32 %x, 8 + %tmp1 = and i32 %byte1, 65280 ; 0x0000ff00 + %tmp0 = and i32 %byte0, 255 ; 0x000000ff + %tmp3 = and i32 %byte3, 4278190080 ; 0xff000000 + %tmp2 = and i32 %byte2, 16711680 ; 0x00ff0000 + %or0 = or i32 %tmp0, %tmp1 + %or1 = or i32 %tmp2, %tmp3 + %result = or i32 %or0, %or1 + ret i32 %result +} diff --git a/test/CodeGen/X86/bswap_tree2.ll b/test/CodeGen/X86/bswap_tree2.ll new file mode 100644 index 0000000000000..a9c74df9d0d91 --- /dev/null +++ b/test/CodeGen/X86/bswap_tree2.ll @@ -0,0 +1,150 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK64 + +; Check a few invalid patterns for halfword bswap pattern matching + +; Don't match a near-miss 32-bit packed halfword bswap +; (with only half of the swap tree valid). 
+ define i32 @test1(i32 %x) nounwind { +; CHECK-LABEL: test1: +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: andl $16711680, %edx # imm = 0xFF0000 +; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: orl $-16777216, %eax # imm = 0xFF000000 +; CHECK-NEXT: shll $8, %edx +; CHECK-NEXT: shrl $8, %eax +; CHECK-NEXT: bswapl %ecx +; CHECK-NEXT: shrl $16, %ecx +; CHECK-NEXT: orl %edx, %eax +; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: retl +; +; CHECK64-LABEL: test1: +; CHECK64: # BB#0: +; CHECK64-NEXT: movl %edi, %ecx +; CHECK64-NEXT: andl $16711680, %ecx # imm = 0xFF0000 +; CHECK64-NEXT: movl %edi, %eax +; CHECK64-NEXT: orl $-16777216, %eax # imm = 0xFF000000 +; CHECK64-NEXT: shll $8, %ecx +; CHECK64-NEXT: shrl $8, %eax +; CHECK64-NEXT: bswapl %edi +; CHECK64-NEXT: shrl $16, %edi +; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: orl %edi, %eax +; CHECK64-NEXT: retq + %byte0 = and i32 %x, 255 ; 0x000000ff + %byte1 = and i32 %x, 65280 ; 0x0000ff00 + %byte2 = and i32 %x, 16711680 ; 0x00ff0000 + %byte3 = or i32 %x, 4278190080 ; 0xff000000 + %tmp0 = shl i32 %byte0, 8 + %tmp1 = lshr i32 %byte1, 8 + %tmp2 = shl i32 %byte2, 8 + %tmp3 = lshr i32 %byte3, 8 + %or0 = or i32 %tmp0, %tmp1 + %or1 = or i32 %tmp2, %tmp3 + %result = or i32 %or0, %or1 + ret i32 %result +} + +; Don't match a near-miss 32-bit packed halfword bswap +; (with swapped lshr/shl) +; ((x >> 8) & 0x0000ff00) | +; ((x << 8) & 0x000000ff) | +; ((x << 8) & 0xff000000) | +; ((x >> 8) & 0x00ff0000) +define i32 @test2(i32 %x) nounwind { +; CHECK-LABEL: test2: +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: shrl $8, %eax +; CHECK-NEXT: shll $8, %ecx +; CHECK-NEXT: movl %eax, %edx +; CHECK-NEXT: andl $65280, %edx # imm = 0xFF00 +; CHECK-NEXT: andl $-16777216, %ecx # imm = 0xFF000000 +; CHECK-NEXT: andl $16711680, %eax # imm = 0xFF0000 +; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: orl %edx, %eax +; CHECK-NEXT: retl +; 
+; CHECK64-LABEL: test2: +; CHECK64: # BB#0: +; CHECK64-NEXT: movl %edi, %eax +; CHECK64-NEXT: shrl $8, %eax +; CHECK64-NEXT: shll $8, %edi +; CHECK64-NEXT: movl %eax, %ecx +; CHECK64-NEXT: andl $65280, %ecx # imm = 0xFF00 +; CHECK64-NEXT: andl $-16777216, %edi # imm = 0xFF000000 +; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000 +; CHECK64-NEXT: orl %edi, %eax +; CHECK64-NEXT: leal (%rax,%rcx), %eax +; CHECK64-NEXT: retq + %byte1 = lshr i32 %x, 8 + %byte0 = shl i32 %x, 8 + %byte3 = shl i32 %x, 8 + %byte2 = lshr i32 %x, 8 + %tmp1 = and i32 %byte1, 65280 ; 0x0000ff00 + %tmp0 = and i32 %byte0, 255 ; 0x000000ff + %tmp3 = and i32 %byte3, 4278190080 ; 0xff000000 + %tmp2 = and i32 %byte2, 16711680 ; 0x00ff0000 + %or0 = or i32 %tmp0, %tmp1 + %or1 = or i32 %tmp2, %tmp3 + %result = or i32 %or0, %or1 + ret i32 %result +} + +; Invalid pattern involving a unary op +define i32 @test3(float %x) nounwind { +; CHECK-LABEL: test3: +; CHECK: # BB#0: +; CHECK-NEXT: subl $8, %esp +; CHECK-NEXT: flds {{[0-9]+}}(%esp) +; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp) +; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movw $3199, {{[0-9]+}}(%esp) # imm = 0xC7F +; CHECK-NEXT: fldcw {{[0-9]+}}(%esp) +; CHECK-NEXT: movw %ax, {{[0-9]+}}(%esp) +; CHECK-NEXT: fistpl {{[0-9]+}}(%esp) +; CHECK-NEXT: fldcw {{[0-9]+}}(%esp) +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: shll $8, %edx +; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: shrl $8, %eax +; CHECK-NEXT: andl $65280, %ecx # imm = 0xFF00 +; CHECK-NEXT: andl $-16777216, %edx # imm = 0xFF000000 +; CHECK-NEXT: andl $16711680, %eax # imm = 0xFF0000 +; CHECK-NEXT: orl %edx, %eax +; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: addl $8, %esp +; CHECK-NEXT: retl +; +; CHECK64-LABEL: test3: +; CHECK64: # BB#0: +; CHECK64-NEXT: cvttss2si %xmm0, %ecx +; CHECK64-NEXT: movl %ecx, %edx +; CHECK64-NEXT: shll $8, %edx +; CHECK64-NEXT: movl %ecx, %eax +; CHECK64-NEXT: shrl $8, %eax +; CHECK64-NEXT: andl $65280, %ecx # imm 
= 0xFF00 +; CHECK64-NEXT: andl $-16777216, %edx # imm = 0xFF000000 +; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000 +; CHECK64-NEXT: orl %edx, %eax +; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: retq + %integer = fptosi float %x to i32 + %byte0 = shl i32 %integer, 8 + %byte3 = shl i32 %integer, 8 + %byte2 = lshr i32 %integer, 8 + %tmp1 = and i32 %integer, 65280 ; 0x0000ff00 + %tmp0 = and i32 %byte0, 255 ; 0x000000ff + %tmp3 = and i32 %byte3, 4278190080 ; 0xff000000 + %tmp2 = and i32 %byte2, 16711680 ; 0x00ff0000 + %or0 = or i32 %tmp0, %tmp1 + %or1 = or i32 %tmp2, %tmp3 + %result = or i32 %or0, %or1 + ret i32 %result +} diff --git a/test/CodeGen/X86/combine-or.ll b/test/CodeGen/X86/combine-or.ll index e4cf296432ba9..d7f52d2479885 100644 --- a/test/CodeGen/X86/combine-or.ll +++ b/test/CodeGen/X86/combine-or.ll @@ -430,6 +430,7 @@ define <4 x i32> @test2f(<4 x i32> %a, <4 x i32> %b) { ret <4 x i32> %or } +; TODO: Why would we do this? ; (or (and X, c1), c2) -> (and (or X, c2), c1|c2) define <2 x i64> @or_and_v2i64(<2 x i64> %a0) { @@ -438,16 +439,17 @@ define <2 x i64> @or_and_v2i64(<2 x i64> %a0) { ; CHECK-NEXT: andps {{.*}}(%rip), %xmm0 ; CHECK-NEXT: orps {{.*}}(%rip), %xmm0 ; CHECK-NEXT: retq - %1 = and <2 x i64> %a0, <i64 1, i64 1> + %1 = and <2 x i64> %a0, <i64 7, i64 7> %2 = or <2 x i64> %1, <i64 3, i64 3> ret <2 x i64> %2 } +; If all masked bits are going to be set, that's a constant fold. 
+ define <4 x i32> @or_and_v4i32(<4 x i32> %a0) { ; CHECK-LABEL: or_and_v4i32: ; CHECK: # BB#0: -; CHECK-NEXT: andps {{.*}}(%rip), %xmm0 -; CHECK-NEXT: orps {{.*}}(%rip), %xmm0 +; CHECK-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3] ; CHECK-NEXT: retq %1 = and <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1> %2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3> @@ -459,9 +461,7 @@ define <4 x i32> @or_and_v4i32(<4 x i32> %a0) { define <2 x i64> @or_zext_v2i32(<2 x i32> %a0) { ; CHECK-LABEL: or_zext_v2i32: ; CHECK: # BB#0: -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] -; CHECK-NEXT: por {{.*}}(%rip), %xmm0 +; CHECK-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295] ; CHECK-NEXT: retq %1 = zext <2 x i32> %a0 to <2 x i64> %2 = or <2 x i64> %1, <i64 4294967295, i64 4294967295> @@ -471,9 +471,7 @@ define <2 x i64> @or_zext_v2i32(<2 x i32> %a0) { define <4 x i32> @or_zext_v4i16(<4 x i16> %a0) { ; CHECK-LABEL: or_zext_v4i16: ; CHECK: # BB#0: -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] -; CHECK-NEXT: por {{.*}}(%rip), %xmm0 +; CHECK-NEXT: movaps {{.*#+}} xmm0 = [65535,65535,65535,65535] ; CHECK-NEXT: retq %1 = zext <4 x i16> %a0 to <4 x i32> %2 = or <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535> diff --git a/test/CodeGen/X86/dbg-baseptr.ll b/test/CodeGen/X86/dbg-baseptr.ll new file mode 100644 index 0000000000000..f69c78af73677 --- /dev/null +++ b/test/CodeGen/X86/dbg-baseptr.ll @@ -0,0 +1,75 @@ +; RUN: llc -o - %s | FileCheck %s +; This test checks that parameters on the stack pointer are correctly +; referenced by debug info. 
+target triple = "x86_64--" + +@glob = external global i64 +@ptr = external global i32* +%struct.s = type { i32, i32, i32, i32, i32 } + +; CHECK-LABEL: f0: +; CHECK: DEBUG_VALUE: f:input <- [%RSP+8] +define i32 @f0(%struct.s* byval align 8 %input) !dbg !8 { + call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18 + ret i32 42 +} + +; CHECK-LABEL: f1: +; CHECK: DEBUG_VALUE: f:input <- [%RBP+16] +define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 { + %val = load i64, i64* @glob + ; this alloca should force FP usage. + %stackspace = alloca i32, i64 %val, align 1 + store i32* %stackspace, i32** @ptr + call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18 + ret i32 42 +} + +; CHECK-LABEL: f2: +; Just check that we are indeed aligning the stack and setting up a base pointer +; in RBX. +; CHECK: pushq %rbp +; CHECK: movq %rsp, %rbp +; CHECK: pushq %rbx +; CHECK: andq $-64, %rsp +; CHECK: subq $64, %rsp +; CHECK: movq %rsp, %rbx +; The parameter should still be referenced through RBP though. 
+; CHECK-NOT: DEBUG_VALUE: f:input <- [%RBX +; CHECK: DEBUG_VALUE: f:input <- [%RBP+16] +define i32 @f2(%struct.s* byval align 8 %input) !dbg !8 { + %val = load i64, i64* @glob + %stackspace = alloca i32, i64 %val, align 64 + store i32* %stackspace, i32** @ptr + call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18 + ret i32 42 +} + +declare void @llvm.dbg.declare(metadata, metadata, metadata) + +!llvm.dbg.cu = !{!2} +!llvm.module.flags = !{!0, !1} + +!0 = !{i32 2, !"Dwarf Version", i32 4} +!1 = !{i32 2, !"Debug Info Version", i32 3} +!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3) +!3 = !DIFile(filename: "dbg-baseptr.ll", directory: "/") +!4 = !DILocalVariable(name: "input", arg: 1, scope: !8, file: !3, line: 5, type: !9) +!5 = !{} + +!6 = !DISubroutineType(types: !7) +!7 = !{!10, !9} + +!8 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5) + +!9 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s", elements: !11) +!10 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned) +!11 = !{!12, !13, !14, !15, !16} +!12 = !DIDerivedType(tag: DW_TAG_member, name: "a", baseType: !10, size: 32) +!13 = !DIDerivedType(tag: DW_TAG_member, name: "b", baseType: !10, size: 32, offset: 32) +!14 = !DIDerivedType(tag: DW_TAG_member, name: "c", baseType: !10, size: 32, offset: 64) +!15 = !DIDerivedType(tag: DW_TAG_member, name: "d", baseType: !10, size: 32, offset: 96) +!16 = !DIDerivedType(tag: DW_TAG_member, name: "e", baseType: !10, size: 32, offset: 128) + +!17 = !DIExpression() +!18 = !DILocation(line: 5, scope: !8) diff --git a/test/CodeGen/X86/extract-store.ll b/test/CodeGen/X86/extract-store.ll index 1751f03731d3a..5286a1b635d1a 100644 --- a/test/CodeGen/X86/extract-store.ll +++ b/test/CodeGen/X86/extract-store.ll @@ -5,6 +5,10 @@ ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck 
%s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE41-X64 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=AVX-X32 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=AVX-X64 +; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx -enable-legalize-types-checking \ +; RUN: | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE-F128 +; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx -enable-legalize-types-checking \ +; RUN: | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE-F128 define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) nounwind { ; SSE2-X32-LABEL: extract_i8_0: @@ -458,6 +462,26 @@ define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind { ret void } +define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind { +; SSE-F128-LABEL: extract_f128_0: +; SSE-F128: # BB#0: +; SSE-F128-NEXT: movaps %xmm0, (%rdi) +; SSE-F128-NEXT: retq + %vecext = extractelement <2 x fp128> %foo, i32 0 + store fp128 %vecext, fp128* %dst, align 1 + ret void +} + +define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind { +; SSE-F128-LABEL: extract_f128_1: +; SSE-F128: # BB#0: +; SSE-F128-NEXT: movaps %xmm1, (%rdi) +; SSE-F128-NEXT: retq + %vecext = extractelement <2 x fp128> %foo, i32 1 + store fp128 %vecext, fp128* %dst, align 1 + ret void +} + define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind { ; X32-LABEL: extract_i8_undef: ; X32: # BB#0: @@ -535,3 +559,16 @@ define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwi store double %vecext, double* %dst, align 1 ret void } + +define void @extract_f128_undef(fp128* nocapture %dst, <2 x fp128> %foo) nounwind { +; X32-LABEL: extract_f128_undef: +; X32: # BB#0: +; X32-NEXT: retl +; +; X64-LABEL: extract_f128_undef: +; X64: # BB#0: +; X64-NEXT: retq + 
%vecext = extractelement <2 x fp128> %foo, i32 2 ; undef + store fp128 %vecext, fp128* %dst, align 1 + ret void +} diff --git a/test/CodeGen/X86/fp128-extract.ll b/test/CodeGen/X86/fp128-extract.ll new file mode 100644 index 0000000000000..5006ac898c717 --- /dev/null +++ b/test/CodeGen/X86/fp128-extract.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \ +; RUN: -enable-legalize-types-checking | FileCheck %s +; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \ +; RUN: -enable-legalize-types-checking | FileCheck %s + +; Test the softened result of extractelement op code. +define fp128 @TestExtract(<2 x double> %x) { +entry: + ; Simplified instruction pattern from the output of llvm before r289042, + ; for a boost function ...::insert<...>::traverse<...>(). + %a = fpext <2 x double> %x to <2 x fp128> + %0 = extractelement <2 x fp128> %a, i32 0 + %1 = extractelement <2 x fp128> %a, i32 1 + %2 = fmul fp128 %0, %1 + ret fp128 %2 +; CHECK-LABEL: TestExtract: +; CHECK: movaps %xmm0, (%rsp) +; CHECK: callq __extenddftf2 +; CHECK: callq __extenddftf2 +; CHECK: callq __multf3 +; CHECK: retq +} diff --git a/test/CodeGen/X86/i64-to-float.ll b/test/CodeGen/X86/i64-to-float.ll index da92bdb55d7c6..3da1a360e2904 100644 --- a/test/CodeGen/X86/i64-to-float.ll +++ b/test/CodeGen/X86/i64-to-float.ll @@ -224,36 +224,32 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind { ; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0] ; X64-SSE-NEXT: movdqa %xmm0, %xmm2 ; X64-SSE-NEXT: pxor %xmm1, %xmm2 -; X64-SSE-NEXT: movdqa {{.*#+}} xmm3 = [18446744073709551361,18446744073709551361] -; X64-SSE-NEXT: movdqa %xmm1, %xmm4 -; X64-SSE-NEXT: pxor %xmm3, %xmm4 -; X64-SSE-NEXT: movdqa %xmm4, %xmm5 -; X64-SSE-NEXT: pcmpgtd %xmm2, %xmm5 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] -; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm4 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3] -; X64-SSE-NEXT: pand %xmm6, %xmm2 -; X64-SSE-NEXT: 
pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] -; X64-SSE-NEXT: por %xmm2, %xmm4 -; X64-SSE-NEXT: movdqa %xmm4, %xmm2 +; X64-SSE-NEXT: movdqa {{.*#+}} xmm3 = [18446744071562067713,18446744071562067713] +; X64-SSE-NEXT: movdqa %xmm3, %xmm4 +; X64-SSE-NEXT: pcmpgtd %xmm2, %xmm4 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; X64-SSE-NEXT: pcmpeqd %xmm3, %xmm2 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; X64-SSE-NEXT: pand %xmm5, %xmm2 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] +; X64-SSE-NEXT: por %xmm2, %xmm3 +; X64-SSE-NEXT: movdqa %xmm3, %xmm2 ; X64-SSE-NEXT: pandn %xmm0, %xmm2 -; X64-SSE-NEXT: pand %xmm3, %xmm4 -; X64-SSE-NEXT: por %xmm2, %xmm4 -; X64-SSE-NEXT: movdqa %xmm4, %xmm0 -; X64-SSE-NEXT: pxor %xmm1, %xmm0 -; X64-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255] -; X64-SSE-NEXT: pxor %xmm2, %xmm1 -; X64-SSE-NEXT: movdqa %xmm0, %xmm3 -; X64-SSE-NEXT: pcmpgtd %xmm1, %xmm3 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2] +; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm3 +; X64-SSE-NEXT: por %xmm2, %xmm3 +; X64-SSE-NEXT: pxor %xmm3, %xmm1 +; X64-SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483903,2147483903] +; X64-SSE-NEXT: movdqa %xmm1, %xmm2 +; X64-SSE-NEXT: pcmpgtd %xmm0, %xmm2 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2] ; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm1 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] -; X64-SSE-NEXT: pand %xmm5, %xmm0 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3] +; X64-SSE-NEXT: pand %xmm4, %xmm0 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; X64-SSE-NEXT: por %xmm0, %xmm1 ; X64-SSE-NEXT: movdqa %xmm1, %xmm0 -; X64-SSE-NEXT: pandn %xmm4, %xmm0 -; X64-SSE-NEXT: pand %xmm2, %xmm1 +; X64-SSE-NEXT: pandn %xmm3, %xmm0 +; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm1 ; X64-SSE-NEXT: por %xmm0, %xmm1 ; X64-SSE-NEXT: movd %xmm1, %rax ; X64-SSE-NEXT: xorps %xmm0, %xmm0 diff --git a/test/CodeGen/X86/known-signbits-vector.ll b/test/CodeGen/X86/known-signbits-vector.ll index cea9ac26edbc5..4c3c8bbd793e5 100644 --- 
a/test/CodeGen/X86/known-signbits-vector.ll +++ b/test/CodeGen/X86/known-signbits-vector.ll @@ -100,21 +100,27 @@ define float @signbits_ashr_extract_sitofp(<2 x i64> %a0) nounwind { define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwind { ; X32-LABEL: signbits_ashr_insert_ashr_extract_sitofp: ; X32: # BB#0: -; X32-NEXT: pushl %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: pushl %ebp +; X32-NEXT: movl %esp, %ebp +; X32-NEXT: andl $-8, %esp +; X32-NEXT: subl $16, %esp +; X32-NEXT: movl 8(%ebp), %eax +; X32-NEXT: movl 12(%ebp), %ecx ; X32-NEXT: shrdl $30, %ecx, %eax ; X32-NEXT: sarl $30, %ecx ; X32-NEXT: vmovd %eax, %xmm0 ; X32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 -; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 -; X32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; X32-NEXT: vpinsrd $2, 16(%ebp), %xmm0, %xmm0 +; X32-NEXT: vpinsrd $3, 20(%ebp), %xmm0, %xmm0 +; X32-NEXT: vpsrad $3, %xmm0, %xmm1 ; X32-NEXT: vpsrlq $3, %xmm0, %xmm0 -; X32-NEXT: vmovd %xmm0, %eax -; X32-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm0 -; X32-NEXT: vmovss %xmm0, (%esp) -; X32-NEXT: flds (%esp) -; X32-NEXT: popl %eax +; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp) +; X32-NEXT: fildll {{[0-9]+}}(%esp) +; X32-NEXT: fstps {{[0-9]+}}(%esp) +; X32-NEXT: flds {{[0-9]+}}(%esp) +; X32-NEXT: movl %ebp, %esp +; X32-NEXT: popl %ebp ; X32-NEXT: retl ; ; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp: @@ -127,7 +133,7 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin ; X64-NEXT: vpsrlq $3, %xmm0, %xmm0 ; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] ; X64-NEXT: vmovq %xmm0, %rax -; X64-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0 +; X64-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0 ; X64-NEXT: retq %1 = ashr i64 %a0, 30 %2 = insertelement <2 x i64> undef, i64 %1, i32 0 diff --git a/test/CodeGen/X86/madd.ll 
b/test/CodeGen/X86/madd.ll index fdc5ace8d9bcf..d332b2f3169f0 100644 --- a/test/CodeGen/X86/madd.ll +++ b/test/CodeGen/X86/madd.ll @@ -1,27 +1,86 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512 -;SSE2-label: @_Z10test_shortPsS_i -;SSE2: movdqu -;SSE2-NEXT: movdqu -;SSE2-NEXT: pmaddwd -;SSE2-NEXT: paddd - -;AVX2-label: @_Z10test_shortPsS_i -;AVX2: vmovdqu -;AVX2-NEXT: vpmaddwd -;AVX2-NEXT: vinserti128 -;AVX2-NEXT: vpaddd - -;AVX512-label: @_Z10test_shortPsS_i -;AVX512: vmovdqu -;AVX512-NEXT: vpmaddwd -;AVX512-NEXT: vinserti128 -;AVX512-NEXT: vpaddd - define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 { +; SSE2-LABEL: _Z10test_shortPsS_i: +; SSE2: # BB#0: # %entry +; SSE2-NEXT: movl %edx, %eax +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: .p2align 4, 0x90 +; SSE2-NEXT: .LBB0_1: # %vector.body +; SSE2-NEXT: # =>This Inner Loop Header: Depth=1 +; SSE2-NEXT: movdqu (%rdi), %xmm2 +; SSE2-NEXT: movdqu (%rsi), %xmm3 +; SSE2-NEXT: pmaddwd %xmm2, %xmm3 +; SSE2-NEXT: paddd %xmm3, %xmm1 +; SSE2-NEXT: addq $16, %rsi +; SSE2-NEXT: addq $16, %rdi +; SSE2-NEXT: addq $-8, %rax +; SSE2-NEXT: jne .LBB0_1 +; SSE2-NEXT: # BB#2: # %middle.block +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: paddd %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: retq +; +; AVX2-LABEL: _Z10test_shortPsS_i: +; AVX2: # BB#0: # %entry +; AVX2-NEXT: movl 
%edx, %eax +; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: .p2align 4, 0x90 +; AVX2-NEXT: .LBB0_1: # %vector.body +; AVX2-NEXT: # =>This Inner Loop Header: Depth=1 +; AVX2-NEXT: vmovdqu (%rsi), %xmm2 +; AVX2-NEXT: vpmaddwd (%rdi), %xmm2, %xmm2 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm2 +; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: addq $16, %rsi +; AVX2-NEXT: addq $16, %rdi +; AVX2-NEXT: addq $-8, %rax +; AVX2-NEXT: jne .LBB0_1 +; AVX2-NEXT: # BB#2: # %middle.block +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0 +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: _Z10test_shortPsS_i: +; AVX512: # BB#0: # %entry +; AVX512-NEXT: movl %edx, %eax +; AVX512-NEXT: vpxor %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512-NEXT: .p2align 4, 0x90 +; AVX512-NEXT: .LBB0_1: # %vector.body +; AVX512-NEXT: # =>This Inner Loop Header: Depth=1 +; AVX512-NEXT: vmovdqu (%rsi), %xmm2 +; AVX512-NEXT: vpmaddwd (%rdi), %xmm2, %xmm2 +; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm2 +; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0 +; AVX512-NEXT: addq $16, %rsi +; AVX512-NEXT: addq $16, %rdi +; AVX512-NEXT: addq $-8, %rax +; AVX512-NEXT: jne .LBB0_1 +; AVX512-NEXT: # BB#2: # %middle.block +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq entry: %3 = zext i32 %2 to i64 br label %vector.body @@ -54,20 +113,227 @@ middle.block: ret i32 %13 } -;AVX2-label: @_Z9test_charPcS_i -;AVX2: vpmovsxbw -;AVX2-NEXT: vpmovsxbw -;AVX2-NEXT: vpmaddwd 
-;AVX2-NEXT: vpaddd +define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 { +; SSE2-LABEL: test_unsigned_short: +; SSE2: # BB#0: # %entry +; SSE2-NEXT: movl %edx, %eax +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: .p2align 4, 0x90 +; SSE2-NEXT: .LBB1_1: # %vector.body +; SSE2-NEXT: # =>This Inner Loop Header: Depth=1 +; SSE2-NEXT: movdqu (%rdi), %xmm2 +; SSE2-NEXT: movdqu (%rsi), %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pmulhuw %xmm2, %xmm4 +; SSE2-NEXT: pmullw %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] +; SSE2-NEXT: paddd %xmm3, %xmm1 +; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: addq $16, %rsi +; SSE2-NEXT: addq $16, %rdi +; SSE2-NEXT: addq $-8, %rax +; SSE2-NEXT: jne .LBB1_1 +; SSE2-NEXT: # BB#2: # %middle.block +; SSE2-NEXT: paddd %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] +; SSE2-NEXT: paddd %xmm1, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: retq +; +; AVX2-LABEL: test_unsigned_short: +; AVX2: # BB#0: # %entry +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0 +; AVX2-NEXT: .p2align 4, 0x90 +; AVX2-NEXT: .LBB1_1: # %vector.body +; AVX2-NEXT: # =>This Inner Loop Header: Depth=1 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: addq $16, %rsi +; AVX2-NEXT: addq $16, %rdi +; AVX2-NEXT: addq $-8, %rax +; AVX2-NEXT: 
jne .LBB1_1 +; AVX2-NEXT: # BB#2: # %middle.block +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0 +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_unsigned_short: +; AVX512: # BB#0: # %entry +; AVX512-NEXT: movl %edx, %eax +; AVX512-NEXT: vpxor %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: .p2align 4, 0x90 +; AVX512-NEXT: .LBB1_1: # %vector.body +; AVX512-NEXT: # =>This Inner Loop Header: Depth=1 +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX512-NEXT: vpmulld %ymm1, %ymm2, %ymm1 +; AVX512-NEXT: vpaddd %ymm0, %ymm1, %ymm0 +; AVX512-NEXT: addq $16, %rsi +; AVX512-NEXT: addq $16, %rdi +; AVX512-NEXT: addq $-8, %rax +; AVX512-NEXT: jne .LBB1_1 +; AVX512-NEXT: # BB#2: # %middle.block +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq +entry: + %3 = zext i32 %2 to i64 + br label %vector.body -;AVX512-label: @_Z9test_charPcS_i -;AVX512: vpmovsxbw -;AVX512-NEXT: vpmovsxbw -;AVX512-NEXT: vpmaddwd -;AVX512-NEXT: vinserti64x4 -;AVX512-NEXT: vpaddd +vector.body: + %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ] + %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ] + %4 = getelementptr inbounds i16, i16* %0, i64 %index + %5 = bitcast i16* %4 to <8 x i16>* + %wide.load = load <8 x i16>, <8 x i16>* %5, align 2 + %6 = zext <8 x i16> %wide.load to 
<8 x i32> + %7 = getelementptr inbounds i16, i16* %1, i64 %index + %8 = bitcast i16* %7 to <8 x i16>* + %wide.load14 = load <8 x i16>, <8 x i16>* %8, align 2 + %9 = zext <8 x i16> %wide.load14 to <8 x i32> + %10 = mul nsw <8 x i32> %9, %6 + %11 = add nsw <8 x i32> %10, %vec.phi + %index.next = add i64 %index, 8 + %12 = icmp eq i64 %index.next, %3 + br i1 %12, label %middle.block, label %vector.body + +middle.block: + %rdx.shuf = shufflevector <8 x i32> %11, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef> + %bin.rdx = add <8 x i32> %11, %rdx.shuf + %rdx.shuf15 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> + %bin.rdx16 = add <8 x i32> %bin.rdx, %rdx.shuf15 + %rdx.shuf17 = shufflevector <8 x i32> %bin.rdx16, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> + %bin.rdx18 = add <8 x i32> %bin.rdx16, %rdx.shuf17 + %13 = extractelement <8 x i32> %bin.rdx18, i32 0 + ret i32 %13 +} define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 { +; SSE2-LABEL: _Z9test_charPcS_i: +; SSE2: # BB#0: # %entry +; SSE2-NEXT: movl %edx, %eax +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: .p2align 4, 0x90 +; SSE2-NEXT: .LBB2_1: # %vector.body +; SSE2-NEXT: # =>This Inner Loop Header: Depth=1 +; SSE2-NEXT: movq {{.*#+}} xmm4 = mem[0],zero +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm4 +; SSE2-NEXT: movq {{.*#+}} xmm5 = mem[0],zero +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm5 +; SSE2-NEXT: pmullw %xmm4, %xmm5 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] +; 
SSE2-NEXT: psrad $16, %xmm4 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7] +; SSE2-NEXT: psrad $16, %xmm5 +; SSE2-NEXT: movq {{.*#+}} xmm6 = mem[0],zero +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm6 +; SSE2-NEXT: movq {{.*#+}} xmm7 = mem[0],zero +; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm7 +; SSE2-NEXT: pmullw %xmm6, %xmm7 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3] +; SSE2-NEXT: psrad $16, %xmm6 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7] +; SSE2-NEXT: psrad $16, %xmm7 +; SSE2-NEXT: paddd %xmm7, %xmm2 +; SSE2-NEXT: paddd %xmm6, %xmm3 +; SSE2-NEXT: paddd %xmm5, %xmm1 +; SSE2-NEXT: paddd %xmm4, %xmm0 +; SSE2-NEXT: addq $16, %rsi +; SSE2-NEXT: addq $16, %rdi +; SSE2-NEXT: addq $-16, %rax +; SSE2-NEXT: jne .LBB2_1 +; SSE2-NEXT: # BB#2: # %middle.block +; SSE2-NEXT: paddd %xmm3, %xmm0 +; SSE2-NEXT: paddd %xmm2, %xmm1 +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: paddd %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: retq +; +; AVX2-LABEL: _Z9test_charPcS_i: +; AVX2: # BB#0: # %entry +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: .p2align 4, 0x90 +; AVX2-NEXT: .LBB2_1: # %vector.body +; AVX2-NEXT: # =>This Inner Loop Header: Depth=1 +; AVX2-NEXT: vpmovsxbw (%rdi), %ymm2 +; AVX2-NEXT: vpmovsxbw (%rsi), %ymm3 +; AVX2-NEXT: vpmaddwd %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: addq $16, %rsi +; AVX2-NEXT: addq $16, %rdi +; AVX2-NEXT: addq $-16, %rax +; AVX2-NEXT: jne .LBB2_1 +; AVX2-NEXT: # BB#2: # %middle.block +; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: 
vpaddd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0 +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: _Z9test_charPcS_i: +; AVX512: # BB#0: # %entry +; AVX512-NEXT: movl %edx, %eax +; AVX512-NEXT: vpxord %zmm0, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %ymm1, %ymm1, %ymm1 +; AVX512-NEXT: .p2align 4, 0x90 +; AVX512-NEXT: .LBB2_1: # %vector.body +; AVX512-NEXT: # =>This Inner Loop Header: Depth=1 +; AVX512-NEXT: vpmovsxbw (%rdi), %ymm2 +; AVX512-NEXT: vpmovsxbw (%rsi), %ymm3 +; AVX512-NEXT: vpmaddwd %ymm2, %ymm3, %ymm2 +; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2 +; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0 +; AVX512-NEXT: addq $16, %rsi +; AVX512-NEXT: addq $16, %rdi +; AVX512-NEXT: addq $-16, %rax +; AVX512-NEXT: jne .LBB2_1 +; AVX512-NEXT: # BB#2: # %middle.block +; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm0[4,5,6,7,0,1,0,1] +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm0[2,3,0,1,0,1,0,1] +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15] +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15] +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq entry: %3 = zext i32 %2 to i64 br label %vector.body diff --git a/test/CodeGen/X86/merge_store.ll b/test/CodeGen/X86/merge_store.ll index 31c1f65824260..dcb7bd010e56b 100644 --- a/test/CodeGen/X86/merge_store.ll +++ b/test/CodeGen/X86/merge_store.ll @@ -28,3 +28,34 @@ entry: for.end: ret void } + + + +;; CHECK-LABEL: indexed-store-merge + +;; We should be able to merge the 4 consecutive stores. 
+;; FIXMECHECK: movl $0, 2(%rsi,%rdi) + +;; CHECK: movb $0, 2(%rsi,%rdi) +;; CHECK: movb $0, 3(%rsi,%rdi) +;; CHECK: movb $0, 4(%rsi,%rdi) +;; CHECK: movb $0, 5(%rsi,%rdi) +;; CHECK: movb $0, (%rsi) +define void @indexed-store-merge(i64 %p, i8* %v) { +entry: + %p2 = add nsw i64 %p, 2 + %v2 = getelementptr i8, i8* %v, i64 %p2 + store i8 0, i8* %v2, align 2 + %p3 = add nsw i64 %p, 3 + %v3 = getelementptr i8, i8* %v, i64 %p3 + store i8 0, i8* %v3, align 1 + %p4 = add nsw i64 %p, 4 + %v4 = getelementptr i8, i8* %v, i64 %p4 + store i8 0, i8* %v4, align 2 + %p5 = add nsw i64 %p, 5 + %v5 = getelementptr i8, i8* %v, i64 %p5 + store i8 0, i8* %v5, align 1 + %v0 = getelementptr i8, i8* %v, i64 0 + store i8 0, i8* %v0, align 2 + ret void +} diff --git a/test/CodeGen/X86/sse-schedule.ll b/test/CodeGen/X86/sse-schedule.ll new file mode 100644 index 0000000000000..52e6b61aedfe8 --- /dev/null +++ b/test/CodeGen/X86/sse-schedule.ll @@ -0,0 +1,2415 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=slm | FileCheck %s --check-prefix=CHECK --check-prefix=SLM +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2 + +define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_addps: +; GENERIC: # BB#0: +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: addps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_addps: +; ATOM: # BB#0: +; ATOM-NEXT: addps %xmm1, %xmm0 +; ATOM-NEXT: addps (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_addps: +; SLM: # BB#0: +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: addps (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_addps: +; SANDY: # BB#0: +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_addps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_addps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fadd <4 x float> %a0, %a1 + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = fadd <4 x float> %1, %2 + ret <4 x float> %3 +} + +define float @test_addss(float %a0, float %a1, float *%a2) { +; GENERIC-LABEL: test_addss: +; GENERIC: # BB#0: +; GENERIC-NEXT: addss %xmm1, %xmm0 +; GENERIC-NEXT: addss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_addss: +; ATOM: # BB#0: +; ATOM-NEXT: addss %xmm1, %xmm0 +; ATOM-NEXT: addss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_addss: +; SLM: # BB#0: +; SLM-NEXT: addss %xmm1, 
%xmm0 # sched: [3:1.00] +; SLM-NEXT: addss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_addss: +; SANDY: # BB#0: +; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_addss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_addss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fadd float %a0, %a1 + %2 = load float, float *%a2, align 4 + %3 = fadd float %1, %2 + ret float %3 +} + +define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_andps: +; GENERIC: # BB#0: +; GENERIC-NEXT: andps %xmm1, %xmm0 +; GENERIC-NEXT: andps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_andps: +; ATOM: # BB#0: +; ATOM-NEXT: andps %xmm1, %xmm0 +; ATOM-NEXT: andps (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_andps: +; SLM: # BB#0: +; SLM-NEXT: andps %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: andps (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_andps: +; SANDY: # BB#0: +; SANDY-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_andps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: 
test_andps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <4 x float> %a0 to <4 x i32> + %2 = bitcast <4 x float> %a1 to <4 x i32> + %3 = and <4 x i32> %1, %2 + %4 = load <4 x float>, <4 x float> *%a2, align 16 + %5 = bitcast <4 x float> %4 to <4 x i32> + %6 = and <4 x i32> %3, %5 + %7 = bitcast <4 x i32> %6 to <4 x float> + ret <4 x float> %7 +} + +define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_andnotps: +; GENERIC: # BB#0: +; GENERIC-NEXT: andnps %xmm1, %xmm0 +; GENERIC-NEXT: andnps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_andnotps: +; ATOM: # BB#0: +; ATOM-NEXT: andnps %xmm1, %xmm0 +; ATOM-NEXT: andnps (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_andnotps: +; SLM: # BB#0: +; SLM-NEXT: andnps %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: andnps (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_andnotps: +; SANDY: # BB#0: +; SANDY-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_andnotps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_andnotps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <4 x float> %a0 to <4 x i32> + %2 = bitcast <4 x float> %a1 to <4 x i32> + %3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 
-1, i32 -1> + %4 = and <4 x i32> %3, %2 + %5 = load <4 x float>, <4 x float> *%a2, align 16 + %6 = bitcast <4 x float> %5 to <4 x i32> + %7 = xor <4 x i32> %4, <i32 -1, i32 -1, i32 -1, i32 -1> + %8 = and <4 x i32> %6, %7 + %9 = bitcast <4 x i32> %8 to <4 x float> + ret <4 x float> %9 +} + +define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_cmpps: +; GENERIC: # BB#0: +; GENERIC-NEXT: cmpeqps %xmm0, %xmm1 +; GENERIC-NEXT: cmpeqps (%rdi), %xmm0 +; GENERIC-NEXT: orps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cmpps: +; ATOM: # BB#0: +; ATOM-NEXT: cmpeqps %xmm0, %xmm1 +; ATOM-NEXT: cmpeqps (%rdi), %xmm0 +; ATOM-NEXT: orps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cmpps: +; SLM: # BB#0: +; SLM-NEXT: cmpeqps %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: cmpeqps (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: orps %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cmpps: +; SANDY: # BB#0: +; SANDY-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00] +; SANDY-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cmpps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00] +; HASWELL-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cmpps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00] +; BTVER2-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fcmp oeq <4 x float> %a0, %a1 + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = fcmp oeq <4 x float> %a0, %2 + %4 = or <4 x i1> %1, %3 + %5 = sext <4 x i1> %4 to <4 x i32> + %6 = bitcast 
<4 x i32> %5 to <4 x float> + ret <4 x float> %6 +} + +define float @test_cmpss(float %a0, float %a1, float *%a2) { +; GENERIC-LABEL: test_cmpss: +; GENERIC: # BB#0: +; GENERIC-NEXT: cmpeqss %xmm1, %xmm0 +; GENERIC-NEXT: cmpeqss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cmpss: +; ATOM: # BB#0: +; ATOM-NEXT: cmpeqss %xmm1, %xmm0 +; ATOM-NEXT: cmpeqss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cmpss: +; SLM: # BB#0: +; SLM-NEXT: cmpeqss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: cmpeqss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cmpss: +; SANDY: # BB#0: +; SANDY-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cmpss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cmpss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <4 x float> undef, float %a0, i32 0 + %2 = insertelement <4 x float> undef, float %a1, i32 0 + %3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0) + %4 = load float, float *%a2, align 4 + %5 = insertelement <4 x float> undef, float %4, i32 0 + %6 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %3, <4 x float> %5, i8 0) + %7 = extractelement <4 x float> %6, i32 0 + ret float %7 +} +declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind readnone + +define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_comiss: +; GENERIC: # BB#0: +; GENERIC-NEXT: comiss %xmm1, %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %cl +; 
GENERIC-NEXT: andb %al, %cl +; GENERIC-NEXT: comiss (%rdi), %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %dl +; GENERIC-NEXT: andb %al, %dl +; GENERIC-NEXT: orb %cl, %dl +; GENERIC-NEXT: movzbl %dl, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_comiss: +; ATOM: # BB#0: +; ATOM-NEXT: comiss %xmm1, %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %cl +; ATOM-NEXT: andb %al, %cl +; ATOM-NEXT: comiss (%rdi), %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %dl +; ATOM-NEXT: andb %al, %dl +; ATOM-NEXT: orb %cl, %dl +; ATOM-NEXT: movzbl %dl, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_comiss: +; SLM: # BB#0: +; SLM-NEXT: comiss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %cl # sched: [1:0.50] +; SLM-NEXT: andb %al, %cl # sched: [1:0.50] +; SLM-NEXT: comiss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %dl # sched: [1:0.50] +; SLM-NEXT: andb %al, %dl # sched: [1:0.50] +; SLM-NEXT: orb %cl, %dl # sched: [1:0.50] +; SLM-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_comiss: +; SANDY: # BB#0: +; SANDY-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %cl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %cl # sched: [1:0.33] +; SANDY-NEXT: vcomiss (%rdi), %xmm0 # sched: [7:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %dl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %dl # sched: [1:0.33] +; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33] +; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_comiss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %cl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %cl # sched: [1:0.25] +; HASWELL-NEXT: vcomiss (%rdi), %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: setnp %al # 
sched: [1:0.50] +; HASWELL-NEXT: sete %dl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %dl # sched: [1:0.25] +; HASWELL-NEXT: orb %cl, %dl # sched: [1:0.25] +; HASWELL-NEXT: movzbl %dl, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_comiss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %cl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %cl # sched: [1:0.50] +; BTVER2-NEXT: vcomiss (%rdi), %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %dl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %dl # sched: [1:0.50] +; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50] +; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1) + %2 = load <4 x float>, <4 x float> *%a2, align 4 + %3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2) + %4 = or i32 %1, %3 + ret i32 %4 +} +declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone + +define float @test_cvtsi2ss(i32 %a0, i32 *%a1) { +; GENERIC-LABEL: test_cvtsi2ss: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsi2ssl %edi, %xmm1 +; GENERIC-NEXT: cvtsi2ssl (%rsi), %xmm0 +; GENERIC-NEXT: addss %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsi2ss: +; ATOM: # BB#0: +; ATOM-NEXT: cvtsi2ssl (%rsi), %xmm0 +; ATOM-NEXT: cvtsi2ssl %edi, %xmm1 +; ATOM-NEXT: addss %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsi2ss: +; SLM: # BB#0: +; SLM-NEXT: cvtsi2ssl (%rsi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: cvtsi2ssl %edi, %xmm1 # sched: [4:0.50] +; SLM-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsi2ss: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00] +; SANDY-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; SANDY-NEXT: vaddss 
%xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtsi2ss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsi2ss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sitofp i32 %a0 to float + %2 = load i32, i32 *%a1, align 4 + %3 = sitofp i32 %2 to float + %4 = fadd float %1, %3 + ret float %4 +} + +define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) { +; GENERIC-LABEL: test_cvtsi2ssq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsi2ssq %rdi, %xmm1 +; GENERIC-NEXT: cvtsi2ssq (%rsi), %xmm0 +; GENERIC-NEXT: addss %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsi2ssq: +; ATOM: # BB#0: +; ATOM-NEXT: cvtsi2ssq (%rsi), %xmm0 +; ATOM-NEXT: cvtsi2ssq %rdi, %xmm1 +; ATOM-NEXT: addss %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsi2ssq: +; SLM: # BB#0: +; SLM-NEXT: cvtsi2ssq (%rsi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: cvtsi2ssq %rdi, %xmm1 # sched: [4:0.50] +; SLM-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsi2ssq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [4:1.00] +; SANDY-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtsi2ssq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; 
HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsi2ssq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sitofp i64 %a0 to float + %2 = load i64, i64 *%a1, align 8 + %3 = sitofp i64 %2 to float + %4 = fadd float %1, %3 + ret float %4 +} + +define i32 @test_cvtss2si(float %a0, float *%a1) { +; GENERIC-LABEL: test_cvtss2si: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtss2si %xmm0, %ecx +; GENERIC-NEXT: cvtss2si (%rdi), %eax +; GENERIC-NEXT: addl %ecx, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtss2si: +; ATOM: # BB#0: +; ATOM-NEXT: cvtss2si (%rdi), %eax +; ATOM-NEXT: cvtss2si %xmm0, %ecx +; ATOM-NEXT: addl %ecx, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtss2si: +; SLM: # BB#0: +; SLM-NEXT: cvtss2si (%rdi), %eax # sched: [7:1.00] +; SLM-NEXT: cvtss2si %xmm0, %ecx # sched: [4:0.50] +; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtss2si: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtss2si %xmm0, %ecx # sched: [3:1.00] +; SANDY-NEXT: vcvtss2si (%rdi), %eax # sched: [7:1.00] +; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtss2si: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtss2si %xmm0, %ecx # sched: [4:1.00] +; HASWELL-NEXT: vcvtss2si (%rdi), %eax # sched: [8:1.00] +; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtss2si: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtss2si (%rdi), %eax # sched: [8:1.00] +; BTVER2-NEXT: vcvtss2si %xmm0, %ecx # sched: [3:1.00] +; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <4 x float> undef, float %a0, i32 0 + %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1) + %3 = load 
float, float *%a1, align 4 + %4 = insertelement <4 x float> undef, float %3, i32 0 + %5 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %4) + %6 = add i32 %2, %5 + ret i32 %6 +} +declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone + +define i64 @test_cvtss2siq(float %a0, float *%a1) { +; GENERIC-LABEL: test_cvtss2siq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtss2si %xmm0, %rcx +; GENERIC-NEXT: cvtss2si (%rdi), %rax +; GENERIC-NEXT: addq %rcx, %rax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtss2siq: +; ATOM: # BB#0: +; ATOM-NEXT: cvtss2si (%rdi), %rax +; ATOM-NEXT: cvtss2si %xmm0, %rcx +; ATOM-NEXT: addq %rcx, %rax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtss2siq: +; SLM: # BB#0: +; SLM-NEXT: cvtss2si (%rdi), %rax # sched: [7:1.00] +; SLM-NEXT: cvtss2si %xmm0, %rcx # sched: [4:0.50] +; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtss2siq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtss2si %xmm0, %rcx # sched: [3:1.00] +; SANDY-NEXT: vcvtss2si (%rdi), %rax # sched: [7:1.00] +; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtss2siq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtss2si %xmm0, %rcx # sched: [4:1.00] +; HASWELL-NEXT: vcvtss2si (%rdi), %rax # sched: [8:1.00] +; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtss2siq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtss2si (%rdi), %rax # sched: [8:1.00] +; BTVER2-NEXT: vcvtss2si %xmm0, %rcx # sched: [3:1.00] +; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <4 x float> undef, float %a0, i32 0 + %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1) + %3 = load float, float *%a1, align 4 + %4 = insertelement <4 x float> undef, float %3, i32 0 + %5 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %4) + %6 = add i64 %2, %5 + ret i64 %6 +} +declare i64 
@llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone + +define i32 @test_cvttss2si(float %a0, float *%a1) { +; GENERIC-LABEL: test_cvttss2si: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvttss2si %xmm0, %ecx +; GENERIC-NEXT: cvttss2si (%rdi), %eax +; GENERIC-NEXT: addl %ecx, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvttss2si: +; ATOM: # BB#0: +; ATOM-NEXT: cvttss2si (%rdi), %eax +; ATOM-NEXT: cvttss2si %xmm0, %ecx +; ATOM-NEXT: addl %ecx, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvttss2si: +; SLM: # BB#0: +; SLM-NEXT: cvttss2si (%rdi), %eax # sched: [7:1.00] +; SLM-NEXT: cvttss2si %xmm0, %ecx # sched: [4:0.50] +; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvttss2si: +; SANDY: # BB#0: +; SANDY-NEXT: vcvttss2si %xmm0, %ecx # sched: [3:1.00] +; SANDY-NEXT: vcvttss2si (%rdi), %eax # sched: [7:1.00] +; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvttss2si: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvttss2si %xmm0, %ecx # sched: [4:1.00] +; HASWELL-NEXT: vcvttss2si (%rdi), %eax # sched: [8:1.00] +; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvttss2si: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvttss2si (%rdi), %eax # sched: [8:1.00] +; BTVER2-NEXT: vcvttss2si %xmm0, %ecx # sched: [3:1.00] +; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fptosi float %a0 to i32 + %2 = load float, float *%a1, align 4 + %3 = fptosi float %2 to i32 + %4 = add i32 %1, %3 + ret i32 %4 +} + +define i64 @test_cvttss2siq(float %a0, float *%a1) { +; GENERIC-LABEL: test_cvttss2siq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvttss2si %xmm0, %rcx +; GENERIC-NEXT: cvttss2si (%rdi), %rax +; GENERIC-NEXT: addq %rcx, %rax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvttss2siq: +; ATOM: # BB#0: +; ATOM-NEXT: cvttss2si (%rdi), %rax +; ATOM-NEXT: cvttss2si %xmm0, %rcx +; 
ATOM-NEXT: addq %rcx, %rax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvttss2siq: +; SLM: # BB#0: +; SLM-NEXT: cvttss2si (%rdi), %rax # sched: [7:1.00] +; SLM-NEXT: cvttss2si %xmm0, %rcx # sched: [4:0.50] +; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvttss2siq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvttss2si %xmm0, %rcx # sched: [3:1.00] +; SANDY-NEXT: vcvttss2si (%rdi), %rax # sched: [7:1.00] +; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvttss2siq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvttss2si %xmm0, %rcx # sched: [4:1.00] +; HASWELL-NEXT: vcvttss2si (%rdi), %rax # sched: [8:1.00] +; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvttss2siq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvttss2si (%rdi), %rax # sched: [8:1.00] +; BTVER2-NEXT: vcvttss2si %xmm0, %rcx # sched: [3:1.00] +; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fptosi float %a0 to i64 + %2 = load float, float *%a1, align 4 + %3 = fptosi float %2 to i64 + %4 = add i64 %1, %3 + ret i64 %4 +} + +define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_divps: +; GENERIC: # BB#0: +; GENERIC-NEXT: divps %xmm1, %xmm0 +; GENERIC-NEXT: divps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_divps: +; ATOM: # BB#0: +; ATOM-NEXT: divps %xmm1, %xmm0 +; ATOM-NEXT: divps (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_divps: +; SLM: # BB#0: +; SLM-NEXT: divps %xmm1, %xmm0 # sched: [34:34.00] +; SLM-NEXT: divps (%rdi), %xmm0 # sched: [37:34.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_divps: +; SANDY: # BB#0: +; SANDY-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; SANDY-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_divps: 
+; HASWELL: # BB#0: +; HASWELL-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; HASWELL-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_divps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [19:19.00] +; BTVER2-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [24:19.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fdiv <4 x float> %a0, %a1 + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = fdiv <4 x float> %1, %2 + ret <4 x float> %3 +} + +define float @test_divss(float %a0, float %a1, float *%a2) { +; GENERIC-LABEL: test_divss: +; GENERIC: # BB#0: +; GENERIC-NEXT: divss %xmm1, %xmm0 +; GENERIC-NEXT: divss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_divss: +; ATOM: # BB#0: +; ATOM-NEXT: divss %xmm1, %xmm0 +; ATOM-NEXT: divss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_divss: +; SLM: # BB#0: +; SLM-NEXT: divss %xmm1, %xmm0 # sched: [34:34.00] +; SLM-NEXT: divss (%rdi), %xmm0 # sched: [37:34.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_divss: +; SANDY: # BB#0: +; SANDY-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; SANDY-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_divss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; HASWELL-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_divss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [19:19.00] +; BTVER2-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [24:19.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fdiv float %a0, %a1 + %2 = load float, float *%a2, align 4 + %3 = fdiv float %1, %2 + ret float %3 +} + +define void @test_ldmxcsr(i32 %a0) { +; GENERIC-LABEL: test_ldmxcsr: +; GENERIC: # BB#0: +; GENERIC-NEXT: movl %edi, -{{[0-9]+}}(%rsp) +; GENERIC-NEXT: ldmxcsr 
-{{[0-9]+}}(%rsp) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_ldmxcsr: +; ATOM: # BB#0: +; ATOM-NEXT: movl %edi, -{{[0-9]+}}(%rsp) +; ATOM-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_ldmxcsr: +; SLM: # BB#0: +; SLM-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; SLM-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_ldmxcsr: +; SANDY: # BB#0: +; SANDY-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; SANDY-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [4:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_ldmxcsr: +; HASWELL: # BB#0: +; HASWELL-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; HASWELL-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [6:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_ldmxcsr: +; BTVER2: # BB#0: +; BTVER2-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; BTVER2-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = alloca i32, align 4 + %2 = bitcast i32* %1 to i8* + store i32 %a0, i32* %1 + call void @llvm.x86.sse.ldmxcsr(i8* %2) + ret void +} +declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind readnone + +define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_maxps: +; GENERIC: # BB#0: +; GENERIC-NEXT: maxps %xmm1, %xmm0 +; GENERIC-NEXT: maxps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_maxps: +; ATOM: # BB#0: +; ATOM-NEXT: maxps %xmm1, %xmm0 +; ATOM-NEXT: maxps (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_maxps: +; SLM: # BB#0: +; SLM-NEXT: maxps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: maxps (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_maxps: +; SANDY: # BB#0: +; SANDY-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; 
HASWELL-LABEL: test_maxps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_maxps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2) + ret <4 x float> %3 +} +declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone + +define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_maxss: +; GENERIC: # BB#0: +; GENERIC-NEXT: maxss %xmm1, %xmm0 +; GENERIC-NEXT: maxss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_maxss: +; ATOM: # BB#0: +; ATOM-NEXT: maxss %xmm1, %xmm0 +; ATOM-NEXT: maxss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_maxss: +; SLM: # BB#0: +; SLM-NEXT: maxss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: maxss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_maxss: +; SANDY: # BB#0: +; SANDY-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_maxss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_maxss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x 
float> %a1) + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2) + ret <4 x float> %3 +} +declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone + +define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_minps: +; GENERIC: # BB#0: +; GENERIC-NEXT: minps %xmm1, %xmm0 +; GENERIC-NEXT: minps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_minps: +; ATOM: # BB#0: +; ATOM-NEXT: minps %xmm1, %xmm0 +; ATOM-NEXT: minps (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_minps: +; SLM: # BB#0: +; SLM-NEXT: minps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: minps (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_minps: +; SANDY: # BB#0: +; SANDY-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_minps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_minps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2) + ret <4 x float> %3 +} +declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone + +define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_minss: +; GENERIC: # BB#0: +; GENERIC-NEXT: minss %xmm1, %xmm0 +; GENERIC-NEXT: minss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; 
ATOM-LABEL: test_minss: +; ATOM: # BB#0: +; ATOM-NEXT: minss %xmm1, %xmm0 +; ATOM-NEXT: minss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_minss: +; SLM: # BB#0: +; SLM-NEXT: minss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: minss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_minss: +; SANDY: # BB#0: +; SANDY-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_minss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_minss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2) + ret <4 x float> %3 +} +declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone + +define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_movaps: +; GENERIC: # BB#0: +; GENERIC-NEXT: movaps (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm0, %xmm0 +; GENERIC-NEXT: movaps %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movaps: +; ATOM: # BB#0: +; ATOM-NEXT: movaps (%rdi), %xmm0 +; ATOM-NEXT: addps %xmm0, %xmm0 +; ATOM-NEXT: movaps %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movaps: +; SLM: # BB#0: +; SLM-NEXT: movaps (%rdi), %xmm0 # sched: [3:1.00] +; SLM-NEXT: addps %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movaps %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movaps: +; SANDY: # BB#0: +; SANDY-NEXT: vmovaps 
(%rdi), %xmm0 # sched: [4:0.50] +; SANDY-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movaps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovaps (%rdi), %xmm0 # sched: [4:0.50] +; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movaps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovaps (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load <4 x float>, <4 x float> *%a0, align 16 + %2 = fadd <4 x float> %1, %1 + store <4 x float> %2, <4 x float> *%a1, align 16 + ret void +} + +; TODO (v)movhlps + +define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) { +; GENERIC-LABEL: test_movhlps: +; GENERIC: # BB#0: +; GENERIC-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movhlps: +; ATOM: # BB#0: +; ATOM-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movhlps: +; SLM: # BB#0: +; SLM-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movhlps: +; SANDY: # BB#0: +; SANDY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movhlps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movhlps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x 
float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3> + ret <4 x float> %1 +} + +; TODO (v)movhps + +define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) { +; GENERIC-LABEL: test_movhps: +; GENERIC: # BB#0: +; GENERIC-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; GENERIC-NEXT: addps %xmm0, %xmm1 +; GENERIC-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; GENERIC-NEXT: movlps %xmm1, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movhps: +; ATOM: # BB#0: +; ATOM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; ATOM-NEXT: addps %xmm0, %xmm1 +; ATOM-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] +; ATOM-NEXT: movlps %xmm1, (%rdi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movhps: +; SLM: # BB#0: +; SLM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00] +; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: pextrq $1, %xmm1, (%rdi) # sched: [4:2.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movhps: +; SANDY: # BB#0: +; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movhps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movhps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast x86_mmx* %a2 to <2 x float>* + %2 = load <2 x float>, <2 x float> *%1, align 8 + %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %4 = shufflevector <4 x 
float> %a1, <4 x float> %3, <4 x i32> <i32 0, i32 1, i32 4, i32 5> + %5 = fadd <4 x float> %a0, %4 + %6 = shufflevector <4 x float> %5, <4 x float> undef, <2 x i32> <i32 2, i32 3> + store <2 x float> %6, <2 x float>* %1 + ret void +} + +; TODO (v)movlhps + +define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) { +; GENERIC-LABEL: test_movlhps: +; GENERIC: # BB#0: +; GENERIC-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movlhps: +; ATOM: # BB#0: +; ATOM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; ATOM-NEXT: addps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movlhps: +; SLM: # BB#0: +; SLM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movlhps: +; SANDY: # BB#0: +; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movlhps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; HASWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movlhps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] +; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5> + %2 = fadd <4 x float> %a1, %1 + ret <4 x float> %2 +} + +define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) { +; GENERIC-LABEL: test_movlps: +; GENERIC: # BB#0: +; GENERIC-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; GENERIC-NEXT: addps %xmm0, %xmm1 +; GENERIC-NEXT: movlps %xmm1, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movlps: +; 
ATOM: # BB#0: +; ATOM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; ATOM-NEXT: addps %xmm0, %xmm1 +; ATOM-NEXT: movlps %xmm1, (%rdi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movlps: +; SLM: # BB#0: +; SLM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [4:1.00] +; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movlps %xmm1, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movlps: +; SANDY: # BB#0: +; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [5:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movlps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [5:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movlps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast x86_mmx* %a2 to <2 x float>* + %2 = load <2 x float>, <2 x float> *%1, align 8 + %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %4 = shufflevector <4 x float> %a1, <4 x float> %3, <4 x i32> <i32 4, i32 5, i32 2, i32 3> + %5 = fadd <4 x float> %a0, %4 + %6 = shufflevector <4 x float> %5, <4 x float> undef, <2 x i32> <i32 0, i32 1> + store <2 x float> %6, <2 x float>* %1 + ret void +} + +define i32 @test_movmskps(<4 x float> %a0) { +; GENERIC-LABEL: test_movmskps: +; GENERIC: # BB#0: +; GENERIC-NEXT: movmskps %xmm0, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movmskps: +; ATOM: # BB#0: +; ATOM-NEXT: movmskps %xmm0, %eax +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; 
SLM-LABEL: test_movmskps: +; SLM: # BB#0: +; SLM-NEXT: movmskps %xmm0, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movmskps: +; SANDY: # BB#0: +; SANDY-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movmskps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovmskps %xmm0, %eax # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movmskps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) + ret i32 %1 +} +declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone + +define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_movntps: +; GENERIC: # BB#0: +; GENERIC-NEXT: movntps %xmm0, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movntps: +; ATOM: # BB#0: +; ATOM-NEXT: movntps %xmm0, (%rdi) +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movntps: +; SLM: # BB#0: +; SLM-NEXT: movntps %xmm0, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movntps: +; SANDY: # BB#0: +; SANDY-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movntps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movntps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0 + ret void +} + +define void @test_movss_mem(float* %a0, float* %a1) { +; GENERIC-LABEL: test_movss_mem: +; GENERIC: # BB#0: +; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; GENERIC-NEXT: addss %xmm0, %xmm0 +; GENERIC-NEXT: 
movss %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movss_mem: +; ATOM: # BB#0: +; ATOM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; ATOM-NEXT: addss %xmm0, %xmm0 +; ATOM-NEXT: movss %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movss_mem: +; SLM: # BB#0: +; SLM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [3:1.00] +; SLM-NEXT: addss %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movss %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movss_mem: +; SANDY: # BB#0: +; SANDY-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [4:0.50] +; SANDY-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movss_mem: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [4:0.50] +; HASWELL-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movss_mem: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:1.00] +; BTVER2-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load float, float* %a0, align 1 + %2 = fadd float %1, %1 + store float %2, float *%a1, align 1 + ret void +} + +define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) { +; GENERIC-LABEL: test_movss_reg: +; GENERIC: # BB#0: +; GENERIC-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movss_reg: +; ATOM: # BB#0: +; ATOM-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movss_reg: +; SLM: # BB#0: +; SLM-NEXT: blendps {{.*#+}} xmm0 
= xmm1[0],xmm0[1,2,3] sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movss_reg: +; SANDY: # BB#0: +; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movss_reg: +; HASWELL: # BB#0: +; HASWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movss_reg: +; BTVER2: # BB#0: +; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3> + ret <4 x float> %1 +} + +define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_movups: +; GENERIC: # BB#0: +; GENERIC-NEXT: movups (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm0, %xmm0 +; GENERIC-NEXT: movups %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movups: +; ATOM: # BB#0: +; ATOM-NEXT: movups (%rdi), %xmm0 +; ATOM-NEXT: addps %xmm0, %xmm0 +; ATOM-NEXT: movups %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movups: +; SLM: # BB#0: +; SLM-NEXT: movups (%rdi), %xmm0 # sched: [3:1.00] +; SLM-NEXT: addps %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movups %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movups: +; SANDY: # BB#0: +; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [4:0.50] +; SANDY-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movups: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovups (%rdi), %xmm0 # sched: [4:0.50] +; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movups: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovups (%rdi), %xmm0 # sched: [5:1.00] 
+; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load <4 x float>, <4 x float> *%a0, align 1 + %2 = fadd <4 x float> %1, %1 + store <4 x float> %2, <4 x float> *%a1, align 1 + ret void +} + +define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_mulps: +; GENERIC: # BB#0: +; GENERIC-NEXT: mulps %xmm1, %xmm0 +; GENERIC-NEXT: mulps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_mulps: +; ATOM: # BB#0: +; ATOM-NEXT: mulps %xmm1, %xmm0 +; ATOM-NEXT: mulps (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_mulps: +; SLM: # BB#0: +; SLM-NEXT: mulps %xmm1, %xmm0 # sched: [5:2.00] +; SLM-NEXT: mulps (%rdi), %xmm0 # sched: [8:2.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_mulps: +; SANDY: # BB#0: +; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_mulps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [9:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_mulps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fmul <4 x float> %a0, %a1 + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = fmul <4 x float> %1, %2 + ret <4 x float> %3 +} + +define float @test_mulss(float %a0, float %a1, float *%a2) { +; GENERIC-LABEL: test_mulss: +; GENERIC: # BB#0: +; GENERIC-NEXT: mulss %xmm1, %xmm0 +; GENERIC-NEXT: mulss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_mulss: +; ATOM: # BB#0: +; ATOM-NEXT: mulss %xmm1, %xmm0 +; ATOM-NEXT: mulss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_mulss: 
+; SLM: # BB#0: +; SLM-NEXT: mulss %xmm1, %xmm0 # sched: [5:2.00] +; SLM-NEXT: mulss (%rdi), %xmm0 # sched: [8:2.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_mulss: +; SANDY: # BB#0: +; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_mulss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [9:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_mulss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fmul float %a0, %a1 + %2 = load float, float *%a2, align 4 + %3 = fmul float %1, %2 + ret float %3 +} + +define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_orps: +; GENERIC: # BB#0: +; GENERIC-NEXT: orps %xmm1, %xmm0 +; GENERIC-NEXT: orps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_orps: +; ATOM: # BB#0: +; ATOM-NEXT: orps %xmm1, %xmm0 +; ATOM-NEXT: orps (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_orps: +; SLM: # BB#0: +; SLM-NEXT: orps %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: orps (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_orps: +; SANDY: # BB#0: +; SANDY-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_orps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: 
[1:1.00] +; +; BTVER2-LABEL: test_orps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <4 x float> %a0 to <4 x i32> + %2 = bitcast <4 x float> %a1 to <4 x i32> + %3 = or <4 x i32> %1, %2 + %4 = load <4 x float>, <4 x float> *%a2, align 16 + %5 = bitcast <4 x float> %4 to <4 x i32> + %6 = or <4 x i32> %3, %5 + %7 = bitcast <4 x i32> %6 to <4 x float> + ret <4 x float> %7 +} + +define void @test_prefetchnta(i8* %a0) { +; GENERIC-LABEL: test_prefetchnta: +; GENERIC: # BB#0: +; GENERIC-NEXT: prefetchnta (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_prefetchnta: +; ATOM: # BB#0: +; ATOM-NEXT: prefetchnta (%rdi) +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_prefetchnta: +; SLM: # BB#0: +; SLM-NEXT: prefetchnta (%rdi) # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_prefetchnta: +; SANDY: # BB#0: +; SANDY-NEXT: prefetchnta (%rdi) # sched: [4:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_prefetchnta: +; HASWELL: # BB#0: +; HASWELL-NEXT: prefetchnta (%rdi) # sched: [4:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_prefetchnta: +; BTVER2: # BB#0: +; BTVER2-NEXT: prefetchnta (%rdi) # sched: [5:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1) + ret void +} +declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind readnone + +define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_rcpps: +; GENERIC: # BB#0: +; GENERIC-NEXT: rcpps %xmm0, %xmm1 +; GENERIC-NEXT: rcpps (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_rcpps: +; ATOM: # BB#0: +; ATOM-NEXT: rcpps (%rdi), %xmm1 +; ATOM-NEXT: rcpps %xmm0, %xmm0 +; 
ATOM-NEXT: addps %xmm0, %xmm1 +; ATOM-NEXT: movaps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_rcpps: +; SLM: # BB#0: +; SLM-NEXT: rcpps (%rdi), %xmm1 # sched: [8:1.00] +; SLM-NEXT: rcpps %xmm0, %xmm0 # sched: [5:1.00] +; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_rcpps: +; SANDY: # BB#0: +; SANDY-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vrcpps (%rdi), %xmm1 # sched: [9:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_rcpps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vrcpps (%rdi), %xmm1 # sched: [9:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_rcpps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vrcpps (%rdi), %xmm1 # sched: [7:1.00] +; BTVER2-NEXT: vrcpps %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) + %2 = load <4 x float>, <4 x float> *%a1, align 16 + %3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2) + %4 = fadd <4 x float> %1, %3 + ret <4 x float> %4 +} +declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone + +; TODO - rcpss_m + +define <4 x float> @test_rcpss(float %a0, float *%a1) { +; GENERIC-LABEL: test_rcpss: +; GENERIC: # BB#0: +; GENERIC-NEXT: rcpss %xmm0, %xmm0 +; GENERIC-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; GENERIC-NEXT: rcpss %xmm1, %xmm1 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_rcpss: +; ATOM: # BB#0: +; ATOM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; ATOM-NEXT: rcpss %xmm0, %xmm0 +; ATOM-NEXT: rcpss %xmm1, %xmm1 +; ATOM-NEXT: addps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; 
SLM-LABEL: test_rcpss: +; SLM: # BB#0: +; SLM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [3:1.00] +; SLM-NEXT: rcpss %xmm0, %xmm0 # sched: [8:1.00] +; SLM-NEXT: rcpss %xmm1, %xmm1 # sched: [8:1.00] +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_rcpss: +; SANDY: # BB#0: +; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50] +; SANDY-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [9:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_rcpss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50] +; HASWELL-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [9:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_rcpss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00] +; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [7:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <4 x float> undef, float %a0, i32 0 + %2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1) + %3 = load float, float *%a1, align 4 + %4 = insertelement <4 x float> undef, float %3, i32 0 + %5 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %4) + %6 = fadd <4 x float> %2, %5 + ret <4 x float> %6 +} +declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone + +define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_rsqrtps: +; GENERIC: # BB#0: +; GENERIC-NEXT: rsqrtps %xmm0, %xmm1 +; GENERIC-NEXT: rsqrtps (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; 
GENERIC-NEXT: retq +; +; ATOM-LABEL: test_rsqrtps: +; ATOM: # BB#0: +; ATOM-NEXT: rsqrtps (%rdi), %xmm1 +; ATOM-NEXT: rsqrtps %xmm0, %xmm0 +; ATOM-NEXT: addps %xmm0, %xmm1 +; ATOM-NEXT: movaps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_rsqrtps: +; SLM: # BB#0: +; SLM-NEXT: rsqrtps (%rdi), %xmm1 # sched: [8:1.00] +; SLM-NEXT: rsqrtps %xmm0, %xmm0 # sched: [5:1.00] +; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_rsqrtps: +; SANDY: # BB#0: +; SANDY-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [9:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_rsqrtps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [9:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_rsqrtps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [7:1.00] +; BTVER2-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) + %2 = load <4 x float>, <4 x float> *%a1, align 16 + %3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2) + %4 = fadd <4 x float> %1, %3 + ret <4 x float> %4 +} +declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone + +; TODO - rsqrtss_m + +define <4 x float> @test_rsqrtss(float %a0, float *%a1) { +; GENERIC-LABEL: test_rsqrtss: +; GENERIC: # BB#0: +; GENERIC-NEXT: rsqrtss %xmm0, %xmm0 +; GENERIC-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; GENERIC-NEXT: rsqrtss %xmm1, %xmm1 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_rsqrtss: +; ATOM: # BB#0: 
+; ATOM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; ATOM-NEXT: rsqrtss %xmm0, %xmm0 +; ATOM-NEXT: rsqrtss %xmm1, %xmm1 +; ATOM-NEXT: addps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_rsqrtss: +; SLM: # BB#0: +; SLM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [3:1.00] +; SLM-NEXT: rsqrtss %xmm0, %xmm0 # sched: [8:1.00] +; SLM-NEXT: rsqrtss %xmm1, %xmm1 # sched: [8:1.00] +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_rsqrtss: +; SANDY: # BB#0: +; SANDY-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50] +; SANDY-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [9:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_rsqrtss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50] +; HASWELL-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_rsqrtss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00] +; BTVER2-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [7:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <4 x float> undef, float %a0, i32 0 + %2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1) + %3 = load float, float *%a1, align 4 + %4 = insertelement <4 x float> undef, float %3, i32 0 + %5 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %4) + %6 = fadd <4 x float> %2, %5 + ret <4 x float> %6 +} +declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone + +define void @test_sfence() { 
+; GENERIC-LABEL: test_sfence: +; GENERIC: # BB#0: +; GENERIC-NEXT: sfence +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_sfence: +; ATOM: # BB#0: +; ATOM-NEXT: sfence +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_sfence: +; SLM: # BB#0: +; SLM-NEXT: sfence # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_sfence: +; SANDY: # BB#0: +; SANDY-NEXT: sfence # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_sfence: +; HASWELL: # BB#0: +; HASWELL-NEXT: sfence # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_sfence: +; BTVER2: # BB#0: +; BTVER2-NEXT: sfence # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + call void @llvm.x86.sse.sfence() + ret void +} +declare void @llvm.x86.sse.sfence() nounwind readnone + +define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) nounwind { +; GENERIC-LABEL: test_shufps: +; GENERIC: # BB#0: +; GENERIC-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] +; GENERIC-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_shufps: +; ATOM: # BB#0: +; ATOM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] +; ATOM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_shufps: +; SLM: # BB#0: +; SLM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00] +; SLM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_shufps: +; SANDY: # BB#0: +; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00] +; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [5:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_shufps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vshufps 
{{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00] +; HASWELL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_shufps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50] +; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4> + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 3, i32 4, i32 4> + ret <4 x float> %3 +} + +define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_sqrtps: +; GENERIC: # BB#0: +; GENERIC-NEXT: sqrtps %xmm0, %xmm1 +; GENERIC-NEXT: sqrtps (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_sqrtps: +; ATOM: # BB#0: +; ATOM-NEXT: sqrtps %xmm0, %xmm1 +; ATOM-NEXT: sqrtps (%rdi), %xmm0 +; ATOM-NEXT: addps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_sqrtps: +; SLM: # BB#0: +; SLM-NEXT: sqrtps (%rdi), %xmm1 # sched: [18:1.00] +; SLM-NEXT: sqrtps %xmm0, %xmm0 # sched: [15:1.00] +; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_sqrtps: +; SANDY: # BB#0: +; SANDY-NEXT: vsqrtps %xmm0, %xmm0 # sched: [15:1.00] +; SANDY-NEXT: vsqrtps (%rdi), %xmm1 # sched: [19:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_sqrtps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsqrtps %xmm0, %xmm0 # sched: [15:1.00] +; HASWELL-NEXT: vsqrtps (%rdi), %xmm1 # sched: [19:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_sqrtps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vsqrtps (%rdi), 
%xmm1 # sched: [26:21.00] +; BTVER2-NEXT: vsqrtps %xmm0, %xmm0 # sched: [21:21.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0) + %2 = load <4 x float>, <4 x float> *%a1, align 16 + %3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2) + %4 = fadd <4 x float> %1, %3 + ret <4 x float> %4 +} +declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone + +; TODO - sqrtss_m + +define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_sqrtss: +; GENERIC: # BB#0: +; GENERIC-NEXT: sqrtss %xmm0, %xmm0 +; GENERIC-NEXT: movaps (%rdi), %xmm1 +; GENERIC-NEXT: sqrtss %xmm1, %xmm1 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_sqrtss: +; ATOM: # BB#0: +; ATOM-NEXT: movaps (%rdi), %xmm1 +; ATOM-NEXT: sqrtss %xmm0, %xmm0 +; ATOM-NEXT: sqrtss %xmm1, %xmm1 +; ATOM-NEXT: addps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_sqrtss: +; SLM: # BB#0: +; SLM-NEXT: movaps (%rdi), %xmm1 # sched: [3:1.00] +; SLM-NEXT: sqrtss %xmm0, %xmm0 # sched: [18:1.00] +; SLM-NEXT: sqrtss %xmm1, %xmm1 # sched: [18:1.00] +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_sqrtss: +; SANDY: # BB#0: +; SANDY-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [19:1.00] +; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [4:0.50] +; SANDY-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [19:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_sqrtss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [19:1.00] +; HASWELL-NEXT: vmovaps (%rdi), %xmm1 # sched: [4:0.50] +; HASWELL-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [19:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_sqrtss: +; 
BTVER2: # BB#0: +; BTVER2-NEXT: vmovaps (%rdi), %xmm1 # sched: [5:1.00] +; BTVER2-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [26:21.00] +; BTVER2-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) + %2 = load <4 x float>, <4 x float> *%a1, align 16 + %3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2) + %4 = fadd <4 x float> %1, %3 + ret <4 x float> %4 +} +declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone + +define i32 @test_stmxcsr() { +; GENERIC-LABEL: test_stmxcsr: +; GENERIC: # BB#0: +; GENERIC-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; GENERIC-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_stmxcsr: +; ATOM: # BB#0: +; ATOM-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; ATOM-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_stmxcsr: +; SLM: # BB#0: +; SLM-NEXT: stmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; SLM-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_stmxcsr: +; SANDY: # BB#0: +; SANDY-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; SANDY-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [4:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_stmxcsr: +; HASWELL: # BB#0: +; HASWELL-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00] +; HASWELL-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [4:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_stmxcsr: +; BTVER2: # BB#0: +; BTVER2-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00] +; BTVER2-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = alloca i32, align 4 + %2 = bitcast i32* %1 to i8* + call void @llvm.x86.sse.stmxcsr(i8* %2) + %3 = load i32, i32* %1, align 4 + ret i32 %3 +} +declare void @llvm.x86.sse.stmxcsr(i8*) nounwind 
readnone + +define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_subps: +; GENERIC: # BB#0: +; GENERIC-NEXT: subps %xmm1, %xmm0 +; GENERIC-NEXT: subps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_subps: +; ATOM: # BB#0: +; ATOM-NEXT: subps %xmm1, %xmm0 +; ATOM-NEXT: subps (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_subps: +; SLM: # BB#0: +; SLM-NEXT: subps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: subps (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_subps: +; SANDY: # BB#0: +; SANDY-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_subps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_subps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fsub <4 x float> %a0, %a1 + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = fsub <4 x float> %1, %2 + ret <4 x float> %3 +} + +define float @test_subss(float %a0, float %a1, float *%a2) { +; GENERIC-LABEL: test_subss: +; GENERIC: # BB#0: +; GENERIC-NEXT: subss %xmm1, %xmm0 +; GENERIC-NEXT: subss (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_subss: +; ATOM: # BB#0: +; ATOM-NEXT: subss %xmm1, %xmm0 +; ATOM-NEXT: subss (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_subss: +; SLM: # BB#0: +; SLM-NEXT: subss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: subss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_subss: +; SANDY: # BB#0: +; SANDY-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vsubss (%rdi), 
%xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_subss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_subss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fsub float %a0, %a1 + %2 = load float, float *%a2, align 4 + %3 = fsub float %1, %2 + ret float %3 +} + +define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_ucomiss: +; GENERIC: # BB#0: +; GENERIC-NEXT: ucomiss %xmm1, %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %cl +; GENERIC-NEXT: andb %al, %cl +; GENERIC-NEXT: ucomiss (%rdi), %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %dl +; GENERIC-NEXT: andb %al, %dl +; GENERIC-NEXT: orb %cl, %dl +; GENERIC-NEXT: movzbl %dl, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_ucomiss: +; ATOM: # BB#0: +; ATOM-NEXT: ucomiss %xmm1, %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %cl +; ATOM-NEXT: andb %al, %cl +; ATOM-NEXT: ucomiss (%rdi), %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %dl +; ATOM-NEXT: andb %al, %dl +; ATOM-NEXT: orb %cl, %dl +; ATOM-NEXT: movzbl %dl, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_ucomiss: +; SLM: # BB#0: +; SLM-NEXT: ucomiss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %cl # sched: [1:0.50] +; SLM-NEXT: andb %al, %cl # sched: [1:0.50] +; SLM-NEXT: ucomiss (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %dl # sched: [1:0.50] +; SLM-NEXT: andb %al, %dl # sched: [1:0.50] +; SLM-NEXT: orb %cl, %dl # sched: [1:0.50] +; SLM-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_ucomiss: +; SANDY: # 
BB#0: +; SANDY-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %cl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %cl # sched: [1:0.33] +; SANDY-NEXT: vucomiss (%rdi), %xmm0 # sched: [7:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %dl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %dl # sched: [1:0.33] +; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33] +; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_ucomiss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %cl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %cl # sched: [1:0.25] +; HASWELL-NEXT: vucomiss (%rdi), %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %dl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %dl # sched: [1:0.25] +; HASWELL-NEXT: orb %cl, %dl # sched: [1:0.25] +; HASWELL-NEXT: movzbl %dl, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_ucomiss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %cl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %cl # sched: [1:0.50] +; BTVER2-NEXT: vucomiss (%rdi), %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %dl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %dl # sched: [1:0.50] +; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50] +; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1) + %2 = load <4 x float>, <4 x float> *%a2, align 4 + %3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2) + %4 = or i32 %1, %3 + ret i32 %4 +} +declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone + 
+define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_unpckhps: +; GENERIC: # BB#0: +; GENERIC-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; GENERIC-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_unpckhps: +; ATOM: # BB#0: +; ATOM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; ATOM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_unpckhps: +; SLM: # BB#0: +; SLM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; SLM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_unpckhps: +; SANDY: # BB#0: +; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [5:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_unpckhps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; HASWELL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_unpckhps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50] +; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7> + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7> + ret <4 x float> %3 +} + +define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x 
float> *%a2) { +; GENERIC-LABEL: test_unpcklps: +; GENERIC: # BB#0: +; GENERIC-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; GENERIC-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_unpcklps: +; ATOM: # BB#0: +; ATOM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; ATOM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_unpcklps: +; SLM: # BB#0: +; SLM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00] +; SLM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_unpcklps: +; SANDY: # BB#0: +; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00] +; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [5:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_unpcklps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00] +; HASWELL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_unpcklps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50] +; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5> + %2 = load <4 x float>, <4 x float> *%a2, align 16 + %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5> + ret <4 x float> %3 +} + +define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { +; GENERIC-LABEL: test_xorps: +; GENERIC: # BB#0: +; 
GENERIC-NEXT: xorps %xmm1, %xmm0 +; GENERIC-NEXT: xorps (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_xorps: +; ATOM: # BB#0: +; ATOM-NEXT: xorps %xmm1, %xmm0 +; ATOM-NEXT: xorps (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_xorps: +; SLM: # BB#0: +; SLM-NEXT: xorps %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: xorps (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_xorps: +; SANDY: # BB#0: +; SANDY-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_xorps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_xorps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <4 x float> %a0 to <4 x i32> + %2 = bitcast <4 x float> %a1 to <4 x i32> + %3 = xor <4 x i32> %1, %2 + %4 = load <4 x float>, <4 x float> *%a2, align 16 + %5 = bitcast <4 x float> %4 to <4 x i32> + %6 = xor <4 x i32> %3, %5 + %7 = bitcast <4 x i32> %6 to <4 x float> + ret <4 x float> %7 +} + +!0 = !{i32 1} diff --git a/test/CodeGen/X86/sse2-schedule.ll b/test/CodeGen/X86/sse2-schedule.ll new file mode 100644 index 0000000000000..33a4f413b6832 --- /dev/null +++ b/test/CodeGen/X86/sse2-schedule.ll @@ -0,0 +1,6039 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule 
-mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=slm | FileCheck %s --check-prefix=CHECK --check-prefix=SLM +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2 + +define <2 x double> @test_addpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_addpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: addpd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_addpd: +; ATOM: # BB#0: +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: addpd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_addpd: +; SLM: # BB#0: +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: addpd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_addpd: +; SANDY: # BB#0: +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_addpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: 
[1:1.00] +; +; BTVER2-LABEL: test_addpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fadd <2 x double> %a0, %a1 + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = fadd <2 x double> %1, %2 + ret <2 x double> %3 +} + +define double @test_addsd(double %a0, double %a1, double *%a2) { +; GENERIC-LABEL: test_addsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: addsd %xmm1, %xmm0 +; GENERIC-NEXT: addsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_addsd: +; ATOM: # BB#0: +; ATOM-NEXT: addsd %xmm1, %xmm0 +; ATOM-NEXT: addsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_addsd: +; SLM: # BB#0: +; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: addsd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_addsd: +; SANDY: # BB#0: +; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_addsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_addsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fadd double %a0, %a1 + %2 = load double, double *%a2, align 8 + %3 = fadd double %1, %2 + ret double %3 +} + +define <2 x double> @test_andpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_andpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: andpd %xmm1, %xmm0 +; GENERIC-NEXT: andpd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_andpd: +; ATOM: # BB#0: +; ATOM-NEXT: andpd %xmm1, 
%xmm0 +; ATOM-NEXT: andpd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_andpd: +; SLM: # BB#0: +; SLM-NEXT: andpd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: andpd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_andpd: +; SANDY: # BB#0: +; SANDY-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_andpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_andpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <2 x double> %a0 to <4 x i32> + %2 = bitcast <2 x double> %a1 to <4 x i32> + %3 = and <4 x i32> %1, %2 + %4 = load <2 x double>, <2 x double> *%a2, align 16 + %5 = bitcast <2 x double> %4 to <4 x i32> + %6 = and <4 x i32> %3, %5 + %7 = bitcast <4 x i32> %6 to <2 x double> + %8 = fadd <2 x double> %a1, %7 + ret <2 x double> %8 +} + +define <2 x double> @test_andnotpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_andnotpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: andnpd %xmm1, %xmm0 +; GENERIC-NEXT: andnpd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_andnotpd: +; ATOM: # BB#0: +; ATOM-NEXT: andnpd %xmm1, %xmm0 +; ATOM-NEXT: andnpd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_andnotpd: +; SLM: # BB#0: +; 
SLM-NEXT: andnpd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: andnpd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_andnotpd: +; SANDY: # BB#0: +; SANDY-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_andnotpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_andnotpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <2 x double> %a0 to <4 x i32> + %2 = bitcast <2 x double> %a1 to <4 x i32> + %3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1> + %4 = and <4 x i32> %3, %2 + %5 = load <2 x double>, <2 x double> *%a2, align 16 + %6 = bitcast <2 x double> %5 to <4 x i32> + %7 = xor <4 x i32> %4, <i32 -1, i32 -1, i32 -1, i32 -1> + %8 = and <4 x i32> %6, %7 + %9 = bitcast <4 x i32> %8 to <2 x double> + %10 = fadd <2 x double> %a1, %9 + ret <2 x double> %10 +} + +define <2 x double> @test_cmppd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_cmppd: +; GENERIC: # BB#0: +; GENERIC-NEXT: cmpeqpd %xmm0, %xmm1 +; GENERIC-NEXT: cmpeqpd (%rdi), %xmm0 +; GENERIC-NEXT: orpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cmppd: +; ATOM: # BB#0: +; ATOM-NEXT: cmpeqpd %xmm0, %xmm1 +; ATOM-NEXT: cmpeqpd (%rdi), %xmm0 +; ATOM-NEXT: orpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cmppd: +; SLM: # BB#0: +; SLM-NEXT: cmpeqpd 
%xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: cmpeqpd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: orpd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cmppd: +; SANDY: # BB#0: +; SANDY-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00] +; SANDY-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cmppd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00] +; HASWELL-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cmppd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00] +; BTVER2-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fcmp oeq <2 x double> %a0, %a1 + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = fcmp oeq <2 x double> %a0, %2 + %4 = or <2 x i1> %1, %3 + %5 = sext <2 x i1> %4 to <2 x i64> + %6 = bitcast <2 x i64> %5 to <2 x double> + ret <2 x double> %6 +} + +define double @test_cmpsd(double %a0, double %a1, double *%a2) { +; GENERIC-LABEL: test_cmpsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: cmpeqsd %xmm1, %xmm0 +; GENERIC-NEXT: cmpeqsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cmpsd: +; ATOM: # BB#0: +; ATOM-NEXT: cmpeqsd %xmm1, %xmm0 +; ATOM-NEXT: cmpeqsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cmpsd: +; SLM: # BB#0: +; SLM-NEXT: cmpeqsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: cmpeqsd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cmpsd: +; SANDY: # BB#0: +; SANDY-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # 
sched: [5:1.00] +; +; HASWELL-LABEL: test_cmpsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cmpsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <2 x double> undef, double %a0, i32 0 + %2 = insertelement <2 x double> undef, double %a1, i32 0 + %3 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %1, <2 x double> %2, i8 0) + %4 = load double, double *%a2, align 8 + %5 = insertelement <2 x double> undef, double %4, i32 0 + %6 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %3, <2 x double> %5, i8 0) + %7 = extractelement <2 x double> %6, i32 0 + ret double %7 +} +declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone + +define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_comisd: +; GENERIC: # BB#0: +; GENERIC-NEXT: comisd %xmm1, %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %cl +; GENERIC-NEXT: andb %al, %cl +; GENERIC-NEXT: comisd (%rdi), %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %dl +; GENERIC-NEXT: andb %al, %dl +; GENERIC-NEXT: orb %cl, %dl +; GENERIC-NEXT: movzbl %dl, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_comisd: +; ATOM: # BB#0: +; ATOM-NEXT: comisd %xmm1, %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %cl +; ATOM-NEXT: andb %al, %cl +; ATOM-NEXT: comisd (%rdi), %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %dl +; ATOM-NEXT: andb %al, %dl +; ATOM-NEXT: orb %cl, %dl +; ATOM-NEXT: movzbl %dl, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_comisd: +; SLM: # BB#0: +; SLM-NEXT: comisd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %cl # sched: [1:0.50] +; 
SLM-NEXT: andb %al, %cl # sched: [1:0.50] +; SLM-NEXT: comisd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %dl # sched: [1:0.50] +; SLM-NEXT: andb %al, %dl # sched: [1:0.50] +; SLM-NEXT: orb %cl, %dl # sched: [1:0.50] +; SLM-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_comisd: +; SANDY: # BB#0: +; SANDY-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %cl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %cl # sched: [1:0.33] +; SANDY-NEXT: vcomisd (%rdi), %xmm0 # sched: [7:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %dl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %dl # sched: [1:0.33] +; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33] +; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_comisd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %cl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %cl # sched: [1:0.25] +; HASWELL-NEXT: vcomisd (%rdi), %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %dl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %dl # sched: [1:0.25] +; HASWELL-NEXT: orb %cl, %dl # sched: [1:0.25] +; HASWELL-NEXT: movzbl %dl, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_comisd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %cl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %cl # sched: [1:0.50] +; BTVER2-NEXT: vcomisd (%rdi), %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %dl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %dl # sched: [1:0.50] +; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50] +; BTVER2-NEXT: movzbl %dl, 
%eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1) + %2 = load <2 x double>, <2 x double> *%a2, align 8 + %3 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %2) + %4 = or i32 %1, %3 + ret i32 %4 +} +declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) { +; GENERIC-LABEL: test_cvtdq2pd: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtdq2pd %xmm0, %xmm1 +; GENERIC-NEXT: cvtdq2pd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtdq2pd: +; ATOM: # BB#0: +; ATOM-NEXT: cvtdq2pd %xmm0, %xmm1 +; ATOM-NEXT: cvtdq2pd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtdq2pd: +; SLM: # BB#0: +; SLM-NEXT: cvtdq2pd %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvtdq2pd (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtdq2pd: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [4:1.00] +; SANDY-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [8:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtdq2pd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtdq2pd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1> + %2 = sitofp <2 x i32> %1 to <2 x double> + %3 
= load <4 x i32>, <4 x i32>*%a1, align 16 + %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <2 x i32> <i32 0, i32 1> + %5 = sitofp <2 x i32> %4 to <2 x double> + %6 = fadd <2 x double> %2, %5 + ret <2 x double> %6 +} + +define <4 x float> @test_cvtdq2ps(<4 x i32> %a0, <4 x i32> *%a1) { +; GENERIC-LABEL: test_cvtdq2ps: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtdq2ps %xmm0, %xmm1 +; GENERIC-NEXT: cvtdq2ps (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtdq2ps: +; ATOM: # BB#0: +; ATOM-NEXT: cvtdq2ps (%rdi), %xmm1 +; ATOM-NEXT: cvtdq2ps %xmm0, %xmm0 +; ATOM-NEXT: addps %xmm0, %xmm1 +; ATOM-NEXT: movaps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtdq2ps: +; SLM: # BB#0: +; SLM-NEXT: cvtdq2ps %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvtdq2ps (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtdq2ps: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [4:1.00] +; SANDY-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [8:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtdq2ps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtdq2ps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sitofp <4 x i32> %a0 to <4 x float> + %2 = load <4 x i32>, <4 x i32>*%a1, align 16 + %3 = sitofp <4 x i32> %2 to <4 x float> + %4 = fadd <4 x float> %1, %3 + ret <4 x float> %4 +} + +define <4 x i32> @test_cvtpd2dq(<2 x double> %a0, <2 x double> *%a1) { +; 
GENERIC-LABEL: test_cvtpd2dq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtpd2dq %xmm0, %xmm1 +; GENERIC-NEXT: cvtpd2dq (%rdi), %xmm0 +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtpd2dq: +; ATOM: # BB#0: +; ATOM-NEXT: cvtpd2dq (%rdi), %xmm1 +; ATOM-NEXT: cvtpd2dq %xmm0, %xmm0 +; ATOM-NEXT: paddd %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtpd2dq: +; SLM: # BB#0: +; SLM-NEXT: cvtpd2dq %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvtpd2dq (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtpd2dq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [7:1.00] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtpd2dq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtpd2dq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) + %2 = load <2 x double>, <2 x double> *%a1, align 16 + %3 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %2) + %4 = add <4 x i32> %1, %3 + ret <4 x i32> %4 +} +declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone + +define <4 x float> @test_cvtpd2ps(<2 x double> %a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_cvtpd2ps: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtpd2ps %xmm0, %xmm1 +; GENERIC-NEXT: cvtpd2ps (%rdi), %xmm0 +; GENERIC-NEXT: addps %xmm1, %xmm0 +; 
GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtpd2ps: +; ATOM: # BB#0: +; ATOM-NEXT: cvtpd2ps (%rdi), %xmm1 +; ATOM-NEXT: cvtpd2ps %xmm0, %xmm0 +; ATOM-NEXT: addps %xmm0, %xmm1 +; ATOM-NEXT: movaps %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtpd2ps: +; SLM: # BB#0: +; SLM-NEXT: cvtpd2ps %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvtpd2ps (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtpd2ps: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [7:1.00] +; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtpd2ps: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtpd2ps: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) + %2 = load <2 x double>, <2 x double> *%a1, align 16 + %3 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %2) + %4 = fadd <4 x float> %1, %3 + ret <4 x float> %4 +} +declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone + +define <4 x i32> @test_cvtps2dq(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_cvtps2dq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtps2dq %xmm0, %xmm1 +; GENERIC-NEXT: cvtps2dq (%rdi), %xmm0 +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtps2dq: +; ATOM: # BB#0: +; ATOM-NEXT: cvtps2dq (%rdi), %xmm1 +; ATOM-NEXT: cvtps2dq %xmm0, %xmm0 +; ATOM-NEXT: 
paddd %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtps2dq: +; SLM: # BB#0: +; SLM-NEXT: cvtps2dq %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvtps2dq (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtps2dq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [7:1.00] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtps2dq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [7:1.00] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtps2dq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) + %2 = load <4 x float>, <4 x float> *%a1, align 16 + %3 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %2) + %4 = add <4 x i32> %1, %3 + ret <4 x i32> %4 +} +declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone + +define <2 x double> @test_cvtps2pd(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_cvtps2pd: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtps2pd %xmm0, %xmm1 +; GENERIC-NEXT: cvtps2pd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtps2pd: +; ATOM: # BB#0: +; ATOM-NEXT: cvtps2pd (%rdi), %xmm1 +; ATOM-NEXT: cvtps2pd %xmm0, %xmm0 +; ATOM-NEXT: addpd %xmm0, %xmm1 +; ATOM-NEXT: movapd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtps2pd: +; SLM: # BB#0: +; SLM-NEXT: cvtps2pd %xmm0, %xmm1 # sched: [4:0.50] +; 
SLM-NEXT: cvtps2pd (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtps2pd: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [7:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtps2pd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtps2pd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> <i32 0, i32 1> + %2 = fpext <2 x float> %1 to <2 x double> + %3 = load <4 x float>, <4 x float> *%a1, align 16 + %4 = shufflevector <4 x float> %3, <4 x float> undef, <2 x i32> <i32 0, i32 1> + %5 = fpext <2 x float> %4 to <2 x double> + %6 = fadd <2 x double> %2, %5 + ret <2 x double> %6 +} + +define i32 @test_cvtsd2si(double %a0, double *%a1) { +; GENERIC-LABEL: test_cvtsd2si: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsd2si %xmm0, %ecx +; GENERIC-NEXT: cvtsd2si (%rdi), %eax +; GENERIC-NEXT: addl %ecx, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsd2si: +; ATOM: # BB#0: +; ATOM-NEXT: cvtsd2si (%rdi), %eax +; ATOM-NEXT: cvtsd2si %xmm0, %ecx +; ATOM-NEXT: addl %ecx, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsd2si: +; SLM: # BB#0: +; SLM-NEXT: cvtsd2si (%rdi), %eax # sched: [7:1.00] +; SLM-NEXT: cvtsd2si %xmm0, %ecx # sched: [4:0.50] +; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsd2si: +; SANDY: # 
BB#0: +; SANDY-NEXT: vcvtsd2si %xmm0, %ecx # sched: [3:1.00] +; SANDY-NEXT: vcvtsd2si (%rdi), %eax # sched: [7:1.00] +; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtsd2si: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsd2si %xmm0, %ecx # sched: [4:1.00] +; HASWELL-NEXT: vcvtsd2si (%rdi), %eax # sched: [8:1.00] +; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsd2si: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtsd2si (%rdi), %eax # sched: [8:1.00] +; BTVER2-NEXT: vcvtsd2si %xmm0, %ecx # sched: [3:1.00] +; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <2 x double> undef, double %a0, i32 0 + %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %1) + %3 = load double, double *%a1, align 8 + %4 = insertelement <2 x double> undef, double %3, i32 0 + %5 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %4) + %6 = add i32 %2, %5 + ret i32 %6 +} +declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone + +define i64 @test_cvtsd2siq(double %a0, double *%a1) { +; GENERIC-LABEL: test_cvtsd2siq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsd2si %xmm0, %rcx +; GENERIC-NEXT: cvtsd2si (%rdi), %rax +; GENERIC-NEXT: addq %rcx, %rax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsd2siq: +; ATOM: # BB#0: +; ATOM-NEXT: cvtsd2si (%rdi), %rax +; ATOM-NEXT: cvtsd2si %xmm0, %rcx +; ATOM-NEXT: addq %rcx, %rax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsd2siq: +; SLM: # BB#0: +; SLM-NEXT: cvtsd2si (%rdi), %rax # sched: [7:1.00] +; SLM-NEXT: cvtsd2si %xmm0, %rcx # sched: [4:0.50] +; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsd2siq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtsd2si %xmm0, %rcx # sched: [3:1.00] +; SANDY-NEXT: vcvtsd2si (%rdi), %rax # sched: [7:1.00] +; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: 
[5:1.00] +; +; HASWELL-LABEL: test_cvtsd2siq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsd2si %xmm0, %rcx # sched: [4:1.00] +; HASWELL-NEXT: vcvtsd2si (%rdi), %rax # sched: [8:1.00] +; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsd2siq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtsd2si (%rdi), %rax # sched: [8:1.00] +; BTVER2-NEXT: vcvtsd2si %xmm0, %rcx # sched: [3:1.00] +; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <2 x double> undef, double %a0, i32 0 + %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %1) + %3 = load double, double *%a1, align 8 + %4 = insertelement <2 x double> undef, double %3, i32 0 + %5 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %4) + %6 = add i64 %2, %5 + ret i64 %6 +} +declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone + +define float @test_cvtsd2ss(double %a0, double *%a1) { +; GENERIC-LABEL: test_cvtsd2ss: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsd2ss %xmm0, %xmm1 +; GENERIC-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; GENERIC-NEXT: cvtsd2ss %xmm0, %xmm0 +; GENERIC-NEXT: addss %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsd2ss: +; ATOM: # BB#0: +; ATOM-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; ATOM-NEXT: cvtsd2ss %xmm0, %xmm2 +; ATOM-NEXT: xorps %xmm0, %xmm0 +; ATOM-NEXT: cvtsd2ss %xmm1, %xmm0 +; ATOM-NEXT: addss %xmm2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsd2ss: +; SLM: # BB#0: +; SLM-NEXT: cvtsd2ss %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [3:1.00] +; SLM-NEXT: cvtsd2ss %xmm0, %xmm0 # sched: [4:0.50] +; SLM-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsd2ss: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50] +; SANDY-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # 
sched: [3:1.00] +; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtsd2ss: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50] +; HASWELL-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00] +; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsd2ss: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [5:1.00] +; BTVER2-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [3:1.00] +; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fptrunc double %a0 to float + %2 = load double, double *%a1, align 8 + %3 = fptrunc double %2 to float + %4 = fadd float %1, %3 + ret float %4 +} + +define double @test_cvtsi2sd(i32 %a0, i32 *%a1) { +; GENERIC-LABEL: test_cvtsi2sd: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsi2sdl %edi, %xmm1 +; GENERIC-NEXT: cvtsi2sdl (%rsi), %xmm0 +; GENERIC-NEXT: addsd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsi2sd: +; ATOM: # BB#0: +; ATOM-NEXT: cvtsi2sdl (%rsi), %xmm0 +; ATOM-NEXT: cvtsi2sdl %edi, %xmm1 +; ATOM-NEXT: addsd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsi2sd: +; SLM: # BB#0: +; SLM-NEXT: cvtsi2sdl (%rsi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: cvtsi2sdl %edi, %xmm1 # sched: [4:0.50] +; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsi2sd: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [4:1.00] +; SANDY-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtsi2sd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsi2sdl 
%edi, %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsi2sd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sitofp i32 %a0 to double + %2 = load i32, i32 *%a1, align 8 + %3 = sitofp i32 %2 to double + %4 = fadd double %1, %3 + ret double %4 +} + +define double @test_cvtsi2sdq(i64 %a0, i64 *%a1) { +; GENERIC-LABEL: test_cvtsi2sdq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtsi2sdq %rdi, %xmm1 +; GENERIC-NEXT: cvtsi2sdq (%rsi), %xmm0 +; GENERIC-NEXT: addsd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtsi2sdq: +; ATOM: # BB#0: +; ATOM-NEXT: cvtsi2sdq (%rsi), %xmm0 +; ATOM-NEXT: cvtsi2sdq %rdi, %xmm1 +; ATOM-NEXT: addsd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtsi2sdq: +; SLM: # BB#0: +; SLM-NEXT: cvtsi2sdq (%rsi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: cvtsi2sdq %rdi, %xmm1 # sched: [4:0.50] +; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtsi2sdq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [4:1.00] +; SANDY-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtsi2sdq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtsi2sdq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [3:1.00] +; 
BTVER2-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sitofp i64 %a0 to double + %2 = load i64, i64 *%a1, align 8 + %3 = sitofp i64 %2 to double + %4 = fadd double %1, %3 + ret double %4 +} + +; TODO - cvtss2sd_m + +define double @test_cvtss2sd(float %a0, float *%a1) { +; GENERIC-LABEL: test_cvtss2sd: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvtss2sd %xmm0, %xmm1 +; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; GENERIC-NEXT: cvtss2sd %xmm0, %xmm0 +; GENERIC-NEXT: addsd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvtss2sd: +; ATOM: # BB#0: +; ATOM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; ATOM-NEXT: cvtss2sd %xmm0, %xmm2 +; ATOM-NEXT: xorps %xmm0, %xmm0 +; ATOM-NEXT: cvtss2sd %xmm1, %xmm0 +; ATOM-NEXT: addsd %xmm2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvtss2sd: +; SLM: # BB#0: +; SLM-NEXT: cvtss2sd %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [3:1.00] +; SLM-NEXT: cvtss2sd %xmm0, %xmm0 # sched: [4:0.50] +; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvtss2sd: +; SANDY: # BB#0: +; SANDY-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50] +; SANDY-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [3:1.00] +; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvtss2sd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50] +; HASWELL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [2:1.00] +; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvtss2sd: +; BTVER2: # BB#0: +; 
BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00] +; BTVER2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [3:1.00] +; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fpext float %a0 to double + %2 = load float, float *%a1, align 4 + %3 = fpext float %2 to double + %4 = fadd double %1, %3 + ret double %4 +} + +define <4 x i32> @test_cvttpd2dq(<2 x double> %a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_cvttpd2dq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvttpd2dq %xmm0, %xmm1 +; GENERIC-NEXT: cvttpd2dq (%rdi), %xmm0 +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvttpd2dq: +; ATOM: # BB#0: +; ATOM-NEXT: cvttpd2dq (%rdi), %xmm1 +; ATOM-NEXT: cvttpd2dq %xmm0, %xmm0 +; ATOM-NEXT: paddd %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvttpd2dq: +; SLM: # BB#0: +; SLM-NEXT: cvttpd2dq %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvttpd2dq (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvttpd2dq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [7:1.00] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvttpd2dq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [4:1.00] +; HASWELL-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [8:1.00] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvttpd2dq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 
= fptosi <2 x double> %a0 to <2 x i32> + %2 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %3 = load <2 x double>, <2 x double> *%a1, align 16 + %4 = fptosi <2 x double> %3 to <2 x i32> + %5 = shufflevector <2 x i32> %4, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %6 = add <4 x i32> %2, %5 + ret <4 x i32> %6 +} + +define <4 x i32> @test_cvttps2dq(<4 x float> %a0, <4 x float> *%a1) { +; GENERIC-LABEL: test_cvttps2dq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvttps2dq %xmm0, %xmm1 +; GENERIC-NEXT: cvttps2dq (%rdi), %xmm0 +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvttps2dq: +; ATOM: # BB#0: +; ATOM-NEXT: cvttps2dq (%rdi), %xmm1 +; ATOM-NEXT: cvttps2dq %xmm0, %xmm0 +; ATOM-NEXT: paddd %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvttps2dq: +; SLM: # BB#0: +; SLM-NEXT: cvttps2dq %xmm0, %xmm1 # sched: [4:0.50] +; SLM-NEXT: cvttps2dq (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvttps2dq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [7:1.00] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvttps2dq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [7:1.00] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvttps2dq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [8:1.00] +; BTVER2-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fptosi <4 x float> %a0 to <4 x i32> + %2 = load <4 x float>, <4 x float> *%a1, align 
16 + %3 = fptosi <4 x float> %2 to <4 x i32> + %4 = add <4 x i32> %1, %3 + ret <4 x i32> %4 +} + +define i32 @test_cvttsd2si(double %a0, double *%a1) { +; GENERIC-LABEL: test_cvttsd2si: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvttsd2si %xmm0, %ecx +; GENERIC-NEXT: cvttsd2si (%rdi), %eax +; GENERIC-NEXT: addl %ecx, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvttsd2si: +; ATOM: # BB#0: +; ATOM-NEXT: cvttsd2si (%rdi), %eax +; ATOM-NEXT: cvttsd2si %xmm0, %ecx +; ATOM-NEXT: addl %ecx, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvttsd2si: +; SLM: # BB#0: +; SLM-NEXT: cvttsd2si (%rdi), %eax # sched: [7:1.00] +; SLM-NEXT: cvttsd2si %xmm0, %ecx # sched: [4:0.50] +; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvttsd2si: +; SANDY: # BB#0: +; SANDY-NEXT: vcvttsd2si %xmm0, %ecx # sched: [3:1.00] +; SANDY-NEXT: vcvttsd2si (%rdi), %eax # sched: [7:1.00] +; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvttsd2si: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvttsd2si %xmm0, %ecx # sched: [4:1.00] +; HASWELL-NEXT: vcvttsd2si (%rdi), %eax # sched: [8:1.00] +; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvttsd2si: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvttsd2si (%rdi), %eax # sched: [8:1.00] +; BTVER2-NEXT: vcvttsd2si %xmm0, %ecx # sched: [3:1.00] +; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fptosi double %a0 to i32 + %2 = load double, double *%a1, align 8 + %3 = fptosi double %2 to i32 + %4 = add i32 %1, %3 + ret i32 %4 +} + +define i64 @test_cvttsd2siq(double %a0, double *%a1) { +; GENERIC-LABEL: test_cvttsd2siq: +; GENERIC: # BB#0: +; GENERIC-NEXT: cvttsd2si %xmm0, %rcx +; GENERIC-NEXT: cvttsd2si (%rdi), %rax +; GENERIC-NEXT: addq %rcx, %rax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_cvttsd2siq: +; ATOM: # BB#0: +; ATOM-NEXT: cvttsd2si 
(%rdi), %rax +; ATOM-NEXT: cvttsd2si %xmm0, %rcx +; ATOM-NEXT: addq %rcx, %rax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_cvttsd2siq: +; SLM: # BB#0: +; SLM-NEXT: cvttsd2si (%rdi), %rax # sched: [7:1.00] +; SLM-NEXT: cvttsd2si %xmm0, %rcx # sched: [4:0.50] +; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_cvttsd2siq: +; SANDY: # BB#0: +; SANDY-NEXT: vcvttsd2si %xmm0, %rcx # sched: [3:1.00] +; SANDY-NEXT: vcvttsd2si (%rdi), %rax # sched: [7:1.00] +; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_cvttsd2siq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vcvttsd2si %xmm0, %rcx # sched: [4:1.00] +; HASWELL-NEXT: vcvttsd2si (%rdi), %rax # sched: [8:1.00] +; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_cvttsd2siq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vcvttsd2si (%rdi), %rax # sched: [8:1.00] +; BTVER2-NEXT: vcvttsd2si %xmm0, %rcx # sched: [3:1.00] +; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fptosi double %a0 to i64 + %2 = load double, double *%a1, align 8 + %3 = fptosi double %2 to i64 + %4 = add i64 %1, %3 + ret i64 %4 +} + +define <2 x double> @test_divpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_divpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: divpd %xmm1, %xmm0 +; GENERIC-NEXT: divpd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_divpd: +; ATOM: # BB#0: +; ATOM-NEXT: divpd %xmm1, %xmm0 +; ATOM-NEXT: divpd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_divpd: +; SLM: # BB#0: +; SLM-NEXT: divpd %xmm1, %xmm0 # sched: [34:34.00] +; SLM-NEXT: divpd (%rdi), %xmm0 # sched: [37:34.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_divpd: +; SANDY: # BB#0: +; SANDY-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; SANDY-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; 
SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_divpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; HASWELL-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_divpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [19:19.00] +; BTVER2-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [24:19.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fdiv <2 x double> %a0, %a1 + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = fdiv <2 x double> %1, %2 + ret <2 x double> %3 +} + +define double @test_divsd(double %a0, double %a1, double *%a2) { +; GENERIC-LABEL: test_divsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: divsd %xmm1, %xmm0 +; GENERIC-NEXT: divsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_divsd: +; ATOM: # BB#0: +; ATOM-NEXT: divsd %xmm1, %xmm0 +; ATOM-NEXT: divsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_divsd: +; SLM: # BB#0: +; SLM-NEXT: divsd %xmm1, %xmm0 # sched: [34:34.00] +; SLM-NEXT: divsd (%rdi), %xmm0 # sched: [37:34.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_divsd: +; SANDY: # BB#0: +; SANDY-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; SANDY-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_divsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [12:1.00] +; HASWELL-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [16:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_divsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [19:19.00] +; BTVER2-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [24:19.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fdiv double %a0, %a1 + %2 = load double, double *%a2, align 8 + %3 = fdiv double %1, %2 + ret double %3 +} + +define void @test_lfence() { +; GENERIC-LABEL: test_lfence: +; GENERIC: # BB#0: +; 
GENERIC-NEXT: lfence +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_lfence: +; ATOM: # BB#0: +; ATOM-NEXT: lfence +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_lfence: +; SLM: # BB#0: +; SLM-NEXT: lfence # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_lfence: +; SANDY: # BB#0: +; SANDY-NEXT: lfence # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_lfence: +; HASWELL: # BB#0: +; HASWELL-NEXT: lfence # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_lfence: +; BTVER2: # BB#0: +; BTVER2-NEXT: lfence # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + call void @llvm.x86.sse2.lfence() + ret void +} +declare void @llvm.x86.sse2.lfence() nounwind readnone + +define void @test_mfence() { +; GENERIC-LABEL: test_mfence: +; GENERIC: # BB#0: +; GENERIC-NEXT: mfence +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_mfence: +; ATOM: # BB#0: +; ATOM-NEXT: mfence +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_mfence: +; SLM: # BB#0: +; SLM-NEXT: mfence # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_mfence: +; SANDY: # BB#0: +; SANDY-NEXT: mfence # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_mfence: +; HASWELL: # BB#0: +; HASWELL-NEXT: mfence # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_mfence: +; BTVER2: # BB#0: +; BTVER2-NEXT: mfence # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + call void @llvm.x86.sse2.mfence() + ret void +} +declare void @llvm.x86.sse2.mfence() nounwind readnone + +define void @test_maskmovdqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) { +; GENERIC-LABEL: test_maskmovdqu: +; GENERIC: # BB#0: +; GENERIC-NEXT: maskmovdqu %xmm1, %xmm0 +; GENERIC-NEXT: retq 
+; +; ATOM-LABEL: test_maskmovdqu: +; ATOM: # BB#0: +; ATOM-NEXT: maskmovdqu %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_maskmovdqu: +; SLM: # BB#0: +; SLM-NEXT: maskmovdqu %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_maskmovdqu: +; SANDY: # BB#0: +; SANDY-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_maskmovdqu: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [14:2.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_maskmovdqu: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) + ret void +} +declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind + +define <2 x double> @test_maxpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_maxpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: maxpd %xmm1, %xmm0 +; GENERIC-NEXT: maxpd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_maxpd: +; ATOM: # BB#0: +; ATOM-NEXT: maxpd %xmm1, %xmm0 +; ATOM-NEXT: maxpd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_maxpd: +; SLM: # BB#0: +; SLM-NEXT: maxpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: maxpd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_maxpd: +; SANDY: # BB#0: +; SANDY-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_maxpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_maxpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: 
vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %1, <2 x double> %2) + ret <2 x double> %3 +} +declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_maxsd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_maxsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: maxsd %xmm1, %xmm0 +; GENERIC-NEXT: maxsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_maxsd: +; ATOM: # BB#0: +; ATOM-NEXT: maxsd %xmm1, %xmm0 +; ATOM-NEXT: maxsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_maxsd: +; SLM: # BB#0: +; SLM-NEXT: maxsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: maxsd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_maxsd: +; SANDY: # BB#0: +; SANDY-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_maxsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_maxsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %1, <2 x double> %2) + ret <2 x double> %3 +} +declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind 
readnone + +define <2 x double> @test_minpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_minpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: minpd %xmm1, %xmm0 +; GENERIC-NEXT: minpd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_minpd: +; ATOM: # BB#0: +; ATOM-NEXT: minpd %xmm1, %xmm0 +; ATOM-NEXT: minpd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_minpd: +; SLM: # BB#0: +; SLM-NEXT: minpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: minpd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_minpd: +; SANDY: # BB#0: +; SANDY-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_minpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_minpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %1, <2 x double> %2) + ret <2 x double> %3 +} +declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_minsd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_minsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: minsd %xmm1, %xmm0 +; GENERIC-NEXT: minsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_minsd: +; ATOM: # BB#0: +; ATOM-NEXT: minsd %xmm1, %xmm0 +; ATOM-NEXT: minsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_minsd: +; SLM: # BB#0: +; SLM-NEXT: minsd %xmm1, %xmm0 # sched: [3:1.00] 
+; SLM-NEXT: minsd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_minsd: +; SANDY: # BB#0: +; SANDY-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_minsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_minsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %1, <2 x double> %2) + ret <2 x double> %3 +} +declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone + +define void @test_movapd(<2 x double> *%a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_movapd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movapd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm0, %xmm0 +; GENERIC-NEXT: movapd %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movapd: +; ATOM: # BB#0: +; ATOM-NEXT: movapd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm0, %xmm0 +; ATOM-NEXT: movapd %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movapd: +; SLM: # BB#0: +; SLM-NEXT: movapd (%rdi), %xmm0 # sched: [3:1.00] +; SLM-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movapd %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movapd: +; SANDY: # BB#0: +; SANDY-NEXT: vmovapd (%rdi), %xmm0 # sched: [4:0.50] +; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: 
test_movapd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovapd (%rdi), %xmm0 # sched: [4:0.50] +; HASWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movapd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovapd (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load <2 x double>, <2 x double> *%a0, align 16 + %2 = fadd <2 x double> %1, %1 + store <2 x double> %2, <2 x double> *%a1, align 16 + ret void +} + +define void @test_movdqa(<2 x i64> *%a0, <2 x i64> *%a1) { +; GENERIC-LABEL: test_movdqa: +; GENERIC: # BB#0: +; GENERIC-NEXT: movdqa (%rdi), %xmm0 +; GENERIC-NEXT: paddq %xmm0, %xmm0 +; GENERIC-NEXT: movdqa %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movdqa: +; ATOM: # BB#0: +; ATOM-NEXT: movdqa (%rdi), %xmm0 +; ATOM-NEXT: paddq %xmm0, %xmm0 +; ATOM-NEXT: movdqa %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movdqa: +; SLM: # BB#0: +; SLM-NEXT: movdqa (%rdi), %xmm0 # sched: [3:1.00] +; SLM-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50] +; SLM-NEXT: movdqa %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movdqa: +; SANDY: # BB#0: +; SANDY-NEXT: vmovdqa (%rdi), %xmm0 # sched: [4:0.50] +; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movdqa: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovdqa (%rdi), %xmm0 # sched: [4:0.50] +; HASWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movdqa: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovdqa (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # 
sched: [1:0.50] +; BTVER2-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load <2 x i64>, <2 x i64> *%a0, align 16 + %2 = add <2 x i64> %1, %1 + store <2 x i64> %2, <2 x i64> *%a1, align 16 + ret void +} + +define void @test_movdqu(<2 x i64> *%a0, <2 x i64> *%a1) { +; GENERIC-LABEL: test_movdqu: +; GENERIC: # BB#0: +; GENERIC-NEXT: movdqu (%rdi), %xmm0 +; GENERIC-NEXT: paddq %xmm0, %xmm0 +; GENERIC-NEXT: movdqu %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movdqu: +; ATOM: # BB#0: +; ATOM-NEXT: movdqu (%rdi), %xmm0 +; ATOM-NEXT: paddq %xmm0, %xmm0 +; ATOM-NEXT: movdqu %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movdqu: +; SLM: # BB#0: +; SLM-NEXT: movdqu (%rdi), %xmm0 # sched: [3:1.00] +; SLM-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50] +; SLM-NEXT: movdqu %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movdqu: +; SANDY: # BB#0: +; SANDY-NEXT: vmovdqu (%rdi), %xmm0 # sched: [4:0.50] +; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movdqu: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovdqu (%rdi), %xmm0 # sched: [4:0.50] +; HASWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movdqu: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovdqu (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load <2 x i64>, <2 x i64> *%a0, align 1 + %2 = add <2 x i64> %1, %1 + store <2 x i64> %2, <2 x i64> *%a1, align 1 + ret void +} + +define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) { +; GENERIC-LABEL: test_movd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movd %edi, %xmm1 +; GENERIC-NEXT: movd 
{{.*#+}} xmm2 = mem[0],zero,zero,zero +; GENERIC-NEXT: paddd %xmm0, %xmm1 +; GENERIC-NEXT: paddd %xmm0, %xmm2 +; GENERIC-NEXT: movd %xmm2, %eax +; GENERIC-NEXT: movd %xmm1, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movd: +; ATOM: # BB#0: +; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; ATOM-NEXT: paddd %xmm0, %xmm1 +; ATOM-NEXT: movd %xmm1, %eax +; ATOM-NEXT: movd %edi, %xmm1 +; ATOM-NEXT: paddd %xmm0, %xmm1 +; ATOM-NEXT: movd %xmm1, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movd: +; SLM: # BB#0: +; SLM-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [3:1.00] +; SLM-NEXT: movd %edi, %xmm1 # sched: [1:0.50] +; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movd %xmm1, (%rsi) # sched: [1:1.00] +; SLM-NEXT: paddd %xmm0, %xmm2 # sched: [1:0.50] +; SLM-NEXT: movd %xmm2, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movd: +; SANDY: # BB#0: +; SANDY-NEXT: vmovd %edi, %xmm1 # sched: [1:0.33] +; SANDY-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vmovd %xmm0, %eax # sched: [1:0.33] +; SANDY-NEXT: vmovd %xmm1, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovd %edi, %xmm1 # sched: [1:1.00] +; HASWELL-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vmovd %xmm0, %eax # sched: [1:1.00] +; HASWELL-NEXT: vmovd %xmm1, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00] +; BTVER2-NEXT: vmovd %edi, %xmm1 # sched: [1:0.17] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # 
sched: [1:0.50] +; BTVER2-NEXT: vmovd %xmm1, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vmovd %xmm0, %eax # sched: [1:0.17] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <4 x i32> undef, i32 %a1, i32 0 + %2 = load i32, i32 *%a2 + %3 = insertelement <4 x i32> undef, i32 %2, i32 0 + %4 = add <4 x i32> %a0, %1 + %5 = add <4 x i32> %a0, %3 + %6 = extractelement <4 x i32> %4, i32 0 + %7 = extractelement <4 x i32> %5, i32 0 + store i32 %6, i32* %a2 + ret i32 %7 +} + +define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) { +; GENERIC-LABEL: test_movd_64: +; GENERIC: # BB#0: +; GENERIC-NEXT: movd %rdi, %xmm1 +; GENERIC-NEXT: movq {{.*#+}} xmm2 = mem[0],zero +; GENERIC-NEXT: paddq %xmm0, %xmm1 +; GENERIC-NEXT: paddq %xmm0, %xmm2 +; GENERIC-NEXT: movd %xmm2, %rax +; GENERIC-NEXT: movq %xmm1, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movd_64: +; ATOM: # BB#0: +; ATOM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; ATOM-NEXT: movd %rdi, %xmm2 +; ATOM-NEXT: paddq %xmm0, %xmm2 +; ATOM-NEXT: paddq %xmm0, %xmm1 +; ATOM-NEXT: movq %xmm2, (%rsi) +; ATOM-NEXT: movd %xmm1, %rax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movd_64: +; SLM: # BB#0: +; SLM-NEXT: movq {{.*#+}} xmm2 = mem[0],zero sched: [3:1.00] +; SLM-NEXT: movd %rdi, %xmm1 # sched: [1:0.50] +; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movq %xmm1, (%rsi) # sched: [1:1.00] +; SLM-NEXT: paddq %xmm0, %xmm2 # sched: [1:0.50] +; SLM-NEXT: movd %xmm2, %rax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movd_64: +; SANDY: # BB#0: +; SANDY-NEXT: vmovq %rdi, %xmm1 # sched: [1:0.33] +; SANDY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [4:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vmovq %xmm0, %rax # sched: [1:0.33] +; SANDY-NEXT: vmovq %xmm1, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; 
+; HASWELL-LABEL: test_movd_64: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovq %rdi, %xmm1 # sched: [1:1.00] +; HASWELL-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [4:0.50] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vmovq %xmm0, %rax # sched: [1:1.00] +; HASWELL-NEXT: vmovq %xmm1, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movd_64: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [5:1.00] +; BTVER2-NEXT: vmovq %rdi, %xmm1 # sched: [1:0.17] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vmovq %xmm1, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vmovq %xmm0, %rax # sched: [1:0.17] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = insertelement <2 x i64> undef, i64 %a1, i64 0 + %2 = load i64, i64 *%a2 + %3 = insertelement <2 x i64> undef, i64 %2, i64 0 + %4 = add <2 x i64> %a0, %1 + %5 = add <2 x i64> %a0, %3 + %6 = extractelement <2 x i64> %4, i64 0 + %7 = extractelement <2 x i64> %5, i64 0 + store i64 %6, i64* %a2 + ret i64 %7 +} + +define void @test_movhpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) { +; GENERIC-LABEL: test_movhpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; GENERIC-NEXT: addpd %xmm0, %xmm1 +; GENERIC-NEXT: movhpd %xmm1, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movhpd: +; ATOM: # BB#0: +; ATOM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; ATOM-NEXT: addpd %xmm0, %xmm1 +; ATOM-NEXT: movhpd %xmm1, (%rdi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movhpd: +; SLM: # BB#0: +; SLM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00] +; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movhpd %xmm1, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movhpd: +; SANDY: # BB#0: +; SANDY-NEXT: vmovhpd {{.*#+}} 
xmm1 = xmm1[0],mem[0] sched: [5:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movhpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movhpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast x86_mmx* %a2 to double* + %2 = load double, double *%1, align 8 + %3 = insertelement <2 x double> %a1, double %2, i32 1 + %4 = fadd <2 x double> %a0, %3 + %5 = extractelement <2 x double> %4, i32 1 + store double %5, double* %1 + ret void +} + +define void @test_movlpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) { +; GENERIC-LABEL: test_movlpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; GENERIC-NEXT: addpd %xmm0, %xmm1 +; GENERIC-NEXT: movlpd %xmm1, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movlpd: +; ATOM: # BB#0: +; ATOM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] +; ATOM-NEXT: addpd %xmm0, %xmm1 +; ATOM-NEXT: movlpd %xmm1, (%rdi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movlpd: +; SLM: # BB#0: +; SLM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [4:1.00] +; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movlpd %xmm1, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movlpd: +; SANDY: # BB#0: +; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [5:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: 
[5:1.00] +; +; HASWELL-LABEL: test_movlpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movlpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast x86_mmx* %a2 to double* + %2 = load double, double *%1, align 8 + %3 = insertelement <2 x double> %a1, double %2, i32 0 + %4 = fadd <2 x double> %a0, %3 + %5 = extractelement <2 x double> %4, i32 0 + store double %5, double* %1 + ret void +} + +define i32 @test_movmskpd(<2 x double> %a0) { +; GENERIC-LABEL: test_movmskpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movmskpd %xmm0, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movmskpd: +; ATOM: # BB#0: +; ATOM-NEXT: movmskpd %xmm0, %eax +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movmskpd: +; SLM: # BB#0: +; SLM-NEXT: movmskpd %xmm0, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movmskpd: +; SANDY: # BB#0: +; SANDY-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movmskpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovmskpd %xmm0, %eax # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movmskpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) + ret i32 %1 +} +declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone + +define void @test_movntdqa(<2 x i64> %a0, <2 x i64> *%a1) { +; GENERIC-LABEL: test_movntdqa: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddq 
%xmm0, %xmm0 +; GENERIC-NEXT: movntdq %xmm0, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movntdqa: +; ATOM: # BB#0: +; ATOM-NEXT: paddq %xmm0, %xmm0 +; ATOM-NEXT: movntdq %xmm0, (%rdi) +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movntdqa: +; SLM: # BB#0: +; SLM-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50] +; SLM-NEXT: movntdq %xmm0, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movntdqa: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movntdqa: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movntdqa: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = add <2 x i64> %a0, %a0 + store <2 x i64> %1, <2 x i64> *%a1, align 16, !nontemporal !0 + ret void +} + +define void @test_movntpd(<2 x double> %a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_movntpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: addpd %xmm0, %xmm0 +; GENERIC-NEXT: movntpd %xmm0, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movntpd: +; ATOM: # BB#0: +; ATOM-NEXT: addpd %xmm0, %xmm0 +; ATOM-NEXT: movntpd %xmm0, (%rdi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movntpd: +; SLM: # BB#0: +; SLM-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movntpd %xmm0, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movntpd: +; SANDY: # BB#0: +; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movntpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vaddpd 
%xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movntpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fadd <2 x double> %a0, %a0 + store <2 x double> %1, <2 x double> *%a1, align 16, !nontemporal !0 + ret void +} + +define <2 x i64> @test_movq_mem(<2 x i64> %a0, i64 *%a1) { +; GENERIC-LABEL: test_movq_mem: +; GENERIC: # BB#0: +; GENERIC-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: movq %xmm0, (%rdi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movq_mem: +; ATOM: # BB#0: +; ATOM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: movq %xmm0, (%rdi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movq_mem: +; SLM: # BB#0: +; SLM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero sched: [3:1.00] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: movq %xmm0, (%rdi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movq_mem: +; SANDY: # BB#0: +; SANDY-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movq_mem: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movq_mem: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [5:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load i64, i64* %a1, 
align 1 + %2 = insertelement <2 x i64> zeroinitializer, i64 %1, i32 0 + %3 = add <2 x i64> %a0, %2 + %4 = extractelement <2 x i64> %3, i32 0 + store i64 %4, i64 *%a1, align 1 + ret <2 x i64> %3 +} + +define <2 x i64> @test_movq_reg(<2 x i64> %a0, <2 x i64> %a1) { +; GENERIC-LABEL: test_movq_reg: +; GENERIC: # BB#0: +; GENERIC-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movq_reg: +; ATOM: # BB#0: +; ATOM-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movq_reg: +; SLM: # BB#0: +; SLM-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.50] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movq_reg: +; SANDY: # BB#0: +; SANDY-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33] +; SANDY-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movq_reg: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33] +; HASWELL-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movq_reg: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.50] +; BTVER2-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2> + %2 = add <2 x i64> %a1, %1 + ret <2 x i64> %2 +} + +define void @test_movsd_mem(double* %a0, double* %a1) { +; GENERIC-LABEL: test_movsd_mem: +; GENERIC: # BB#0: +; GENERIC-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; GENERIC-NEXT: addsd %xmm0, %xmm0 +; GENERIC-NEXT: movsd %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movsd_mem: +; ATOM: # BB#0: +; ATOM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; ATOM-NEXT: addsd %xmm0, 
%xmm0 +; ATOM-NEXT: movsd %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movsd_mem: +; SLM: # BB#0: +; SLM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [3:1.00] +; SLM-NEXT: addsd %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movsd %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movsd_mem: +; SANDY: # BB#0: +; SANDY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [4:0.50] +; SANDY-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movsd_mem: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [4:0.50] +; HASWELL-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movsd_mem: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:1.00] +; BTVER2-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = load double, double* %a0, align 1 + %2 = fadd double %1, %1 + store double %2, double *%a1, align 1 + ret void +} + +define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) { +; GENERIC-LABEL: test_movsd_reg: +; GENERIC: # BB#0: +; GENERIC-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; GENERIC-NEXT: movapd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movsd_reg: +; ATOM: # BB#0: +; ATOM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; ATOM-NEXT: movapd %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movsd_reg: +; SLM: # BB#0: +; SLM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] sched: [1:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movsd_reg: +; SANDY: # BB#0: 
+; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movsd_reg: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movsd_reg: +; BTVER2: # BB#0: +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 0> + ret <2 x double> %1 +} + +define void @test_movupd(<2 x double> *%a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_movupd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movupd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm0, %xmm0 +; GENERIC-NEXT: movupd %xmm0, (%rsi) +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_movupd: +; ATOM: # BB#0: +; ATOM-NEXT: movupd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm0, %xmm0 +; ATOM-NEXT: movupd %xmm0, (%rsi) +; ATOM-NEXT: retq +; +; SLM-LABEL: test_movupd: +; SLM: # BB#0: +; SLM-NEXT: movupd (%rdi), %xmm0 # sched: [3:1.00] +; SLM-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00] +; SLM-NEXT: movupd %xmm0, (%rsi) # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_movupd: +; SANDY: # BB#0: +; SANDY-NEXT: vmovupd (%rdi), %xmm0 # sched: [4:0.50] +; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_movupd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmovupd (%rdi), %xmm0 # sched: [4:0.50] +; HASWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_movupd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovupd (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: retq # sched: 
[4:1.00] + %1 = load <2 x double>, <2 x double> *%a0, align 1 + %2 = fadd <2 x double> %1, %1 + store <2 x double> %2, <2 x double> *%a1, align 1 + ret void +} + +define <2 x double> @test_mulpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_mulpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: mulpd %xmm1, %xmm0 +; GENERIC-NEXT: mulpd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_mulpd: +; ATOM: # BB#0: +; ATOM-NEXT: mulpd %xmm1, %xmm0 +; ATOM-NEXT: mulpd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_mulpd: +; SLM: # BB#0: +; SLM-NEXT: mulpd %xmm1, %xmm0 # sched: [5:2.00] +; SLM-NEXT: mulpd (%rdi), %xmm0 # sched: [8:2.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_mulpd: +; SANDY: # BB#0: +; SANDY-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_mulpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [9:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_mulpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fmul <2 x double> %a0, %a1 + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = fmul <2 x double> %1, %2 + ret <2 x double> %3 +} + +define double @test_mulsd(double %a0, double %a1, double *%a2) { +; GENERIC-LABEL: test_mulsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: mulsd %xmm1, %xmm0 +; GENERIC-NEXT: mulsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_mulsd: +; ATOM: # BB#0: +; ATOM-NEXT: mulsd %xmm1, %xmm0 +; ATOM-NEXT: mulsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_mulsd: +; SLM: # BB#0: +; SLM-NEXT: mulsd %xmm1, %xmm0 # sched: [5:2.00] +; SLM-NEXT: mulsd (%rdi), %xmm0 # sched: [8:2.00] +; 
SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_mulsd: +; SANDY: # BB#0: +; SANDY-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_mulsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_mulsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fmul double %a0, %a1 + %2 = load double, double *%a2, align 8 + %3 = fmul double %1, %2 + ret double %3 +} + +define <2 x double> @test_orpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_orpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: orpd %xmm1, %xmm0 +; GENERIC-NEXT: orpd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_orpd: +; ATOM: # BB#0: +; ATOM-NEXT: orpd %xmm1, %xmm0 +; ATOM-NEXT: orpd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_orpd: +; SLM: # BB#0: +; SLM-NEXT: orpd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: orpd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_orpd: +; SANDY: # BB#0: +; SANDY-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_orpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; 
BTVER2-LABEL: test_orpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <2 x double> %a0 to <4 x i32> + %2 = bitcast <2 x double> %a1 to <4 x i32> + %3 = or <4 x i32> %1, %2 + %4 = load <2 x double>, <2 x double> *%a2, align 16 + %5 = bitcast <2 x double> %4 to <4 x i32> + %6 = or <4 x i32> %3, %5 + %7 = bitcast <4 x i32> %6 to <2 x double> + %8 = fadd <2 x double> %a1, %7 + ret <2 x double> %8 +} + +define <8 x i16> @test_packssdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_packssdw: +; GENERIC: # BB#0: +; GENERIC-NEXT: packssdw %xmm1, %xmm0 +; GENERIC-NEXT: packssdw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_packssdw: +; ATOM: # BB#0: +; ATOM-NEXT: packssdw %xmm1, %xmm0 +; ATOM-NEXT: packssdw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_packssdw: +; SLM: # BB#0: +; SLM-NEXT: packssdw %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: packssdw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_packssdw: +; SANDY: # BB#0: +; SANDY-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_packssdw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_packssdw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = 
call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) + %2 = bitcast <8 x i16> %1 to <4 x i32> + %3 = load <4 x i32>, <4 x i32> *%a2, align 16 + %4 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %2, <4 x i32> %3) + ret <8 x i16> %4 +} +declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone + +define <16 x i8> @test_packsswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_packsswb: +; GENERIC: # BB#0: +; GENERIC-NEXT: packsswb %xmm1, %xmm0 +; GENERIC-NEXT: packsswb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_packsswb: +; ATOM: # BB#0: +; ATOM-NEXT: packsswb %xmm1, %xmm0 +; ATOM-NEXT: packsswb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_packsswb: +; SLM: # BB#0: +; SLM-NEXT: packsswb %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: packsswb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_packsswb: +; SANDY: # BB#0: +; SANDY-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_packsswb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_packsswb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) + %2 = bitcast <16 x i8> %1 to <8 x i16> + %3 = load <8 x i16>, <8 x i16> *%a2, align 16 + %4 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %2, <8 x i16> %3) + ret <16 x i8> %4 +} 
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_packuswb: +; GENERIC: # BB#0: +; GENERIC-NEXT: packuswb %xmm1, %xmm0 +; GENERIC-NEXT: packuswb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_packuswb: +; ATOM: # BB#0: +; ATOM-NEXT: packuswb %xmm1, %xmm0 +; ATOM-NEXT: packuswb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_packuswb: +; SLM: # BB#0: +; SLM-NEXT: packuswb %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: packuswb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_packuswb: +; SANDY: # BB#0: +; SANDY-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_packuswb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_packuswb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) + %2 = bitcast <16 x i8> %1 to <8 x i16> + %3 = load <8 x i16>, <8 x i16> *%a2, align 16 + %4 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %2, <8 x i16> %3) + ret <16 x i8> %4 +} +declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_paddb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_paddb: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddb %xmm1, %xmm0 +; 
GENERIC-NEXT: paddb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddb: +; ATOM: # BB#0: +; ATOM-NEXT: paddb %xmm1, %xmm0 +; ATOM-NEXT: paddb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddb: +; SLM: # BB#0: +; SLM-NEXT: paddb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddb: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = add <16 x i8> %a0, %a1 + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = add <16 x i8> %1, %2 + ret <16 x i8> %3 +} + +define <4 x i32> @test_paddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_paddd: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: paddd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddd: +; ATOM: # BB#0: +; ATOM-NEXT: paddd %xmm1, %xmm0 +; ATOM-NEXT: paddd (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddd: +; SLM: # BB#0: +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddd: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; 
SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = add <4 x i32> %a0, %a1 + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = add <4 x i32> %1, %2 + ret <4 x i32> %3 +} + +define <2 x i64> @test_paddq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_paddq: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: paddq (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddq: +; ATOM: # BB#0: +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: paddq (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddq: +; SLM: # BB#0: +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddq (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddq: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = add <2 x i64> %a0, %a1 + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = add <2 x i64> %1, %2 + ret <2 x i64> %3 +} + +define <16 x i8> @test_paddsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: 
test_paddsb: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddsb %xmm1, %xmm0 +; GENERIC-NEXT: paddsb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddsb: +; ATOM: # BB#0: +; ATOM-NEXT: paddsb %xmm1, %xmm0 +; ATOM-NEXT: paddsb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddsb: +; SLM: # BB#0: +; SLM-NEXT: paddsb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddsb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddsb: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddsb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddsb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone + +define <8 x i16> @test_paddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_paddsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddsw %xmm1, %xmm0 +; GENERIC-NEXT: paddsw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddsw: +; ATOM: # BB#0: +; ATOM-NEXT: paddsw %xmm1, %xmm0 +; ATOM-NEXT: paddsw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddsw: +; SLM: # BB#0: +; SLM-NEXT: paddsw %xmm1, %xmm0 # sched: 
[1:0.50] +; SLM-NEXT: paddsw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddsw: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddsw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_paddusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_paddusb: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddusb %xmm1, %xmm0 +; GENERIC-NEXT: paddusb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddusb: +; ATOM: # BB#0: +; ATOM-NEXT: paddusb %xmm1, %xmm0 +; ATOM-NEXT: paddusb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddusb: +; SLM: # BB#0: +; SLM-NEXT: paddusb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddusb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddusb: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddusb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # 
sched: [1:0.50] +; HASWELL-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddusb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone + +define <8 x i16> @test_paddusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_paddusw: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddusw %xmm1, %xmm0 +; GENERIC-NEXT: paddusw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddusw: +; ATOM: # BB#0: +; ATOM-NEXT: paddusw %xmm1, %xmm0 +; ATOM-NEXT: paddusw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddusw: +; SLM: # BB#0: +; SLM-NEXT: paddusw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddusw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddusw: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddusw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddusw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x 
i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_paddw: +; GENERIC: # BB#0: +; GENERIC-NEXT: paddw %xmm1, %xmm0 +; GENERIC-NEXT: paddw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_paddw: +; ATOM: # BB#0: +; ATOM-NEXT: paddw %xmm1, %xmm0 +; ATOM-NEXT: paddw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_paddw: +; SLM: # BB#0: +; SLM-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: paddw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_paddw: +; SANDY: # BB#0: +; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_paddw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_paddw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = add <8 x i16> %a0, %a1 + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = add <8 x i16> %1, %2 + ret <8 x i16> %3 +} + +define <2 x i64> @test_pand(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_pand: +; GENERIC: # BB#0: +; GENERIC-NEXT: pand %xmm1, %xmm0 +; GENERIC-NEXT: pand (%rdi), %xmm0 +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pand: +; ATOM: # BB#0: +; ATOM-NEXT: pand %xmm1, %xmm0 +; ATOM-NEXT: pand (%rdi), %xmm0 +; ATOM-NEXT: 
paddq %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pand: +; SLM: # BB#0: +; SLM-NEXT: pand %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pand (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pand: +; SANDY: # BB#0: +; SANDY-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pand: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pand: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = and <2 x i64> %a0, %a1 + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = and <2 x i64> %1, %2 + %4 = add <2 x i64> %3, %a1 + ret <2 x i64> %4 +} + +define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_pandn: +; GENERIC: # BB#0: +; GENERIC-NEXT: pandn %xmm1, %xmm0 +; GENERIC-NEXT: movdqa %xmm0, %xmm1 +; GENERIC-NEXT: pandn (%rdi), %xmm1 +; GENERIC-NEXT: paddq %xmm0, %xmm1 +; GENERIC-NEXT: movdqa %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pandn: +; ATOM: # BB#0: +; ATOM-NEXT: pandn %xmm1, %xmm0 +; ATOM-NEXT: movdqa %xmm0, %xmm1 +; ATOM-NEXT: pandn (%rdi), %xmm1 +; ATOM-NEXT: paddq %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pandn: +; SLM: # BB#0: +; SLM-NEXT: pandn %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: movdqa %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: pandn (%rdi), %xmm1 # sched: 
[4:1.00] +; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pandn: +; SANDY: # BB#0: +; SANDY-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [5:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pandn: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pandn: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [6:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = xor <2 x i64> %a0, <i64 -1, i64 -1> + %2 = and <2 x i64> %a1, %1 + %3 = load <2 x i64>, <2 x i64> *%a2, align 16 + %4 = xor <2 x i64> %2, <i64 -1, i64 -1> + %5 = and <2 x i64> %3, %4 + %6 = add <2 x i64> %2, %5 + ret <2 x i64> %6 +} + +define <16 x i8> @test_pavgb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_pavgb: +; GENERIC: # BB#0: +; GENERIC-NEXT: pavgb %xmm1, %xmm0 +; GENERIC-NEXT: pavgb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pavgb: +; ATOM: # BB#0: +; ATOM-NEXT: pavgb %xmm1, %xmm0 +; ATOM-NEXT: pavgb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pavgb: +; SLM: # BB#0: +; SLM-NEXT: pavgb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pavgb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pavgb: +; SANDY: # BB#0: +; SANDY-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; 
SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pavgb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pavgb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %arg0, <16 x i8> %arg1) nounwind readnone + +define <8 x i16> @test_pavgw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pavgw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pavgw %xmm1, %xmm0 +; GENERIC-NEXT: pavgw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pavgw: +; ATOM: # BB#0: +; ATOM-NEXT: pavgw %xmm1, %xmm0 +; ATOM-NEXT: pavgw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pavgw: +; SLM: # BB#0: +; SLM-NEXT: pavgw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pavgw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pavgw: +; SANDY: # BB#0: +; SANDY-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pavgw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pavgw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; 
BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_pcmpeqb: +; GENERIC: # BB#0: +; GENERIC-NEXT: pcmpeqb %xmm0, %xmm1 +; GENERIC-NEXT: pcmpeqb (%rdi), %xmm0 +; GENERIC-NEXT: por %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pcmpeqb: +; ATOM: # BB#0: +; ATOM-NEXT: pcmpeqb %xmm0, %xmm1 +; ATOM-NEXT: pcmpeqb (%rdi), %xmm0 +; ATOM-NEXT: por %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pcmpeqb: +; SLM: # BB#0: +; SLM-NEXT: pcmpeqb %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: pcmpeqb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pcmpeqb: +; SANDY: # BB#0: +; SANDY-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pcmpeqb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pcmpeqb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = icmp eq <16 x i8> %a0, %a1 + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = icmp eq <16 x i8> %a0, %2 + %4 = 
or <16 x i1> %1, %3 + %5 = sext <16 x i1> %4 to <16 x i8> + ret <16 x i8> %5 +} + +define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_pcmpeqd: +; GENERIC: # BB#0: +; GENERIC-NEXT: pcmpeqd %xmm0, %xmm1 +; GENERIC-NEXT: pcmpeqd (%rdi), %xmm0 +; GENERIC-NEXT: por %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pcmpeqd: +; ATOM: # BB#0: +; ATOM-NEXT: pcmpeqd %xmm0, %xmm1 +; ATOM-NEXT: pcmpeqd (%rdi), %xmm0 +; ATOM-NEXT: por %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pcmpeqd: +; SLM: # BB#0: +; SLM-NEXT: pcmpeqd %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pcmpeqd: +; SANDY: # BB#0: +; SANDY-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pcmpeqd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pcmpeqd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = icmp eq <4 x i32> %a0, %a1 + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = icmp eq <4 x i32> %a0, %2 + %4 = or <4 x i1> %1, %3 + %5 = sext <4 x i1> %4 to <4 x i32> + ret <4 x i32> %5 +} + +define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pcmpeqw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pcmpeqw %xmm0, %xmm1 +; 
GENERIC-NEXT: pcmpeqw (%rdi), %xmm0 +; GENERIC-NEXT: por %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pcmpeqw: +; ATOM: # BB#0: +; ATOM-NEXT: pcmpeqw %xmm0, %xmm1 +; ATOM-NEXT: pcmpeqw (%rdi), %xmm0 +; ATOM-NEXT: por %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pcmpeqw: +; SLM: # BB#0: +; SLM-NEXT: pcmpeqw %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: pcmpeqw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pcmpeqw: +; SANDY: # BB#0: +; SANDY-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pcmpeqw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pcmpeqw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = icmp eq <8 x i16> %a0, %a1 + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = icmp eq <8 x i16> %a0, %2 + %4 = or <8 x i1> %1, %3 + %5 = sext <8 x i1> %4 to <8 x i16> + ret <8 x i16> %5 +} + +define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_pcmpgtb: +; GENERIC: # BB#0: +; GENERIC-NEXT: movdqa %xmm0, %xmm2 +; GENERIC-NEXT: pcmpgtb %xmm1, %xmm2 +; GENERIC-NEXT: pcmpgtb (%rdi), %xmm0 +; GENERIC-NEXT: por %xmm2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pcmpgtb: +; ATOM: # BB#0: +; ATOM-NEXT: movdqa %xmm0, %xmm2 +; ATOM-NEXT: pcmpgtb (%rdi), %xmm0 +; 
ATOM-NEXT: pcmpgtb %xmm1, %xmm2 +; ATOM-NEXT: por %xmm2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pcmpgtb: +; SLM: # BB#0: +; SLM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50] +; SLM-NEXT: pcmpgtb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: pcmpgtb %xmm1, %xmm2 # sched: [1:0.50] +; SLM-NEXT: por %xmm2, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pcmpgtb: +; SANDY: # BB#0: +; SANDY-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pcmpgtb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pcmpgtb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = icmp sgt <16 x i8> %a0, %a1 + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = icmp sgt <16 x i8> %a0, %2 + %4 = or <16 x i1> %1, %3 + %5 = sext <16 x i1> %4 to <16 x i8> + ret <16 x i8> %5 +} + +define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_pcmpgtd: +; GENERIC: # BB#0: +; GENERIC-NEXT: movdqa %xmm0, %xmm2 +; GENERIC-NEXT: pcmpgtd %xmm1, %xmm2 +; GENERIC-NEXT: pcmpeqd (%rdi), %xmm0 +; GENERIC-NEXT: por %xmm2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pcmpgtd: +; ATOM: # BB#0: +; ATOM-NEXT: movdqa %xmm0, %xmm2 +; ATOM-NEXT: pcmpeqd (%rdi), %xmm0 +; ATOM-NEXT: pcmpgtd %xmm1, %xmm2 +; ATOM-NEXT: por %xmm2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pcmpgtd: +; SLM: # BB#0: +; SLM-NEXT: movdqa %xmm0, %xmm2 # 
sched: [1:0.50] +; SLM-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: pcmpgtd %xmm1, %xmm2 # sched: [1:0.50] +; SLM-NEXT: por %xmm2, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pcmpgtd: +; SANDY: # BB#0: +; SANDY-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pcmpgtd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pcmpgtd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = icmp sgt <4 x i32> %a0, %a1 + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = icmp eq <4 x i32> %a0, %2 + %4 = or <4 x i1> %1, %3 + %5 = sext <4 x i1> %4 to <4 x i32> + ret <4 x i32> %5 +} + +define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pcmpgtw: +; GENERIC: # BB#0: +; GENERIC-NEXT: movdqa %xmm0, %xmm2 +; GENERIC-NEXT: pcmpgtw %xmm1, %xmm2 +; GENERIC-NEXT: pcmpgtw (%rdi), %xmm0 +; GENERIC-NEXT: por %xmm2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pcmpgtw: +; ATOM: # BB#0: +; ATOM-NEXT: movdqa %xmm0, %xmm2 +; ATOM-NEXT: pcmpgtw (%rdi), %xmm0 +; ATOM-NEXT: pcmpgtw %xmm1, %xmm2 +; ATOM-NEXT: por %xmm2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pcmpgtw: +; SLM: # BB#0: +; SLM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50] +; SLM-NEXT: pcmpgtw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: pcmpgtw %xmm1, %xmm2 # sched: [1:0.50] +; SLM-NEXT: por %xmm2, %xmm0 # sched: [1:0.50] 
+; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pcmpgtw: +; SANDY: # BB#0: +; SANDY-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; SANDY-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pcmpgtw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pcmpgtw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50] +; BTVER2-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = icmp sgt <8 x i16> %a0, %a1 + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = icmp sgt <8 x i16> %a0, %2 + %4 = or <8 x i1> %1, %3 + %5 = sext <8 x i1> %4 to <8 x i16> + ret <8 x i16> %5 +} + +define i16 @test_pextrw(<8 x i16> %a0) { +; GENERIC-LABEL: test_pextrw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pextrw $6, %xmm0, %eax +; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pextrw: +; ATOM: # BB#0: +; ATOM-NEXT: pextrw $6, %xmm0, %eax +; ATOM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pextrw: +; SLM: # BB#0: +; SLM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:1.00] +; SLM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pextrw: +; SANDY: # BB#0: +; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50] +; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pextrw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:1.00] +; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; 
HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pextrw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50] +; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = extractelement <8 x i16> %a0, i32 6 + ret i16 %1 +} + +define <4 x i32> @test_pmaddwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pmaddwd: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmaddwd %xmm1, %xmm0 +; GENERIC-NEXT: pmaddwd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmaddwd: +; ATOM: # BB#0: +; ATOM-NEXT: pmaddwd %xmm1, %xmm0 +; ATOM-NEXT: pmaddwd (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmaddwd: +; SLM: # BB#0: +; SLM-NEXT: pmaddwd %xmm1, %xmm0 # sched: [4:1.00] +; SLM-NEXT: pmaddwd (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmaddwd: +; SANDY: # BB#0: +; SANDY-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmaddwd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmaddwd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) + %2 = bitcast <4 x i32> %1 to <8 x i16> + %3 = load <8 x i16>, <8 x i16> *%a2, align 16 + %4 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %2, <8 x i16> %3) + ret <4 x i32> %4 +} +declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind 
readnone + +define <8 x i16> @test_pmaxsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pmaxsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmaxsw %xmm1, %xmm0 +; GENERIC-NEXT: pmaxsw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmaxsw: +; ATOM: # BB#0: +; ATOM-NEXT: pmaxsw %xmm1, %xmm0 +; ATOM-NEXT: pmaxsw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmaxsw: +; SLM: # BB#0: +; SLM-NEXT: pmaxsw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pmaxsw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmaxsw: +; SANDY: # BB#0: +; SANDY-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmaxsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmaxsw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_pmaxub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_pmaxub: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmaxub %xmm1, %xmm0 +; GENERIC-NEXT: pmaxub (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmaxub: +; ATOM: # BB#0: +; ATOM-NEXT: pmaxub %xmm1, %xmm0 +; ATOM-NEXT: pmaxub (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; 
ATOM-NEXT: retq +; +; SLM-LABEL: test_pmaxub: +; SLM: # BB#0: +; SLM-NEXT: pmaxub %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pmaxub (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmaxub: +; SANDY: # BB#0: +; SANDY-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmaxub: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmaxub: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone + +define <8 x i16> @test_pminsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pminsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pminsw %xmm1, %xmm0 +; GENERIC-NEXT: pminsw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pminsw: +; ATOM: # BB#0: +; ATOM-NEXT: pminsw %xmm1, %xmm0 +; ATOM-NEXT: pminsw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pminsw: +; SLM: # BB#0: +; SLM-NEXT: pminsw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pminsw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pminsw: +; SANDY: # BB#0: +; SANDY-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; 
HASWELL-LABEL: test_pminsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pminsw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_pminub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_pminub: +; GENERIC: # BB#0: +; GENERIC-NEXT: pminub %xmm1, %xmm0 +; GENERIC-NEXT: pminub (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pminub: +; ATOM: # BB#0: +; ATOM-NEXT: pminub %xmm1, %xmm0 +; ATOM-NEXT: pminub (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pminub: +; SLM: # BB#0: +; SLM-NEXT: pminub %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pminub (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pminub: +; SANDY: # BB#0: +; SANDY-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pminub: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pminub: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: 
[4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone + +define i32 @test_pmovmskb(<16 x i8> %a0) { +; GENERIC-LABEL: test_pmovmskb: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmovmskb %xmm0, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmovmskb: +; ATOM: # BB#0: +; ATOM-NEXT: pmovmskb %xmm0, %eax +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmovmskb: +; SLM: # BB#0: +; SLM-NEXT: pmovmskb %xmm0, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmovmskb: +; SANDY: # BB#0: +; SANDY-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmovmskb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovmskb %xmm0, %eax # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmovmskb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) + ret i32 %1 +} +declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone + +define <8 x i16> @test_pmulhuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pmulhuw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmulhuw %xmm1, %xmm0 +; GENERIC-NEXT: pmulhuw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmulhuw: +; ATOM: # BB#0: +; ATOM-NEXT: pmulhuw %xmm1, %xmm0 +; ATOM-NEXT: pmulhuw (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmulhuw: +; SLM: # BB#0: +; SLM-NEXT: pmulhuw %xmm1, %xmm0 # sched: [4:1.00] +; SLM-NEXT: pmulhuw (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmulhuw: +; SANDY: # BB#0: +; SANDY-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: 
[5:1.00] +; SANDY-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmulhuw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmulhuw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <8 x i16> @test_pmulhw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pmulhw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmulhw %xmm1, %xmm0 +; GENERIC-NEXT: pmulhw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmulhw: +; ATOM: # BB#0: +; ATOM-NEXT: pmulhw %xmm1, %xmm0 +; ATOM-NEXT: pmulhw (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmulhw: +; SLM: # BB#0: +; SLM-NEXT: pmulhw %xmm1, %xmm0 # sched: [4:1.00] +; SLM-NEXT: pmulhw (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmulhw: +; SANDY: # BB#0: +; SANDY-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmulhw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmulhw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # 
sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <8 x i16> @test_pmullw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_pmullw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmullw %xmm1, %xmm0 +; GENERIC-NEXT: pmullw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmullw: +; ATOM: # BB#0: +; ATOM-NEXT: pmullw %xmm1, %xmm0 +; ATOM-NEXT: pmullw (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmullw: +; SLM: # BB#0: +; SLM-NEXT: pmullw %xmm1, %xmm0 # sched: [4:1.00] +; SLM-NEXT: pmullw (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmullw: +; SANDY: # BB#0: +; SANDY-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmullw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmullw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = mul <8 x i16> %a0, %a1 + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = mul <8 x i16> %1, %2 + ret <8 x i16> %3 +} + +define <2 x i64> @test_pmuludq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_pmuludq: +; GENERIC: # BB#0: +; GENERIC-NEXT: pmuludq %xmm1, %xmm0 +; GENERIC-NEXT: pmuludq (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pmuludq: +; ATOM: # BB#0: +; ATOM-NEXT: pmuludq %xmm1, %xmm0 
+; ATOM-NEXT: pmuludq (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pmuludq: +; SLM: # BB#0: +; SLM-NEXT: pmuludq %xmm1, %xmm0 # sched: [4:1.00] +; SLM-NEXT: pmuludq (%rdi), %xmm0 # sched: [7:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pmuludq: +; SANDY: # BB#0: +; SANDY-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pmuludq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pmuludq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) + %2 = bitcast <2 x i64> %1 to <4 x i32> + %3 = load <4 x i32>, <4 x i32> *%a2, align 16 + %4 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %2, <4 x i32> %3) + ret <2 x i64> %4 +} +declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone + +define <2 x i64> @test_por(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_por: +; GENERIC: # BB#0: +; GENERIC-NEXT: por %xmm1, %xmm0 +; GENERIC-NEXT: por (%rdi), %xmm0 +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_por: +; ATOM: # BB#0: +; ATOM-NEXT: por %xmm1, %xmm0 +; ATOM-NEXT: por (%rdi), %xmm0 +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_por: +; SLM: # BB#0: +; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: por (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; 
SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_por: +; SANDY: # BB#0: +; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_por: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_por: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = or <2 x i64> %a0, %a1 + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = or <2 x i64> %1, %2 + %4 = add <2 x i64> %3, %a1 + ret <2 x i64> %4 +} + +define <2 x i64> @test_psadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_psadbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: psadbw %xmm1, %xmm0 +; GENERIC-NEXT: psadbw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psadbw: +; ATOM: # BB#0: +; ATOM-NEXT: psadbw %xmm1, %xmm0 +; ATOM-NEXT: psadbw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psadbw: +; SLM: # BB#0: +; SLM-NEXT: psadbw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psadbw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psadbw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [5:1.00] +; SANDY-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psadbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # 
sched: [5:1.00] +; HASWELL-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psadbw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; BTVER2-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) + %2 = bitcast <2 x i64> %1 to <16 x i8> + %3 = load <16 x i8>, <16 x i8> *%a2, align 16 + %4 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %2, <16 x i8> %3) + ret <2 x i64> %4 +} +declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone + +define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) { +; GENERIC-LABEL: test_pshufd: +; GENERIC: # BB#0: +; GENERIC-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] +; GENERIC-NEXT: pshufd {{.*#+}} xmm0 = mem[3,2,1,0] +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pshufd: +; ATOM: # BB#0: +; ATOM-NEXT: pshufd {{.*#+}} xmm1 = mem[3,2,1,0] +; ATOM-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; ATOM-NEXT: paddd %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pshufd: +; SLM: # BB#0: +; SLM-NEXT: pshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [4:1.00] +; SLM-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00] +; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pshufd: +; SANDY: # BB#0: +; SANDY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.50] +; SANDY-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [5:0.50] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pshufd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00] +; HASWELL-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [5:1.00] +; 
HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pshufd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00] +; BTVER2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.50] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2> + %2 = load <4 x i32>, <4 x i32> *%a1, align 16 + %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0> + %4 = add <4 x i32> %1, %3 + ret <4 x i32> %4 +} + +define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) { +; GENERIC-LABEL: test_pshufhw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,4,7,6] +; GENERIC-NEXT: pshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,6,5,4] +; GENERIC-NEXT: paddw %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pshufhw: +; ATOM: # BB#0: +; ATOM-NEXT: pshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] +; ATOM-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] +; ATOM-NEXT: paddw %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pshufhw: +; SLM: # BB#0: +; SLM-NEXT: pshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [4:1.00] +; SLM-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00] +; SLM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pshufhw: +; SANDY: # BB#0: +; SANDY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50] +; SANDY-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [5:0.50] +; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pshufhw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00] 
+; HASWELL-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [5:1.00] +; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pshufhw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [6:1.00] +; BTVER2-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50] +; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6> + %2 = load <8 x i16>, <8 x i16> *%a1, align 16 + %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4> + %4 = add <8 x i16> %1, %3 + ret <8 x i16> %4 +} + +define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) { +; GENERIC-LABEL: test_pshuflw: +; GENERIC: # BB#0: +; GENERIC-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[1,0,3,2,4,5,6,7] +; GENERIC-NEXT: pshuflw {{.*#+}} xmm0 = mem[3,2,1,0,4,5,6,7] +; GENERIC-NEXT: paddw %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pshuflw: +; ATOM: # BB#0: +; ATOM-NEXT: pshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] +; ATOM-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] +; ATOM-NEXT: paddw %xmm0, %xmm1 +; ATOM-NEXT: movdqa %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pshuflw: +; SLM: # BB#0: +; SLM-NEXT: pshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [4:1.00] +; SLM-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00] +; SLM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pshuflw: +; SANDY: # BB#0: +; SANDY-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50] +; SANDY-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [5:0.50] +; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: 
retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pshuflw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00] +; HASWELL-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [5:1.00] +; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pshuflw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [6:1.00] +; BTVER2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50] +; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7> + %2 = load <8 x i16>, <8 x i16> *%a1, align 16 + %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7> + %4 = add <8 x i16> %1, %3 + ret <8 x i16> %4 +} + +define <4 x i32> @test_pslld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_pslld: +; GENERIC: # BB#0: +; GENERIC-NEXT: pslld %xmm1, %xmm0 +; GENERIC-NEXT: pslld (%rdi), %xmm0 +; GENERIC-NEXT: pslld $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pslld: +; ATOM: # BB#0: +; ATOM-NEXT: pslld %xmm1, %xmm0 +; ATOM-NEXT: pslld (%rdi), %xmm0 +; ATOM-NEXT: pslld $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pslld: +; SLM: # BB#0: +; SLM-NEXT: pslld %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: pslld (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: pslld $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pslld: +; SANDY: # BB#0: +; SANDY-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pslld: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpslld %xmm1, %xmm0, %xmm0 # 
sched: [2:1.00] +; HASWELL-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pslld: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %1, <4 x i32> %2) + %4 = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %3, i32 2) + ret <4 x i32> %4 +} +declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone + +define <4 x i32> @test_pslldq(<4 x i32> %a0) { +; GENERIC-LABEL: test_pslldq: +; GENERIC: # BB#0: +; GENERIC-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pslldq: +; ATOM: # BB#0: +; ATOM-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pslldq: +; SLM: # BB#0: +; SLM-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pslldq: +; SANDY: # BB#0: +; SANDY-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pslldq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pslldq: +; BTVER2: # 
BB#0: +; BTVER2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 1, i32 2> + ret <4 x i32> %1 +} + +define <2 x i64> @test_psllq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_psllq: +; GENERIC: # BB#0: +; GENERIC-NEXT: psllq %xmm1, %xmm0 +; GENERIC-NEXT: psllq (%rdi), %xmm0 +; GENERIC-NEXT: psllq $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psllq: +; ATOM: # BB#0: +; ATOM-NEXT: psllq %xmm1, %xmm0 +; ATOM-NEXT: psllq (%rdi), %xmm0 +; ATOM-NEXT: psllq $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psllq: +; SLM: # BB#0: +; SLM-NEXT: psllq %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psllq (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psllq $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psllq: +; SANDY: # BB#0: +; SANDY-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psllq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psllq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %1, <2 x i64> %2) + %4 = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %3, i32 2) 
+ ret <2 x i64> %4 +} +declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone +declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone + +define <8 x i16> @test_psllw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psllw: +; GENERIC: # BB#0: +; GENERIC-NEXT: psllw %xmm1, %xmm0 +; GENERIC-NEXT: psllw (%rdi), %xmm0 +; GENERIC-NEXT: psllw $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psllw: +; ATOM: # BB#0: +; ATOM-NEXT: psllw %xmm1, %xmm0 +; ATOM-NEXT: psllw (%rdi), %xmm0 +; ATOM-NEXT: psllw $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psllw: +; SLM: # BB#0: +; SLM-NEXT: psllw %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psllw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psllw $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psllw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psllw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psllw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %1, <8 x i16> %2) + %4 = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %3, i32 2) + ret <8 x i16> %4 +} +declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind 
readnone +declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone + +define <4 x i32> @test_psrad(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psrad: +; GENERIC: # BB#0: +; GENERIC-NEXT: psrad %xmm1, %xmm0 +; GENERIC-NEXT: psrad (%rdi), %xmm0 +; GENERIC-NEXT: psrad $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psrad: +; ATOM: # BB#0: +; ATOM-NEXT: psrad %xmm1, %xmm0 +; ATOM-NEXT: psrad (%rdi), %xmm0 +; ATOM-NEXT: psrad $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psrad: +; SLM: # BB#0: +; SLM-NEXT: psrad %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psrad (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psrad $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psrad: +; SANDY: # BB#0: +; SANDY-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psrad: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psrad: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> %2) + %4 = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %3, i32 2) + ret <4 x i32> %4 +} +declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone + +define <8 
x i16> @test_psraw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psraw: +; GENERIC: # BB#0: +; GENERIC-NEXT: psraw %xmm1, %xmm0 +; GENERIC-NEXT: psraw (%rdi), %xmm0 +; GENERIC-NEXT: psraw $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psraw: +; ATOM: # BB#0: +; ATOM-NEXT: psraw %xmm1, %xmm0 +; ATOM-NEXT: psraw (%rdi), %xmm0 +; ATOM-NEXT: psraw $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psraw: +; SLM: # BB#0: +; SLM-NEXT: psraw %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psraw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psraw $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psraw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psraw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psraw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> %2) + %4 = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %3, i32 2) + ret <8 x i16> %4 +} +declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone +declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone + +define <4 x i32> @test_psrld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psrld: 
+; GENERIC: # BB#0: +; GENERIC-NEXT: psrld %xmm1, %xmm0 +; GENERIC-NEXT: psrld (%rdi), %xmm0 +; GENERIC-NEXT: psrld $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psrld: +; ATOM: # BB#0: +; ATOM-NEXT: psrld %xmm1, %xmm0 +; ATOM-NEXT: psrld (%rdi), %xmm0 +; ATOM-NEXT: psrld $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psrld: +; SLM: # BB#0: +; SLM-NEXT: psrld %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psrld (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psrld $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psrld: +; SANDY: # BB#0: +; SANDY-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psrld: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psrld: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %1, <4 x i32> %2) + %4 = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %3, i32 2) + ret <4 x i32> %4 +} +declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone +declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone + +define <4 x i32> @test_psrldq(<4 x i32> %a0) { +; GENERIC-LABEL: test_psrldq: +; GENERIC: # BB#0: +; GENERIC-NEXT: psrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero +; 
GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psrldq: +; ATOM: # BB#0: +; ATOM-NEXT: psrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psrldq: +; SLM: # BB#0: +; SLM-NEXT: psrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psrldq: +; SANDY: # BB#0: +; SANDY-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psrldq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psrldq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4> + ret <4 x i32> %1 +} + +define <2 x i64> @test_psrlq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_psrlq: +; GENERIC: # BB#0: +; GENERIC-NEXT: psrlq %xmm1, %xmm0 +; GENERIC-NEXT: psrlq (%rdi), %xmm0 +; GENERIC-NEXT: psrlq $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psrlq: +; ATOM: # BB#0: +; ATOM-NEXT: psrlq %xmm1, %xmm0 +; ATOM-NEXT: psrlq (%rdi), %xmm0 +; ATOM-NEXT: psrlq $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psrlq: +; SLM: # BB#0: +; SLM-NEXT: psrlq %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psrlq (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psrlq $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psrlq: +; SANDY: # BB#0: +; SANDY-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsrlq (%rdi), 
%xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psrlq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psrlq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %1, <2 x i64> %2) + %4 = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %3, i32 2) + ret <2 x i64> %4 +} +declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone +declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone + +define <8 x i16> @test_psrlw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psrlw: +; GENERIC: # BB#0: +; GENERIC-NEXT: psrlw %xmm1, %xmm0 +; GENERIC-NEXT: psrlw (%rdi), %xmm0 +; GENERIC-NEXT: psrlw $2, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psrlw: +; ATOM: # BB#0: +; ATOM-NEXT: psrlw %xmm1, %xmm0 +; ATOM-NEXT: psrlw (%rdi), %xmm0 +; ATOM-NEXT: psrlw $2, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psrlw: +; SLM: # BB#0: +; SLM-NEXT: psrlw %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: psrlw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: psrlw $2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psrlw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50] +; 
SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psrlw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00] +; HASWELL-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psrlw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %1, <8 x i16> %2) + %4 = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %3, i32 2) + ret <8 x i16> %4 +} +declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone +declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone + +define <16 x i8> @test_psubb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_psubb: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubb %xmm1, %xmm0 +; GENERIC-NEXT: psubb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubb: +; ATOM: # BB#0: +; ATOM-NEXT: psubb %xmm1, %xmm0 +; ATOM-NEXT: psubb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubb: +; SLM: # BB#0: +; SLM-NEXT: psubb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubb: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubb (%rdi), %xmm0, 
%xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sub <16 x i8> %a0, %a1 + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = sub <16 x i8> %1, %2 + ret <16 x i8> %3 +} + +define <4 x i32> @test_psubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psubd: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubd %xmm1, %xmm0 +; GENERIC-NEXT: psubd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubd: +; ATOM: # BB#0: +; ATOM-NEXT: psubd %xmm1, %xmm0 +; ATOM-NEXT: psubd (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubd: +; SLM: # BB#0: +; SLM-NEXT: psubd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubd: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sub <4 x i32> %a0, %a1 + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = sub <4 x i32> %1, %2 + ret <4 x i32> %3 +} + +define <2 x i64> @test_psubq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_psubq: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubq %xmm1, %xmm0 +; GENERIC-NEXT: psubq (%rdi), %xmm0 +; 
GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubq: +; ATOM: # BB#0: +; ATOM-NEXT: psubq %xmm1, %xmm0 +; ATOM-NEXT: psubq (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubq: +; SLM: # BB#0: +; SLM-NEXT: psubq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubq (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubq: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sub <2 x i64> %a0, %a1 + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = sub <2 x i64> %1, %2 + ret <2 x i64> %3 +} + +define <16 x i8> @test_psubsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_psubsb: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubsb %xmm1, %xmm0 +; GENERIC-NEXT: psubsb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubsb: +; ATOM: # BB#0: +; ATOM-NEXT: psubsb %xmm1, %xmm0 +; ATOM-NEXT: psubsb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubsb: +; SLM: # BB#0: +; SLM-NEXT: psubsb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubsb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubsb: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubsb: +; HASWELL: # BB#0: +; 
HASWELL-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubsb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone + +define <8 x i16> @test_psubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psubsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubsw %xmm1, %xmm0 +; GENERIC-NEXT: psubsw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubsw: +; ATOM: # BB#0: +; ATOM-NEXT: psubsw %xmm1, %xmm0 +; ATOM-NEXT: psubsw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubsw: +; SLM: # BB#0: +; SLM-NEXT: psubsw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubsw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubsw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubsw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x 
i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <16 x i8> @test_psubusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_psubusb: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubusb %xmm1, %xmm0 +; GENERIC-NEXT: psubusb (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubusb: +; ATOM: # BB#0: +; ATOM-NEXT: psubusb %xmm1, %xmm0 +; ATOM-NEXT: psubusb (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubusb: +; SLM: # BB#0: +; SLM-NEXT: psubusb %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubusb (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubusb: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubusb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubusb: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %1, <16 x i8> %2) + ret <16 x i8> %3 +} +declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone + +define <8 x i16> @test_psubusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psubusw: +; GENERIC: # BB#0: +; GENERIC-NEXT: 
psubusw %xmm1, %xmm0 +; GENERIC-NEXT: psubusw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubusw: +; ATOM: # BB#0: +; ATOM-NEXT: psubusw %xmm1, %xmm0 +; ATOM-NEXT: psubusw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubusw: +; SLM: # BB#0: +; SLM-NEXT: psubusw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubusw (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubusw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubusw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubusw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %1, <8 x i16> %2) + ret <8 x i16> %3 +} +declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone + +define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psubw: +; GENERIC: # BB#0: +; GENERIC-NEXT: psubw %xmm1, %xmm0 +; GENERIC-NEXT: psubw (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_psubw: +; ATOM: # BB#0: +; ATOM-NEXT: psubw %xmm1, %xmm0 +; ATOM-NEXT: psubw (%rdi), %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_psubw: +; SLM: # BB#0: +; SLM-NEXT: psubw %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: psubw (%rdi), %xmm0 
# sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_psubw: +; SANDY: # BB#0: +; SANDY-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_psubw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_psubw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = sub <8 x i16> %a0, %a1 + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = sub <8 x i16> %1, %2 + ret <8 x i16> %3 +} + +define <16 x i8> @test_punpckhbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_punpckhbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; GENERIC-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpckhbw: +; ATOM: # BB#0: +; ATOM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; ATOM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpckhbw: +; SLM: # BB#0: +; SLM-NEXT: punpckhbw {{.*#+}} xmm0 = 
xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00] +; SLM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpckhbw: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50] +; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpckhbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00] +; HASWELL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpckhbw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50] +; BTVER2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 
30, i32 15, i32 31> + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31> + ret <16 x i8> %3 +} + +define <4 x i32> @test_punpckhdq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_punpckhdq: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; GENERIC-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpckhdq: +; ATOM: # BB#0: +; ATOM-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; ATOM-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] +; ATOM-NEXT: paddd %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpckhdq: +; SLM: # BB#0: +; SLM-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; SLM-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [4:1.00] +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpckhdq: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50] +; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [5:0.50] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpckhdq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; HASWELL-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [5:1.00] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpckhdq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpckhdq {{.*#+}} xmm0 = 
xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50] +; BTVER2-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [6:1.00] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7> + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7> + %4 = add <4 x i32> %1, %3 + ret <4 x i32> %4 +} + +define <2 x i64> @test_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_punpckhqdq: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; GENERIC-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpckhqdq: +; ATOM: # BB#0: +; ATOM-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; ATOM-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpckhqdq: +; SLM: # BB#0: +; SLM-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00] +; SLM-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [4:1.00] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpckhqdq: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50] +; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [5:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpckhqdq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00] +; HASWELL-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [5:1.00] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpckhqdq: +; BTVER2: # BB#0: +; 
BTVER2-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50] +; BTVER2-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3> + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2x i32> <i32 1, i32 3> + %4 = add <2 x i64> %1, %3 + ret <2 x i64> %4 +} + +define <8 x i16> @test_punpckhwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_punpckhwd: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; GENERIC-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpckhwd: +; ATOM: # BB#0: +; ATOM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; ATOM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpckhwd: +; SLM: # BB#0: +; SLM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00] +; SLM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpckhwd: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50] +; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpckhwd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpckhwd {{.*#+}} xmm0 = 
xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00] +; HASWELL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpckhwd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50] +; BTVER2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> + ret <8 x i16> %3 +} + +define <16 x i8> @test_punpcklbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) { +; GENERIC-LABEL: test_punpcklbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; GENERIC-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpcklbw: +; ATOM: # BB#0: +; ATOM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; ATOM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpcklbw: +; SLM: # BB#0: +; SLM-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00] +; SLM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpcklbw: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50] +; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpcklbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00] +; HASWELL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpcklbw: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50] +; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23> + %2 = load <16 x i8>, <16 x i8> *%a2, align 16 + %3 = shufflevector <16 x i8> 
%1, <16 x i8> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23> + ret <16 x i8> %3 +} + +define <4 x i32> @test_punpckldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_punpckldq: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; GENERIC-NEXT: paddd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpckldq: +; ATOM: # BB#0: +; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; ATOM-NEXT: paddd %xmm1, %xmm0 +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpckldq: +; SLM: # BB#0: +; SLM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00] +; SLM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [4:1.00] +; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpckldq: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50] +; SANDY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [5:0.50] +; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpckldq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00] +; HASWELL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [5:1.00] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpckldq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50] +; BTVER2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: 
[6:1.00] +; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5> + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5> + %4 = add <4 x i32> %1, %3 + ret <4 x i32> %4 +} + +define <2 x i64> @test_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_punpcklqdq: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpcklqdq: +; ATOM: # BB#0: +; ATOM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; ATOM-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpcklqdq: +; SLM: # BB#0: +; SLM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; SLM-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpcklqdq: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] +; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpcklqdq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; HASWELL-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [5:1.00] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpcklqdq: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] +; BTVER2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = 
xmm1[0],mem[0] sched: [6:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2> + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2x i32> <i32 0, i32 2> + %4 = add <2 x i64> %1, %3 + ret <2 x i64> %4 +} + +define <8 x i16> @test_punpcklwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_punpcklwd: +; GENERIC: # BB#0: +; GENERIC-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; GENERIC-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_punpcklwd: +; ATOM: # BB#0: +; ATOM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; ATOM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: nop +; ATOM-NEXT: retq +; +; SLM-LABEL: test_punpcklwd: +; SLM: # BB#0: +; SLM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; SLM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [4:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_punpcklwd: +; SANDY: # BB#0: +; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50] +; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [5:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_punpcklwd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00] +; HASWELL-NEXT: vpunpcklwd {{.*#+}} xmm0 = 
xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_punpcklwd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50] +; BTVER2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> + ret <8 x i16> %3 +} + +define <2 x i64> @test_pxor(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_pxor: +; GENERIC: # BB#0: +; GENERIC-NEXT: pxor %xmm1, %xmm0 +; GENERIC-NEXT: pxor (%rdi), %xmm0 +; GENERIC-NEXT: paddq %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_pxor: +; ATOM: # BB#0: +; ATOM-NEXT: pxor %xmm1, %xmm0 +; ATOM-NEXT: pxor (%rdi), %xmm0 +; ATOM-NEXT: paddq %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_pxor: +; SLM: # BB#0: +; SLM-NEXT: pxor %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: pxor (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_pxor: +; SANDY: # BB#0: +; SANDY-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_pxor: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; HASWELL-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_pxor: +; BTVER2: # 
BB#0: +; BTVER2-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = xor <2 x i64> %a0, %a1 + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = xor <2 x i64> %1, %2 + %4 = add <2 x i64> %3, %a1 + ret <2 x i64> %4 +} + +define <2 x double> @test_shufpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_shufpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] +; GENERIC-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],mem[0] +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_shufpd: +; ATOM: # BB#0: +; ATOM-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] +; ATOM-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],mem[0] +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_shufpd: +; SLM: # BB#0: +; SLM-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00] +; SLM-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [4:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_shufpd: +; SANDY: # BB#0: +; SANDY-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00] +; SANDY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [5:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_shufpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00] +; HASWELL-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_shufpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50] +; BTVER2-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # 
sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2> + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 2> + %4 = fadd <2 x double> %1, %3 + ret <2 x double> %4 +} + +define <2 x double> @test_sqrtpd(<2 x double> %a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_sqrtpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: sqrtpd %xmm0, %xmm1 +; GENERIC-NEXT: sqrtpd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_sqrtpd: +; ATOM: # BB#0: +; ATOM-NEXT: sqrtpd %xmm0, %xmm1 +; ATOM-NEXT: sqrtpd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_sqrtpd: +; SLM: # BB#0: +; SLM-NEXT: sqrtpd (%rdi), %xmm1 # sched: [18:1.00] +; SLM-NEXT: sqrtpd %xmm0, %xmm0 # sched: [15:1.00] +; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_sqrtpd: +; SANDY: # BB#0: +; SANDY-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [15:1.00] +; SANDY-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [19:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_sqrtpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [15:1.00] +; HASWELL-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [19:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_sqrtpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [26:21.00] +; BTVER2-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [21:21.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) + %2 = load <2 x double>, <2 x double> *%a1, align 16 + %3 = call <2 x double> 
@llvm.x86.sse2.sqrt.pd(<2 x double> %2) + %4 = fadd <2 x double> %1, %3 + ret <2 x double> %4 +} +declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone + +; TODO - sqrtsd_m + +define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) { +; GENERIC-LABEL: test_sqrtsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: sqrtsd %xmm0, %xmm0 +; GENERIC-NEXT: movapd (%rdi), %xmm1 +; GENERIC-NEXT: sqrtsd %xmm1, %xmm1 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_sqrtsd: +; ATOM: # BB#0: +; ATOM-NEXT: movapd (%rdi), %xmm1 +; ATOM-NEXT: sqrtsd %xmm0, %xmm0 +; ATOM-NEXT: sqrtsd %xmm1, %xmm1 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_sqrtsd: +; SLM: # BB#0: +; SLM-NEXT: movapd (%rdi), %xmm1 # sched: [3:1.00] +; SLM-NEXT: sqrtsd %xmm0, %xmm0 # sched: [18:1.00] +; SLM-NEXT: sqrtsd %xmm1, %xmm1 # sched: [18:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_sqrtsd: +; SANDY: # BB#0: +; SANDY-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [19:1.00] +; SANDY-NEXT: vmovapd (%rdi), %xmm1 # sched: [4:0.50] +; SANDY-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [19:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_sqrtsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [19:1.00] +; HASWELL-NEXT: vmovapd (%rdi), %xmm1 # sched: [4:0.50] +; HASWELL-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [19:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_sqrtsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vmovapd (%rdi), %xmm1 # sched: [5:1.00] +; BTVER2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [26:21.00] +; BTVER2-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [26:21.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = call 
<2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) + %2 = load <2 x double>, <2 x double> *%a1, align 16 + %3 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %2) + %4 = fadd <2 x double> %1, %3 + ret <2 x double> %4 +} +declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone + +define <2 x double> @test_subpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_subpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: subpd %xmm1, %xmm0 +; GENERIC-NEXT: subpd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_subpd: +; ATOM: # BB#0: +; ATOM-NEXT: subpd %xmm1, %xmm0 +; ATOM-NEXT: subpd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_subpd: +; SLM: # BB#0: +; SLM-NEXT: subpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: subpd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_subpd: +; SANDY: # BB#0: +; SANDY-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_subpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_subpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fsub <2 x double> %a0, %a1 + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = fsub <2 x double> %1, %2 + ret <2 x double> %3 +} + +define double @test_subsd(double %a0, double %a1, double *%a2) { +; GENERIC-LABEL: test_subsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: subsd %xmm1, %xmm0 +; GENERIC-NEXT: subsd (%rdi), %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_subsd: +; ATOM: # BB#0: +; ATOM-NEXT: subsd %xmm1, %xmm0 +; ATOM-NEXT: subsd (%rdi), %xmm0 +; ATOM-NEXT: retq +; +; 
SLM-LABEL: test_subsd: +; SLM: # BB#0: +; SLM-NEXT: subsd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: subsd (%rdi), %xmm0 # sched: [6:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_subsd: +; SANDY: # BB#0: +; SANDY-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_subsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_subsd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = fsub double %a0, %a1 + %2 = load double, double *%a2, align 8 + %3 = fsub double %1, %2 + ret double %3 +} + +define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_ucomisd: +; GENERIC: # BB#0: +; GENERIC-NEXT: ucomisd %xmm1, %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %cl +; GENERIC-NEXT: andb %al, %cl +; GENERIC-NEXT: ucomisd (%rdi), %xmm0 +; GENERIC-NEXT: setnp %al +; GENERIC-NEXT: sete %dl +; GENERIC-NEXT: andb %al, %dl +; GENERIC-NEXT: orb %cl, %dl +; GENERIC-NEXT: movzbl %dl, %eax +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_ucomisd: +; ATOM: # BB#0: +; ATOM-NEXT: ucomisd %xmm1, %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %cl +; ATOM-NEXT: andb %al, %cl +; ATOM-NEXT: ucomisd (%rdi), %xmm0 +; ATOM-NEXT: setnp %al +; ATOM-NEXT: sete %dl +; ATOM-NEXT: andb %al, %dl +; ATOM-NEXT: orb %cl, %dl +; ATOM-NEXT: movzbl %dl, %eax +; ATOM-NEXT: retq +; +; SLM-LABEL: test_ucomisd: +; SLM: # BB#0: +; SLM-NEXT: ucomisd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %cl # sched: [1:0.50] +; SLM-NEXT: andb %al, %cl # sched: [1:0.50] +; SLM-NEXT: ucomisd (%rdi), 
%xmm0 # sched: [6:1.00] +; SLM-NEXT: setnp %al # sched: [1:0.50] +; SLM-NEXT: sete %dl # sched: [1:0.50] +; SLM-NEXT: andb %al, %dl # sched: [1:0.50] +; SLM-NEXT: orb %cl, %dl # sched: [1:0.50] +; SLM-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_ucomisd: +; SANDY: # BB#0: +; SANDY-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %cl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %cl # sched: [1:0.33] +; SANDY-NEXT: vucomisd (%rdi), %xmm0 # sched: [7:1.00] +; SANDY-NEXT: setnp %al # sched: [1:0.33] +; SANDY-NEXT: sete %dl # sched: [1:0.33] +; SANDY-NEXT: andb %al, %dl # sched: [1:0.33] +; SANDY-NEXT: orb %cl, %dl # sched: [1:0.33] +; SANDY-NEXT: movzbl %dl, %eax # sched: [1:0.33] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_ucomisd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %cl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %cl # sched: [1:0.25] +; HASWELL-NEXT: vucomisd (%rdi), %xmm0 # sched: [7:1.00] +; HASWELL-NEXT: setnp %al # sched: [1:0.50] +; HASWELL-NEXT: sete %dl # sched: [1:0.50] +; HASWELL-NEXT: andb %al, %dl # sched: [1:0.25] +; HASWELL-NEXT: orb %cl, %dl # sched: [1:0.25] +; HASWELL-NEXT: movzbl %dl, %eax # sched: [1:0.25] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_ucomisd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %cl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %cl # sched: [1:0.50] +; BTVER2-NEXT: vucomisd (%rdi), %xmm0 # sched: [8:1.00] +; BTVER2-NEXT: setnp %al # sched: [1:0.50] +; BTVER2-NEXT: sete %dl # sched: [1:0.50] +; BTVER2-NEXT: andb %al, %dl # sched: [1:0.50] +; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50] +; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50] +; BTVER2-NEXT: retq # sched: [4:1.00] + 
%1 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1) + %2 = load <2 x double>, <2 x double> *%a2, align 8 + %3 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %2) + %4 = or i32 %1, %3 + ret i32 %4 +} +declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_unpckhpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_unpckhpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; GENERIC-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_unpckhpd: +; ATOM: # BB#0: +; ATOM-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; ATOM-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_unpckhpd: +; SLM: # BB#0: +; SLM-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00] +; SLM-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [4:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_unpckhpd: +; SANDY: # BB#0: +; SANDY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00] +; SANDY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [5:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_unpckhpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00] +; HASWELL-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_unpckhpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50] +; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; 
BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3> + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 3> + %4 = fadd <2 x double> %1, %3 + ret <2 x double> %4 +} + +define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_unpcklpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; GENERIC-NEXT: movapd %xmm0, %xmm1 +; GENERIC-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; GENERIC-NEXT: addpd %xmm0, %xmm1 +; GENERIC-NEXT: movapd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_unpcklpd: +; ATOM: # BB#0: +; ATOM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; ATOM-NEXT: movapd %xmm0, %xmm1 +; ATOM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; ATOM-NEXT: addpd %xmm0, %xmm1 +; ATOM-NEXT: movapd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_unpcklpd: +; SLM: # BB#0: +; SLM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; SLM-NEXT: movapd %xmm0, %xmm1 # sched: [1:1.00] +; SLM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00] +; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_unpcklpd: +; SANDY: # BB#0: +; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; SANDY-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [5:1.00] +; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_unpcklpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] +; HASWELL-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_unpcklpd: +; 
BTVER2: # BB#0: +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] +; BTVER2-NEXT: retq # sched: [4:1.00] + %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2> + %2 = load <2 x double>, <2 x double> *%a2, align 16 + %3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> <i32 0, i32 2> + %4 = fadd <2 x double> %1, %3 + ret <2 x double> %4 +} + +define <2 x double> @test_xorpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { +; GENERIC-LABEL: test_xorpd: +; GENERIC: # BB#0: +; GENERIC-NEXT: xorpd %xmm1, %xmm0 +; GENERIC-NEXT: xorpd (%rdi), %xmm0 +; GENERIC-NEXT: addpd %xmm1, %xmm0 +; GENERIC-NEXT: retq +; +; ATOM-LABEL: test_xorpd: +; ATOM: # BB#0: +; ATOM-NEXT: xorpd %xmm1, %xmm0 +; ATOM-NEXT: xorpd (%rdi), %xmm0 +; ATOM-NEXT: addpd %xmm1, %xmm0 +; ATOM-NEXT: retq +; +; SLM-LABEL: test_xorpd: +; SLM: # BB#0: +; SLM-NEXT: xorpd %xmm1, %xmm0 # sched: [1:0.50] +; SLM-NEXT: xorpd (%rdi), %xmm0 # sched: [4:1.00] +; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00] +; SLM-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: test_xorpd: +; SANDY: # BB#0: +; SANDY-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33] +; SANDY-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50] +; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [5:1.00] +; +; HASWELL-LABEL: test_xorpd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; BTVER2-LABEL: test_xorpd: +; BTVER2: # BB#0: +; BTVER2-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] +; 
BTVER2-NEXT: retq # sched: [4:1.00] + %1 = bitcast <2 x double> %a0 to <4 x i32> + %2 = bitcast <2 x double> %a1 to <4 x i32> + %3 = xor <4 x i32> %1, %2 + %4 = load <2 x double>, <2 x double> *%a2, align 16 + %5 = bitcast <2 x double> %4 to <4 x i32> + %6 = xor <4 x i32> %3, %5 + %7 = bitcast <4 x i32> %6 to <2 x double> + %8 = fadd <2 x double> %a1, %7 + ret <2 x double> %8 +} + +!0 = !{i32 1} diff --git a/test/CodeGen/X86/tail-merge-after-mbp.ll b/test/CodeGen/X86/tail-merge-after-mbp.ll deleted file mode 100644 index dc5f3a12bd91f..0000000000000 --- a/test/CodeGen/X86/tail-merge-after-mbp.ll +++ /dev/null @@ -1,94 +0,0 @@ -; RUN: llc -mtriple=x86_64-linux -o - %s | FileCheck %s - -%0 = type { %1, %3* } -%1 = type { %2* } -%2 = type { %2*, i8* } -%3 = type { i32, i32 (i32, i32)* } - - -declare i32 @Up(...) -declare i32 @f(i32, i32) - -; check loop block_14 is not merged with block_21 -; check loop block_11 is not merged with block_18, block_25 -define i32 @foo(%0* nocapture readonly, i32, i1 %c, i8* %p1, %2** %p2) { -; CHECK-LABEL: foo: -; CHECK: # %block_11 -; CHECK-NEXT: movq (%r14), %rax -; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: je -; CHECK-NEXT:# %block_14 -; CHECK-NEXT: cmpq $0, 8(%rax) -; CHECK-NEXT: jne -; CHECK-NEXT:# %block_18 -; CHECK-NEXT: movq (%r14), %rax -; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: je -; CHECK-NEXT:# %block_21 -; CHECK-NEXT:# =>This Inner Loop Header -; CHECK-NEXT: cmpq $0, 8(%rax) -; CHECK-NEXT: jne -; CHECK-NEXT:# %block_25 -; CHECK-NEXT:# in Loop -; CHECK-NEXT: movq (%r14), %rax -; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: jne - br i1 %c, label %block_34, label %block_3 - -block_3: ; preds = %2 - br i1 %c, label %block_7, label %block_4 - -block_4: ; preds = %block_3 - %a5 = tail call i32 @f(i32 undef, i32 undef) - %a6 = icmp eq i32 %a5, 0 - br i1 %a6, label %block_7, label %block_34 - -block_7: ; preds = %block_4, %block_3 - %a8 = icmp eq %2* null, null - br i1 %a8, label %block_34, label %block_9 - -block_9: ; preds = 
%block_7 - %a10 = icmp eq i8* %p1, null - br i1 %a10, label %block_11, label %block_32 - -block_11: ; preds = %block_9 - %a12 = load %2*, %2** %p2, align 8 - %a13 = icmp eq %2* %a12, null - br i1 %a13, label %block_34, label %block_14 - -block_14: ; preds = %block_11 - %a15 = getelementptr inbounds %2, %2* %a12, i64 0, i32 1 - %a16 = load i8*, i8** %a15, align 8 - %a17 = icmp eq i8* %a16, null - br i1 %a17, label %block_18, label %block_32 - -block_18: ; preds = %block_14 - %a19 = load %2*, %2** %p2, align 8 - %a20 = icmp eq %2* %a19, null - br i1 %a20, label %block_34, label %block_21 - -block_21: ; preds = %block_18 - %a22 = getelementptr inbounds %2, %2* %a19, i64 0, i32 1 - %a23 = load i8*, i8** %a22, align 8 - %a24 = icmp eq i8* %a23, null - br i1 %a24, label %block_25, label %block_32 - -block_25: ; preds = %block_28, %block_21 - %a26 = load %2*, %2** %p2, align 8 - %a27 = icmp eq %2* %a26, null - br i1 %a27, label %block_34, label %block_28 - -block_28: ; preds = %block_25 - %a29 = getelementptr inbounds %2, %2* %a26, i64 0, i32 1 - %a30 = load i8*, i8** %a29, align 8 - %a31 = icmp eq i8* %a30, null - br i1 %a31, label %block_25, label %block_32 - -block_32: ; preds = %block_28, %block_21, %block_14, %block_9 - %a33 = tail call i32 (...) 
@Up() - br label %block_34 - -block_34: ; preds = %block_32, %block_25, %block_18, %block_11, %block_7, %block_4, %2 - %a35 = phi i32 [ 0, %2 ], [ %a5, %block_4 ], [ 0, %block_7 ], [ 0, %block_11 ], [ 0, %block_32 ], [ 0, %block_18 ], [ 0, %block_25 ] - ret i32 %a35 -} diff --git a/test/CodeGen/X86/tail-merge-after-mbp.mir b/test/CodeGen/X86/tail-merge-after-mbp.mir new file mode 100644 index 0000000000000..d1dc65336948a --- /dev/null +++ b/test/CodeGen/X86/tail-merge-after-mbp.mir @@ -0,0 +1,105 @@ +# RUN: llc -mtriple=x86_64-linux -run-pass=block-placement -o - %s | FileCheck %s + +--- +# check loop bb.7 is not merged with bb.10, bb.13 +# check loop bb.9 is not merged with bb.12 +# CHECK: bb.2: +# CHECK-NEXT: successors: %bb.9(0x30000000), %bb.3(0x50000000) +# CHECK: %rax = MOV64rm %r14, 1, _, 0, _ +# CHECK-NEXT: TEST64rr %rax, %rax +# CHECK-NEXT: JE_1 %bb.9 +# CHECK: bb.3: +# CHECK-NEXT: successors: %bb.4(0x30000000), %bb.8(0x50000000) +# CHECK: CMP64mi8 killed %rax, 1, _, 8, _, 0 +# CHECK-NEXT: JNE_1 %bb.8 +# CHECK: bb.4: +# CHECK-NEXT: successors: %bb.9(0x30000000), %bb.5(0x50000000) +# CHECK: %rax = MOV64rm %r14, 1, _, 0, _ +# CHECK-NEXT: TEST64rr %rax, %rax +# CHECK-NEXT: JE_1 %bb.9 +# CHECK: bb.5 +# CHECK-NEXT: successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab) +# CHECK: CMP64mi8 killed %rax, 1, _, 8, _, 0 +# CHECK-NEXT: JNE_1 %bb.8 +# CHECK: bb.6: +# CHECK-NEXT: successors: %bb.9(0x04000000), %bb.5(0x7c000000) +# CHECK: %rax = MOV64rm %r14, 1, _, 0, _ +# CHECK-NEXT: TEST64rr %rax, %rax +# CHECK-NEXT: JNE_1 %bb.5 + +name: foo +body: | + bb.0: + successors: %bb.1(0x40000000), %bb.7(0x40000000) + + TEST8ri %dl, 1, implicit-def %eflags, implicit killed %edx + JE_1 %bb.7, implicit %eflags + + bb.1: + successors: %bb.16(0x80000000) + + %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags + JMP_1 %bb.16 + + bb.7: + successors: %bb.8(0x30000000), %bb.9(0x50000000) + + %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8) + TEST64rr %rax, %rax, implicit-def %eflags 
+ JNE_1 %bb.9, implicit killed %eflags + + bb.8: + successors: %bb.16(0x80000000) + + %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags + JMP_1 %bb.16 + + bb.9: + successors: %bb.10(0x30000000), %bb.15(0x50000000) + + CMP64mi8 killed %rax, 1, _, 8, _, 0, implicit-def %eflags :: (load 8) + JNE_1 %bb.15, implicit %eflags + + bb.10: + successors: %bb.11(0x30000000), %bb.12(0x50000000) + + %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8) + TEST64rr %rax, %rax, implicit-def %eflags + JNE_1 %bb.12, implicit %eflags + + bb.11: + successors: %bb.16(0x80000000) + + %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags + JMP_1 %bb.16 + + bb.12: + successors: %bb.13(0x71555555), %bb.15(0x0eaaaaab) + + CMP64mi8 killed %rax, 1, _, 8, _, 0, implicit-def %eflags :: (load 8), (load 8) + JNE_1 %bb.15, implicit %eflags + + bb.13: + successors: %bb.14(0x04000000), %bb.12(0x7c000000) + + %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8) + TEST64rr %rax, %rax, implicit-def %eflags + JNE_1 %bb.12, implicit %eflags + + bb.14: + successors: %bb.16(0x80000000) + + %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags + JMP_1 %bb.16 + + bb.15: + successors: %bb.16(0x80000000) + + %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags + dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al + + bb.16: + + RETQ %eax + +... 
diff --git a/test/CodeGen/X86/vector-rotate-128.ll b/test/CodeGen/X86/vector-rotate-128.ll index 5eb1a55881e57..852c1f4d3d981 100644 --- a/test/CodeGen/X86/vector-rotate-128.ll +++ b/test/CodeGen/X86/vector-rotate-128.ll @@ -1534,31 +1534,20 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind { define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind { ; SSE-LABEL: splatconstant_rotate_mask_v2i64: ; SSE: # BB#0: -; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: psllq $15, %xmm1 ; SSE-NEXT: psrlq $49, %xmm0 ; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_rotate_mask_v2i64: ; AVX: # BB#0: -; AVX-NEXT: vpsllq $15, %xmm0, %xmm1 ; AVX-NEXT: vpsrlq $49, %xmm0, %xmm0 ; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: splatconstant_rotate_mask_v2i64: ; AVX512: # BB#0: -; AVX512-NEXT: vpsllq $15, %xmm0, %xmm1 ; AVX512-NEXT: vpsrlq $49, %xmm0, %xmm0 ; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512-NEXT: retq ; ; XOP-LABEL: splatconstant_rotate_mask_v2i64: diff --git a/test/CodeGen/X86/vector-rotate-256.ll b/test/CodeGen/X86/vector-rotate-256.ll index 3306cd400c1d0..14215e486bf9e 100644 --- a/test/CodeGen/X86/vector-rotate-256.ll +++ b/test/CodeGen/X86/vector-rotate-256.ll @@ -1014,34 +1014,23 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind { define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind { ; AVX1-LABEL: splatconstant_rotate_mask_v4i64: ; AVX1: # BB#0: -; AVX1-NEXT: vpsllq $15, %xmm0, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpsllq $15, %xmm2, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 +; AVX1-NEXT: vextractf128 $1, 
%ymm0, %xmm1 ; AVX1-NEXT: vpsrlq $49, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlq $49, %xmm2, %xmm2 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpsrlq $49, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0 -; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1 -; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: splatconstant_rotate_mask_v4i64: ; AVX2: # BB#0: -; AVX2-NEXT: vpsllq $15, %ymm0, %ymm1 ; AVX2-NEXT: vpsrlq $49, %ymm0, %ymm0 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: splatconstant_rotate_mask_v4i64: ; AVX512: # BB#0: -; AVX512-NEXT: vpsllq $15, %ymm0, %ymm1 ; AVX512-NEXT: vpsrlq $49, %ymm0, %ymm0 ; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 -; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0 ; AVX512-NEXT: retq ; ; XOPAVX1-LABEL: splatconstant_rotate_mask_v4i64: diff --git a/test/CodeGen/X86/x86-16.ll b/test/CodeGen/X86/x86-16.ll index 775b2c447bbdb..55b53a8047c51 100644 --- a/test/CodeGen/X86/x86-16.ll +++ b/test/CodeGen/X86/x86-16.ll @@ -12,9 +12,16 @@ define i32 @main() #0 { ; CHECK: .code16 ; CHECK-LABEL: main +define i64 @foo(i32 %index) #0 { + %asm = tail call i64 asm "rdmsr", "=A,{cx},~{dirflag},~{fpsr},~{flags}"(i32 %index) + ret i64 %asm +} + +; CHECK-LABEL: foo +; CHECK: rdmsr attributes #0 = { nounwind } !llvm.ident = !{!0} -!0 = !{!"clang version 3.9.0 (trunk 265439) (llvm/trunk 265567)"}
\ No newline at end of file +!0 = !{!"clang version 3.9.0 (trunk 265439) (llvm/trunk 265567)"} diff --git a/test/DebugInfo/AMDGPU/code-pointer-size.ll b/test/DebugInfo/AMDGPU/code-pointer-size.ll new file mode 100644 index 0000000000000..9b2b0da945e68 --- /dev/null +++ b/test/DebugInfo/AMDGPU/code-pointer-size.ll @@ -0,0 +1,73 @@ +; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s + +; LLVM IR generated with the following command and OpenCL source: +; +; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file> +; +; kernel void kernel1(global int *A) { +; *A = 11; +; } +; +; kernel void kernel2(global int *B) { +; *B = 12; +; } + +; Make sure that code pointer size is 8 bytes: +; CHECK: .debug_info contents: +; CHECK: addr_size = 0x08 + +declare void @llvm.dbg.declare(metadata, metadata, metadata) + +define amdgpu_kernel void @kernel1(i32 addrspace(1)* %A) !dbg !7 { +entry: + %A.addr = alloca i32 addrspace(1)*, align 4 + store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4 + call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !16, metadata !17), !dbg !18 + %0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !19 + store i32 11, i32 addrspace(1)* %0, align 4, !dbg !20 + ret void, !dbg !21 +} + +define amdgpu_kernel void @kernel2(i32 addrspace(1)* %B) !dbg !22 { +entry: + %B.addr = alloca i32 addrspace(1)*, align 4 + store i32 addrspace(1)* %B, i32 addrspace(1)** %B.addr, align 4 + call void @llvm.dbg.declare(metadata i32 addrspace(1)** %B.addr, metadata !23, metadata !17), !dbg !24 + %0 = load i32 addrspace(1)*, i32 addrspace(1)** %B.addr, align 4, !dbg !25 + store i32 12, i32 addrspace(1)* %0, align 4, !dbg !26 + ret void, !dbg !27 +} + +!llvm.dbg.cu = !{!0} +!opencl.ocl.version = !{!3, !3} +!llvm.module.flags = !{!4, !5} +!llvm.ident = !{!6} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", 
isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "dwarfdump-relocs.cl", directory: "/some/random/directory") +!2 = !{} +!3 = !{i32 2, i32 0} +!4 = !{i32 2, !"Dwarf Version", i32 2} +!5 = !{i32 2, !"Debug Info Version", i32 3} +!6 = !{!""} +!7 = distinct !DISubprogram(name: "kernel1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!8 = !DISubroutineType(types: !9) +!9 = !{null, !10} +!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64) +!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!12 = !{i32 1} +!13 = !{!"none"} +!14 = !{!"int*"} +!15 = !{!""} +!16 = !DILocalVariable(name: "A", arg: 1, scope: !7, file: !1, line: 1, type: !10) +!17 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef) +!18 = !DILocation(line: 1, column: 33, scope: !7) +!19 = !DILocation(line: 2, column: 4, scope: !7) +!20 = !DILocation(line: 2, column: 6, scope: !7) +!21 = !DILocation(line: 3, column: 1, scope: !7) +!22 = distinct !DISubprogram(name: "kernel2", scope: !1, file: !1, line: 5, type: !8, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!23 = !DILocalVariable(name: "B", arg: 1, scope: !22, file: !1, line: 5, type: !10) +!24 = !DILocation(line: 5, column: 33, scope: !22) +!25 = !DILocation(line: 6, column: 4, scope: !22) +!26 = !DILocation(line: 6, column: 6, scope: !22) +!27 = !DILocation(line: 7, column: 1, scope: !22) diff --git a/test/DebugInfo/AMDGPU/dwarfdump-relocs.ll b/test/DebugInfo/AMDGPU/dwarfdump-relocs.ll new file mode 100644 index 0000000000000..9c7e205aa2d08 --- /dev/null +++ b/test/DebugInfo/AMDGPU/dwarfdump-relocs.ll @@ -0,0 +1,72 @@ +; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump - 2>&1 | FileCheck %s + +; LLVM IR generated with the 
following command and OpenCL source: +; +; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file> +; +; kernel void kernel1(global int *A) { +; *A = 11; +; } +; +; kernel void kernel2(global int *B) { +; *B = 12; +; } + +; CHECK-NOT: failed to compute relocation +; CHECK: file_names[ 1] 0 0x00000000 0x00000000 dwarfdump-relocs.cl + +declare void @llvm.dbg.declare(metadata, metadata, metadata) + +define amdgpu_kernel void @kernel1(i32 addrspace(1)* %A) !dbg !7 { +entry: + %A.addr = alloca i32 addrspace(1)*, align 4 + store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4 + call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !16, metadata !17), !dbg !18 + %0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !19 + store i32 11, i32 addrspace(1)* %0, align 4, !dbg !20 + ret void, !dbg !21 +} + +define amdgpu_kernel void @kernel2(i32 addrspace(1)* %B) !dbg !22 { +entry: + %B.addr = alloca i32 addrspace(1)*, align 4 + store i32 addrspace(1)* %B, i32 addrspace(1)** %B.addr, align 4 + call void @llvm.dbg.declare(metadata i32 addrspace(1)** %B.addr, metadata !23, metadata !17), !dbg !24 + %0 = load i32 addrspace(1)*, i32 addrspace(1)** %B.addr, align 4, !dbg !25 + store i32 12, i32 addrspace(1)* %0, align 4, !dbg !26 + ret void, !dbg !27 +} + +!llvm.dbg.cu = !{!0} +!opencl.ocl.version = !{!3, !3} +!llvm.module.flags = !{!4, !5} +!llvm.ident = !{!6} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "dwarfdump-relocs.cl", directory: "/some/random/directory") +!2 = !{} +!3 = !{i32 2, i32 0} +!4 = !{i32 2, !"Dwarf Version", i32 2} +!5 = !{i32 2, !"Debug Info Version", i32 3} +!6 = !{!""} +!7 = distinct !DISubprogram(name: "kernel1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, 
variables: !2) +!8 = !DISubroutineType(types: !9) +!9 = !{null, !10} +!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64) +!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!12 = !{i32 1} +!13 = !{!"none"} +!14 = !{!"int*"} +!15 = !{!""} +!16 = !DILocalVariable(name: "A", arg: 1, scope: !7, file: !1, line: 1, type: !10) +!17 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef) +!18 = !DILocation(line: 1, column: 33, scope: !7) +!19 = !DILocation(line: 2, column: 4, scope: !7) +!20 = !DILocation(line: 2, column: 6, scope: !7) +!21 = !DILocation(line: 3, column: 1, scope: !7) +!22 = distinct !DISubprogram(name: "kernel2", scope: !1, file: !1, line: 5, type: !8, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!23 = !DILocalVariable(name: "B", arg: 1, scope: !22, file: !1, line: 5, type: !10) +!24 = !DILocation(line: 5, column: 33, scope: !22) +!25 = !DILocation(line: 6, column: 4, scope: !22) +!26 = !DILocation(line: 6, column: 6, scope: !22) +!27 = !DILocation(line: 7, column: 1, scope: !22) diff --git a/test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll b/test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll deleted file mode 100644 index cbd5e7688a5a2..0000000000000 --- a/test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll +++ /dev/null @@ -1,70 +0,0 @@ -; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s - -; LLVM IR generated with the following command and OpenCL source: -; -; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file> -; -; kernel void kernel1() { -; global int *FuncVar0 = 0; -; constant int *FuncVar1 = 0; -; local int *FuncVar2 = 0; -; private int *FuncVar3 = 0; -; int *FuncVar4 = 0; -; } - -; DW_AT_address_class is available since Dwarf Version 2. 
-; CHECK-NOT: DW_AT_address_class - -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 - -define amdgpu_kernel void @kernel1() #0 !dbg !7 { -entry: - %FuncVar0 = alloca i32 addrspace(1)*, align 4 - %FuncVar1 = alloca i32 addrspace(2)*, align 4 - %FuncVar2 = alloca i32 addrspace(3)*, align 4 - %FuncVar3 = alloca i32*, align 4 - %FuncVar4 = alloca i32 addrspace(4)*, align 4 - call void @llvm.dbg.declare(metadata i32 addrspace(1)** %FuncVar0, metadata !10, metadata !13), !dbg !14 - store i32 addrspace(1)* null, i32 addrspace(1)** %FuncVar0, align 4, !dbg !14 - call void @llvm.dbg.declare(metadata i32 addrspace(2)** %FuncVar1, metadata !15, metadata !13), !dbg !16 - store i32 addrspace(2)* null, i32 addrspace(2)** %FuncVar1, align 4, !dbg !16 - call void @llvm.dbg.declare(metadata i32 addrspace(3)** %FuncVar2, metadata !17, metadata !13), !dbg !19 - store i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*), i32 addrspace(3)** %FuncVar2, align 4, !dbg !19 - call void @llvm.dbg.declare(metadata i32** %FuncVar3, metadata !20, metadata !13), !dbg !22 - store i32* addrspacecast (i32 addrspace(4)* null to i32*), i32** %FuncVar3, align 4, !dbg !22 - call void @llvm.dbg.declare(metadata i32 addrspace(4)** %FuncVar4, metadata !23, metadata !13), !dbg !24 - store i32 addrspace(4)* null, i32 addrspace(4)** %FuncVar4, align 4, !dbg !24 - ret void, !dbg !25 -} - -!llvm.dbg.cu = !{!0} -!opencl.ocl.version = !{!3} -!llvm.module.flags = !{!4, !5} -!llvm.ident = !{!6} - -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) -!1 = !DIFile(filename: "pointer-address-space-dwarf-v1.cl", directory: "/some/random/directory") -!2 = !{} -!3 = !{i32 2, i32 0} -!4 = !{i32 2, !"Dwarf Version", i32 1} -!5 = !{i32 2, !"Debug Info Version", i32 3} -!6 = !{!""} -!7 = distinct !DISubprogram(name: "kernel1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, 
isDefinition: true, scopeLine: 1, isOptimized: false, unit: !0, variables: !2) -!8 = !DISubroutineType(types: !9) -!9 = !{null} -!10 = !DILocalVariable(name: "FuncVar0", scope: !7, file: !1, line: 2, type: !11) -!11 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 64) -!12 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) -!13 = !DIExpression() -!14 = !DILocation(line: 2, column: 15, scope: !7) -!15 = !DILocalVariable(name: "FuncVar1", scope: !7, file: !1, line: 3, type: !11) -!16 = !DILocation(line: 3, column: 17, scope: !7) -!17 = !DILocalVariable(name: "FuncVar2", scope: !7, file: !1, line: 4, type: !18) -!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 32, dwarfAddressSpace: 2) -!19 = !DILocation(line: 4, column: 14, scope: !7) -!20 = !DILocalVariable(name: "FuncVar3", scope: !7, file: !1, line: 5, type: !21) -!21 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 32, dwarfAddressSpace: 1) -!22 = !DILocation(line: 5, column: 16, scope: !7) -!23 = !DILocalVariable(name: "FuncVar4", scope: !7, file: !1, line: 6, type: !11) -!24 = !DILocation(line: 6, column: 8, scope: !7) -!25 = !DILocation(line: 7, column: 1, scope: !7) diff --git a/test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll b/test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll deleted file mode 100644 index d04a8eb74656d..0000000000000 --- a/test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll +++ /dev/null @@ -1,92 +0,0 @@ -; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s - -; LLVM IR generated with the following command and OpenCL source: -; -; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file> -; -; global int GlobA; -; global int GlobB; -; -; kernel void kernel1(unsigned int ArgN, global int *ArgA, global int *ArgB) { -; ArgA[ArgN] += ArgB[ArgN]; -; } - -declare void @llvm.dbg.declare(metadata, metadata, 
metadata) - -; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 ) -@GlobA = common addrspace(1) global i32 0, align 4, !dbg !0 -; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 ) -@GlobB = common addrspace(1) global i32 0, align 4, !dbg !6 - -define amdgpu_kernel void @kernel1( -; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x06> 91 04 10 01 16 18 ) - i32 %ArgN, -; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x06> 91 08 10 01 16 18 ) - i32 addrspace(1)* %ArgA, -; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x06> 91 10 10 01 16 18 ) - i32 addrspace(1)* %ArgB) !dbg !13 { -entry: - %ArgN.addr = alloca i32, align 4 - %ArgA.addr = alloca i32 addrspace(1)*, align 4 - %ArgB.addr = alloca i32 addrspace(1)*, align 4 - store i32 %ArgN, i32* %ArgN.addr, align 4 - call void @llvm.dbg.declare(metadata i32* %ArgN.addr, metadata !22, metadata !23), !dbg !24 - store i32 addrspace(1)* %ArgA, i32 addrspace(1)** %ArgA.addr, align 4 - call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgA.addr, metadata !25, metadata !23), !dbg !26 - store i32 addrspace(1)* %ArgB, i32 addrspace(1)** %ArgB.addr, align 4 - call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgB.addr, metadata !27, metadata !23), !dbg !28 - %0 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgB.addr, align 4, !dbg !29 - %1 = load i32, i32* %ArgN.addr, align 4, !dbg !30 - %idxprom = zext i32 %1 to i64, !dbg !29 - %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 %idxprom, !dbg !29 - %2 = load i32, i32 addrspace(1)* %arrayidx, align 4, !dbg !29 - %3 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgA.addr, align 4, !dbg !31 - %4 = load i32, i32* %ArgN.addr, align 4, !dbg !32 - %idxprom1 = zext i32 %4 to i64, !dbg !31 - %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %3, i64 %idxprom1, !dbg !31 - %5 = load i32, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33 - %add = add nsw i32 %5, %2, !dbg !33 - store i32 %add, i32 addrspace(1)* %arrayidx2, align 
4, !dbg !33 - ret void, !dbg !34 -} - -!llvm.dbg.cu = !{!2} -!opencl.ocl.version = !{!9} -!llvm.module.flags = !{!10, !11} -!llvm.ident = !{!12} - -!0 = !DIGlobalVariableExpression(var: !1) -!1 = distinct !DIGlobalVariable(name: "GlobA", scope: !2, file: !3, line: 1, type: !8, isLocal: false, isDefinition: true) -!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5) -!3 = !DIFile(filename: "variable-locations-dwarf-v1.cl", directory: "/some/random/directory") -!4 = !{} -!5 = !{!0, !6} -!6 = !DIGlobalVariableExpression(var: !7) -!7 = distinct !DIGlobalVariable(name: "GlobB", scope: !2, file: !3, line: 2, type: !8, isLocal: false, isDefinition: true) -!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) -!9 = !{i32 2, i32 0} -!10 = !{i32 2, !"Dwarf Version", i32 1} -!11 = !{i32 2, !"Debug Info Version", i32 3} -!12 = !{!"clang version 5.0.0"} -!13 = distinct !DISubprogram(name: "kernel1", scope: !3, file: !3, line: 4, type: !14, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !2, variables: !4) -!14 = !DISubroutineType(types: !15) -!15 = !{null, !16, !17, !17} -!16 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned) -!17 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 64) -!18 = !{i32 0, i32 1, i32 1} -!19 = !{!"none", !"none", !"none"} -!20 = !{!"uint", !"int*", !"int*"} -!21 = !{!"", !"", !""} -!22 = !DILocalVariable(name: "ArgN", arg: 1, scope: !13, file: !3, line: 4, type: !16) -!23 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef) -!24 = !DILocation(line: 4, column: 34, scope: !13) -!25 = !DILocalVariable(name: "ArgA", arg: 2, scope: !13, file: !3, line: 4, type: !17) -!26 = !DILocation(line: 4, column: 52, scope: !13) -!27 = !DILocalVariable(name: "ArgB", arg: 3, scope: !13, file: !3, line: 4, type: !17) -!28 = 
!DILocation(line: 4, column: 70, scope: !13) -!29 = !DILocation(line: 5, column: 17, scope: !13) -!30 = !DILocation(line: 5, column: 22, scope: !13) -!31 = !DILocation(line: 5, column: 3, scope: !13) -!32 = !DILocation(line: 5, column: 8, scope: !13) -!33 = !DILocation(line: 5, column: 14, scope: !13) -!34 = !DILocation(line: 6, column: 1, scope: !13) diff --git a/test/DebugInfo/AMDGPU/variable-locations.ll b/test/DebugInfo/AMDGPU/variable-locations.ll index 1aab40f946c6a..93a0f26d1f1dd 100644 --- a/test/DebugInfo/AMDGPU/variable-locations.ll +++ b/test/DebugInfo/AMDGPU/variable-locations.ll @@ -19,7 +19,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) ; CHECK-NEXT: DW_AT_external ; CHECK-NEXT: DW_AT_decl_file ; CHECK-NEXT: DW_AT_decl_line -; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 ) +; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x09> 03 00 00 00 00 00 00 00 00 ) @GlobA = common addrspace(1) global i32 0, align 4, !dbg !0 ; CHECK: {{.*}}DW_TAG_variable @@ -28,7 +28,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) ; CHECK-NEXT: DW_AT_external ; CHECK-NEXT: DW_AT_decl_file ; CHECK-NEXT: DW_AT_decl_line -; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 ) +; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x09> 03 00 00 00 00 00 00 00 00 ) @GlobB = common addrspace(1) global i32 0, align 4, !dbg !6 define amdgpu_kernel void @kernel1( diff --git a/test/DebugInfo/ARM/selectiondag-deadcode.ll b/test/DebugInfo/ARM/selectiondag-deadcode.ll index fe5e87658ddee..d4d0207bf07db 100644 --- a/test/DebugInfo/ARM/selectiondag-deadcode.ll +++ b/test/DebugInfo/ARM/selectiondag-deadcode.ll @@ -13,7 +13,7 @@ _ZN7Vector39NormalizeEv.exit: ; preds = %1, %0 ; and SelectionDAGISel crashes. It should definitely not ; crash. Drop the dbg_value instead. 
; CHECK-NOT: "matrix" - tail call void @llvm.dbg.declare(metadata %class.Matrix3.0.6.10* %agg.result, metadata !45, metadata !DIExpression(DW_OP_deref)) + tail call void @llvm.dbg.declare(metadata %class.Matrix3.0.6.10* %agg.result, metadata !45, metadata !DIExpression()) %2 = getelementptr inbounds %class.Matrix3.0.6.10, %class.Matrix3.0.6.10* %agg.result, i32 0, i32 0, i32 8 ret void } diff --git a/test/DebugInfo/Generic/block-asan.ll b/test/DebugInfo/Generic/block-asan.ll index 96072b1ccfb5c..f1f8b35df27c9 100644 --- a/test/DebugInfo/Generic/block-asan.ll +++ b/test/DebugInfo/Generic/block-asan.ll @@ -13,7 +13,7 @@ ; Check that the location of the ASAN instrumented __block variable is ; correct. -; CHECK: !DIExpression(DW_OP_deref, DW_OP_plus, 8, DW_OP_deref, DW_OP_plus, 24) +; CHECK: !DIExpression(DW_OP_plus, 8, DW_OP_deref, DW_OP_plus, 24) target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" diff --git a/test/DebugInfo/X86/dbg-declare-arg.ll b/test/DebugInfo/X86/dbg-declare-arg.ll index 7fd6296c7ee17..ca865ab598293 100644 --- a/test/DebugInfo/X86/dbg-declare-arg.ll +++ b/test/DebugInfo/X86/dbg-declare-arg.ll @@ -1,9 +1,17 @@ -; RUN: llc -O0 -fast-isel=false < %s | FileCheck %s +; RUN: llc -O0 -fast-isel=true -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-macosx10.6.7" -;Radar 9321650 - -;CHECK: ##DEBUG_VALUE: my_a +; rdar://problem/9321650 +; +; CHECK: DW_AT_name {{.*}}"j" +; CHECK: DW_TAG_variable +; CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x00000000) +; CHECK-NEXT: DW_AT_name {{.*}}"my_a" +; CHECK: .debug_loc contents: +; CHECK: 0x00000000: Beginning address offset: +; CHECK-NEXT: Ending address offset: +; CHECK-NEXT: Location description: 77 08 +; rsp+8 %class.A = type { i32, i32, i32, i32 } diff --git a/test/DebugInfo/X86/dbg_value_direct.ll 
b/test/DebugInfo/X86/dbg_value_direct.ll index 58560e4c81d48..12adf125fadbb 100644 --- a/test/DebugInfo/X86/dbg_value_direct.ll +++ b/test/DebugInfo/X86/dbg_value_direct.ll @@ -70,7 +70,7 @@ entry: ; <label>:28 ; preds = %22, %entry store i32 %0, i32* %3, align 4 - call void @llvm.dbg.declare(metadata %struct.A* %agg.result, metadata !24, metadata !DIExpression(DW_OP_deref)), !dbg !25 + call void @llvm.dbg.declare(metadata %struct.A* %agg.result, metadata !24, metadata !DIExpression()), !dbg !25 call void @_ZN1AC1Ev(%struct.A* %agg.result), !dbg !25 store i64 1172321806, i64* %4, !dbg !26 %29 = inttoptr i64 %10 to i32*, !dbg !26 diff --git a/test/DebugInfo/X86/debug-info-block-captured-self.ll b/test/DebugInfo/X86/debug-info-block-captured-self.ll index e3cfca19955eb..1085eaef0d4e4 100644 --- a/test/DebugInfo/X86/debug-info-block-captured-self.ll +++ b/test/DebugInfo/X86/debug-info-block-captured-self.ll @@ -107,5 +107,5 @@ define internal void @"__24-[Main initWithContext:]_block_invoke_2"(i8* %.block_ !106 = !DILocation(line: 40, scope: !42) !107 = !DIFile(filename: "llvm/tools/clang/test/CodeGenObjC/debug-info-block-captured-self.m", directory: "") !108 = !{i32 1, !"Debug Info Version", i32 3} -!109 = !DIExpression(DW_OP_deref, DW_OP_plus, 32) -!110 = !DIExpression(DW_OP_deref, DW_OP_plus, 32) +!109 = !DIExpression(DW_OP_plus, 32, DW_OP_deref) +!110 = !DIExpression(DW_OP_plus, 32, DW_OP_deref) diff --git a/test/DebugInfo/X86/dw_op_minus.ll b/test/DebugInfo/X86/dw_op_minus.ll index e76f2933fdda6..8e65b489c27b0 100644 --- a/test/DebugInfo/X86/dw_op_minus.ll +++ b/test/DebugInfo/X86/dw_op_minus.ll @@ -10,7 +10,7 @@ ; Capture(buf); ; } ; } -; The interesting part is !DIExpression(DW_OP_deref, DW_OP_minus, 400) +; The interesting part is !DIExpression(DW_OP_minus, 400) target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" @@ -56,20 +56,17 @@ declare void @Capture(i32*) !14 = !{i32 2, !"Debug Info Version", i32 3} !15 = 
!{!"clang version 3.8.0 (trunk 248518) (llvm/trunk 248512)"} !16 = !DILocation(line: 5, column: 3, scope: !4) -!17 = !DIExpression(DW_OP_deref, DW_OP_minus, 400) +!17 = !DIExpression(DW_OP_minus, 400) !18 = !DILocation(line: 5, column: 7, scope: !4) !19 = !DILocation(line: 6, column: 11, scope: !4) !20 = !DILocation(line: 6, column: 3, scope: !4) !21 = !DILocation(line: 7, column: 1, scope: !4) ; RCX - 400 -; CHECK: .short 6 # Loc expr size +; CHECK: .short 3 # Loc expr size ; CHECK-NEXT: .byte 114 # DW_OP_breg2 -; CHECK-NEXT: .byte 0 # 0 -; CHECK-NEXT: .byte 16 # DW_OP_constu -; CHECK-NEXT: .byte 144 # 400 -; CHECK-NEXT: .byte 3 # DW_OP_minus -; CHECK-NEXT: .byte 28 +; CHECK-NEXT: .byte 240 # -400 +; CHECK-NEXT: .byte 124 ; RCX is clobbered in call @Capture, but there is a spilled copy. ; *(RSP + 8) - 400 diff --git a/test/DebugInfo/X86/dw_op_minus_direct.ll b/test/DebugInfo/X86/dw_op_minus_direct.ll index 29e07213abbb2..8d346be532e87 100644 --- a/test/DebugInfo/X86/dw_op_minus_direct.ll +++ b/test/DebugInfo/X86/dw_op_minus_direct.ll @@ -1,15 +1,24 @@ ; Test dwarf codegen of DW_OP_minus. 
; RUN: llc -filetype=obj < %s | llvm-dwarfdump - | FileCheck %s +; RUN: llc -dwarf-version=2 -filetype=obj < %s | llvm-dwarfdump - \ +; RUN: | FileCheck %s --check-prefix=DWARF2 +; RUN: llc -dwarf-version=3 -filetype=obj < %s | llvm-dwarfdump - \ +; RUN: | FileCheck %s --check-prefix=DWARF2 ; This was derived manually from: ; int inc(int i) { ; return i+1; ; } +; DWARF2: .debug_info +; DWARF2: DW_TAG_formal_parameter +; DWARF2-NEXT: DW_AT_name {{.*}}"i" +; DWARF2-NOT: DW_AT_location + ; CHECK: Beginning address offset: 0x0000000000000000 ; CHECK: Ending address offset: 0x0000000000000004 -; CHECK: Location description: 50 10 ff ff ff ff 0f 1a 10 01 1c -; rax, constu 0xffffffff, and, constu 0x00000001, minus +; CHECK: Location description: 70 00 10 ff ff ff ff 0f 1a 10 01 1c 9f +; rax+0, constu 0xffffffff, and, constu 0x00000001, minus, stack-value source_filename = "minus.c" target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.12.0" @@ -42,7 +51,7 @@ attributes #1 = { nounwind readnone } !10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) !11 = !{!12} !12 = !DILocalVariable(name: "i", arg: 1, scope: !7, file: !1, line: 1, type: !10) -!13 = !DIExpression(DW_OP_minus, 1) +!13 = !DIExpression(DW_OP_minus, 1, DW_OP_stack_value) !14 = !DILocation(line: 1, column: 13, scope: !7) !15 = !DILocation(line: 2, column: 11, scope: !7) !16 = !DILocation(line: 2, column: 3, scope: !7) diff --git a/test/DebugInfo/X86/fi-expr.ll b/test/DebugInfo/X86/fi-expr.ll new file mode 100644 index 0000000000000..cf240505c0ccf --- /dev/null +++ b/test/DebugInfo/X86/fi-expr.ll @@ -0,0 +1,35 @@ +; RUN: llc -mtriple=x86_64-apple-darwin -o - %s -filetype=obj \ +; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck %s +; A hand-crafted FrameIndex location with a DW_OP_deref. 
+; CHECK: DW_TAG_formal_parameter +; fbreg -8, deref +; CHECK-NEXT: DW_AT_location {{.*}} (<0x3> 91 78 06 ) +; CHECK-NEXT: DW_AT_name {{.*}} "foo" +define void @f(i8* %bar) !dbg !6 { +entry: + %foo.addr = alloca i8* + store i8* %bar, i8** %foo.addr + call void @llvm.dbg.declare(metadata i8** %foo.addr, metadata !12, metadata !13), !dbg !14 + ret void, !dbg !15 +} + +declare void @llvm.dbg.declare(metadata, metadata, metadata) + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "t.c", directory: "/") +!2 = !{} +!3 = !{i32 2, !"Dwarf Version", i32 4} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!6 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!7 = !DISubroutineType(types: !8) +!8 = !{null, !9} +!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64) +!10 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !11) +!11 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char) +!12 = !DILocalVariable(name: "foo", arg: 1, scope: !6, file: !1, line: 1, type: !10) +!13 = !DIExpression(DW_OP_deref) +!14 = !DILocation(line: 1, scope: !6) +!15 = !DILocation(line: 1, scope: !6) diff --git a/test/DebugInfo/X86/sret.ll b/test/DebugInfo/X86/sret.ll index 84d6719348894..c4bb005a36681 100644 --- a/test/DebugInfo/X86/sret.ll +++ b/test/DebugInfo/X86/sret.ll @@ -1,10 +1,22 @@ ; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t -; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s +; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s --check-prefix=CHECK-DWO ; Based on the debuginfo-tests/sret.cpp code. 
-; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x51ac5644b1937aa1) -; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x51ac5644b1937aa1) +; CHECK-DWO: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x51ac5644b1937aa1) +; CHECK-DWO: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x51ac5644b1937aa1) + +; RUN: llc -O0 -fast-isel=true -mtriple=x86_64-apple-darwin -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s +; RUN: llc -O0 -fast-isel=false -mtriple=x86_64-apple-darwin -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s +; CHECK: _ZN1B9AInstanceEv +; CHECK: DW_TAG_variable +; CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x00000000) +; CHECK-NEXT: DW_AT_name {{.*}}"a" +; CHECK: .debug_loc contents: +; CHECK: 0x00000000: Beginning address offset: +; CHECK-NEXT: Ending address offset: +; CHECK-NEXT: Location description: 75 00 +; rdi+0 %class.A = type { i32 (...)**, i32 } %class.B = type { i8 } @@ -98,7 +110,7 @@ entry: call void @llvm.dbg.declare(metadata %class.B** %this.addr, metadata !89, metadata !DIExpression()), !dbg !91 %this1 = load %class.B*, %class.B** %this.addr store i1 false, i1* %nrvo, !dbg !92 - call void @llvm.dbg.declare(metadata %class.A* %agg.result, metadata !93, metadata !DIExpression(DW_OP_deref)), !dbg !92 + call void @llvm.dbg.declare(metadata %class.A* %agg.result, metadata !93, metadata !DIExpression()), !dbg !92 call void @_ZN1AC1Ei(%class.A* %agg.result, i32 12), !dbg !92 store i1 true, i1* %nrvo, !dbg !94 store i32 1, i32* %cleanup.dest.slot diff --git a/test/Instrumentation/AddressSanitizer/debug_info.ll b/test/Instrumentation/AddressSanitizer/debug_info.ll index cc79cbbce9e9b..0366c0008d34d 100644 --- a/test/Instrumentation/AddressSanitizer/debug_info.ll +++ b/test/Instrumentation/AddressSanitizer/debug_info.ll @@ -24,9 +24,9 @@ entry: ; CHECK: entry: ; Verify that llvm.dbg.declare calls are in the entry basic block. 
; CHECK-NOT: %entry -; CHECK: call void @llvm.dbg.declare(metadata {{.*}}, metadata ![[ARG_ID:[0-9]+]], metadata ![[OPDEREF:[0-9]+]]) +; CHECK: call void @llvm.dbg.declare(metadata {{.*}}, metadata ![[ARG_ID:[0-9]+]], metadata ![[EMPTY:[0-9]+]]) ; CHECK-NOT: %entry -; CHECK: call void @llvm.dbg.declare(metadata {{.*}}, metadata ![[VAR_ID:[0-9]+]], metadata ![[OPDEREF:[0-9]+]]) +; CHECK: call void @llvm.dbg.declare(metadata {{.*}}, metadata ![[VAR_ID:[0-9]+]], metadata ![[EMPTY:[0-9]+]]) declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone @@ -47,7 +47,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone ; Verify that debug descriptors for argument and local variable will be replaced ; with descriptors that end with OpDeref (encoded as 2). ; CHECK: ![[ARG_ID]] = !DILocalVariable(name: "p", arg: 1,{{.*}} line: 1 -; CHECK: ![[OPDEREF]] = !DIExpression(DW_OP_deref) +; CHECK: ![[EMPTY]] = !DIExpression() ; CHECK: ![[VAR_ID]] = !DILocalVariable(name: "r",{{.*}} line: 2 ; Verify that there are no more variable descriptors. 
; CHECK-NOT: !DILocalVariable(tag: DW_TAG_arg_variable diff --git a/test/Instrumentation/SanitizerCoverage/coverage.ll b/test/Instrumentation/SanitizerCoverage/coverage.ll index 75a341da021c9..d675c9d9c3709 100644 --- a/test/Instrumentation/SanitizerCoverage/coverage.ll +++ b/test/Instrumentation/SanitizerCoverage/coverage.ll @@ -5,9 +5,7 @@ ; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=0 -S | FileCheck %s --check-prefix=CHECK_WITH_CHECK ; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=1 -S | FileCheck %s --check-prefix=CHECK_WITH_CHECK ; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-block-threshold=10 -S | FileCheck %s --check-prefix=CHECK3 -; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -S | FileCheck %s --check-prefix=CHECK4 ; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -sanitizer-coverage-trace-pc -S | FileCheck %s --check-prefix=CHECK_TRACE_PC -; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-8bit-counters=1 -S | FileCheck %s --check-prefix=CHECK-8BIT ; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=10 \ ; RUN: -S | FileCheck %s --check-prefix=CHECK2 @@ -81,25 +79,6 @@ entry: ; CHECK3-NOT: call void @__sanitizer_cov ; CHECK3: ret void -; test -sanitizer-coverage-8bit-counters=1 -; CHECK-8BIT-LABEL: define void @foo - -; CHECK-8BIT: [[V11:%[0-9]*]] = load i8{{.*}}!nosanitize -; CHECK-8BIT: [[V12:%[0-9]*]] = add i8 [[V11]], 1 -; CHECK-8BIT: store i8 [[V12]]{{.*}}!nosanitize -; CHECK-8BIT: [[V21:%[0-9]*]] = load i8{{.*}}!nosanitize -; CHECK-8BIT: [[V22:%[0-9]*]] = add i8 [[V21]], 1 -; CHECK-8BIT: store i8 [[V22]]{{.*}}!nosanitize -; CHECK-8BIT: [[V31:%[0-9]*]] = load i8{{.*}}!nosanitize -; CHECK-8BIT: [[V32:%[0-9]*]] = add i8 [[V31]], 1 -; CHECK-8BIT: store i8 [[V32]]{{.*}}!nosanitize -; CHECK-8BIT: [[V41:%[0-9]*]] = load i8{{.*}}!nosanitize -; CHECK-8BIT: [[V42:%[0-9]*]] = add i8 
[[V41]], 1 -; CHECK-8BIT: store i8 [[V42]]{{.*}}!nosanitize - -; CHECK-8BIT: ret void - - %struct.StructWithVptr = type { i32 (...)** } define void @CallViaVptr(%struct.StructWithVptr* %foo) uwtable sanitize_address { @@ -113,13 +92,6 @@ entry: ret void } -; We expect to see two calls to __sanitizer_cov_indir_call16 -; with different values of second argument. -; CHECK4-LABEL: define void @CallViaVptr -; CHECK4: call void @__sanitizer_cov_indir_call16({{.*}},[[CACHE:.*]]) -; CHECK4-NOT: call void @__sanitizer_cov_indir_call16({{.*}},[[CACHE]]) -; CHECK4: ret void - ; CHECK_TRACE_PC-LABEL: define void @foo ; CHECK_TRACE_PC: call void @__sanitizer_cov_trace_pc ; CHECK_TRACE_PC: call void asm sideeffect "", ""() @@ -135,10 +107,6 @@ entry: unreachable } -; CHECK4-LABEL: define void @call_unreachable -; CHECK4-NOT: __sanitizer_cov -; CHECK4: unreachable - ; CHECKPRUNE-LABEL: define void @foo ; CHECKPRUNE: call void @__sanitizer_cov ; CHECKPRUNE: call void @__sanitizer_cov diff --git a/test/Instrumentation/SanitizerCoverage/tracing.ll b/test/Instrumentation/SanitizerCoverage/tracing.ll index 9e153472eaba2..1561a14860144 100644 --- a/test/Instrumentation/SanitizerCoverage/tracing.ll +++ b/test/Instrumentation/SanitizerCoverage/tracing.ll @@ -1,6 +1,4 @@ ; Test -sanitizer-coverage-experimental-tracing -; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-experimental-tracing -S | FileCheck %s --check-prefix=CHECK1 -; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-experimental-tracing -S | FileCheck %s --check-prefix=CHECK3 ; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc -S | FileCheck %s --check-prefix=CHECK_PC ; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc-guard -S | FileCheck %s --check-prefix=CHECK_PC_GUARD ; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc-guard -S -mtriple=x86_64-apple-macosx | FileCheck %s 
--check-prefix=CHECK_PC_GUARD_DARWIN @@ -20,19 +18,6 @@ entry: ret void } -; CHECK1-LABEL: define void @foo -; CHECK1: call void @__sanitizer_cov_trace_func_enter -; CHECK1: call void @__sanitizer_cov_trace_basic_block -; CHECK1-NOT: call void @__sanitizer_cov_trace_basic_block -; CHECK1: ret void - -; CHECK3-LABEL: define void @foo -; CHECK3: call void @__sanitizer_cov_trace_func_enter -; CHECK3: call void @__sanitizer_cov_trace_basic_block -; CHECK3: call void @__sanitizer_cov_trace_basic_block -; CHECK3-NOT: call void @__sanitizer_cov_trace_basic_block -; CHECK3: ret void - ; CHECK_PC-LABEL: define void @foo ; CHECK_PC: call void @__sanitizer_cov_trace_pc ; CHECK_PC: call void @__sanitizer_cov_trace_pc diff --git a/test/MC/AArch64/basic-a64-diagnostics.s b/test/MC/AArch64/basic-a64-diagnostics.s index 80e32c48673a6..d37c0d5aba2a6 100644 --- a/test/MC/AArch64/basic-a64-diagnostics.s +++ b/test/MC/AArch64/basic-a64-diagnostics.s @@ -1781,12 +1781,20 @@ ;; Exponent too large fmov d3, #0.0625 fmov s2, #32.0 + fmov s2, #32 + fmov v0.4s, #-32 // CHECK-ERROR: error: expected compatible register or floating-point constant // CHECK-ERROR-NEXT: fmov d3, #0.0625 // CHECK-ERROR-NEXT: ^ // CHECK-ERROR-NEXT: error: expected compatible register or floating-point constant // CHECK-ERROR-NEXT: fmov s2, #32.0 // CHECK-ERROR-NEXT: ^ +// CHECK-ERROR-NEXT: error: expected compatible register or floating-point constant +// CHECK-ERROR-NEXT: fmov s2, #32 +// CHECK-ERROR-NEXT: ^ +// CHECK-ERROR-NEXT: error: expected compatible register or floating-point constant +// CHECK-ERROR-NEXT: fmov v0.4s, #-32 +// CHECK-ERROR-NEXT: ^ ;; Fraction too precise fmov s9, #1.03125 @@ -1798,11 +1806,17 @@ // CHECK-ERROR-NEXT: fmov s28, #1.96875 // CHECK-ERROR-NEXT: ^ - ;; No particular reason, but a striking omission - fmov d0, #0.0 -// CHECK-ERROR-AARCH64: error: expected compatible register or floating-point constant -// CHECK-ERROR-AARCH64-NEXT: fmov d0, #0.0 -// CHECK-ERROR-AARCH64-NEXT: ^ + ;; 
Explicitly encoded value too large + fmov s15, #0x100 +// CHECK-ERROR: error: encoded floating point value out of range +// CHECK-ERROR-NEXT: fmov s15, #0x100 +// CHECK-ERROR-NEXT: ^ + + ;; Not possible to fmov ZR to a whole vector + fmov v0.4s, #0.0 +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR-NEXT: fmov v0.4s, #0.0 +// CHECK-ERROR-NEXT: ^ //------------------------------------------------------------------------------ // Floating-point <-> integer conversion diff --git a/test/MC/AMDGPU/gfx7_asm_all.s b/test/MC/AMDGPU/gfx7_asm_all.s index d1d864c3ffeba..34c4f429ce24f 100644 --- a/test/MC/AMDGPU/gfx7_asm_all.s +++ b/test/MC/AMDGPU/gfx7_asm_all.s @@ -1,7 +1,5 @@ // RUN: llvm-mc -arch=amdgcn -mcpu=bonaire -show-encoding %s | FileCheck %s -// *** GENERATED BY TESTGEN, DO NOT EDIT! *** - ds_add_u32 v1, v2 offset:65535 // CHECK: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00] @@ -458,24 +456,12 @@ ds_max_f32 v1, v2 offset:65535 gds ds_gws_init v1 gds // CHECK: [0x00,0x00,0x66,0xd8,0x00,0x01,0x00,0x00] -ds_gws_sema_v gds -// CHECK: [0x00,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00] - ds_gws_sema_br v1 gds // CHECK: [0x00,0x00,0x6e,0xd8,0x00,0x01,0x00,0x00] -ds_gws_sema_p gds -// CHECK: [0x00,0x00,0x72,0xd8,0x00,0x00,0x00,0x00] - ds_gws_barrier v1 gds // CHECK: [0x00,0x00,0x76,0xd8,0x00,0x01,0x00,0x00] -ds_gws_sema_release_all offset:65535 gds -// CHECK: [0xff,0xff,0x62,0xd8,0x00,0x00,0x00,0x00] - -ds_gws_sema_release_all gds -// CHECK: [0x00,0x00,0x62,0xd8,0x00,0x00,0x00,0x00] - ds_write_b8 v1, v2 offset:65535 // CHECK: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00] @@ -2666,23 +2652,89 @@ ds_max_src2_f64 v1 offset:4 ds_max_src2_f64 v1 offset:65535 gds // CHECK: [0xff,0xff,0x4e,0xdb,0x01,0x00,0x00,0x00] -ds_wrap_rtn_b32 v255, v1, v2, v3 offset:65535 -// CHECK: [0xff,0xff,0xd0,0xd8,0x01,0x02,0x03,0xff] +ds_write_b96 v1, v[2:4] offset:65535 +// CHECK: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b96 v255, v[2:4] offset:65535 +// CHECK: 
[0xff,0xff,0x78,0xdb,0xff,0x02,0x00,0x00] + +ds_write_b96 v1, v[253:255] offset:65535 +// CHECK: [0xff,0xff,0x78,0xdb,0x01,0xfd,0x00,0x00] + +ds_write_b96 v1, v[2:4] +// CHECK: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b96 v1, v[2:4] offset:0 +// CHECK: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b96 v1, v[2:4] offset:4 +// CHECK: [0x04,0x00,0x78,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b96 v1, v[2:4] offset:65535 gds +// CHECK: [0xff,0xff,0x7a,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b128 v1, v[2:5] offset:65535 +// CHECK: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b128 v255, v[2:5] offset:65535 +// CHECK: [0xff,0xff,0x7c,0xdb,0xff,0x02,0x00,0x00] + +ds_write_b128 v1, v[252:255] offset:65535 +// CHECK: [0xff,0xff,0x7c,0xdb,0x01,0xfc,0x00,0x00] + +ds_write_b128 v1, v[2:5] +// CHECK: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b128 v1, v[2:5] offset:0 +// CHECK: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b128 v1, v[2:5] offset:4 +// CHECK: [0x04,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00] + +ds_write_b128 v1, v[2:5] offset:65535 gds +// CHECK: [0xff,0xff,0x7e,0xdb,0x01,0x02,0x00,0x00] -ds_wrap_rtn_b32 v255, v1, v2, v3 offset:65535 gds -// CHECK: [0xff,0xff,0xd2,0xd8,0x01,0x02,0x03,0xff] +ds_read_b96 v[5:7], v1 offset:65535 +// CHECK: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x05] -ds_wrap_rtn_b32 v255, v1, v2, v3 -// CHECK: [0x00,0x00,0xd0,0xd8,0x01,0x02,0x03,0xff] +ds_read_b96 v[253:255], v1 offset:65535 +// CHECK: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0xfd] -ds_condxchg32_rtn_b64 v[5:6], v1, v[2:3] -// CHECK: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x05] +ds_read_b96 v[5:7], v255 offset:65535 +// CHECK: [0xff,0xff,0xf8,0xdb,0xff,0x00,0x00,0x05] -ds_condxchg32_rtn_b64 v[5:6], v1, v[2:3] gds -// CHECK: [0x00,0x00,0xfa,0xd9,0x01,0x02,0x00,0x05] +ds_read_b96 v[5:7], v1 +// CHECK: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x05] -ds_condxchg32_rtn_b64 v[5:6], v1, v[254:255] offset:65535 -// CHECK: [0xff,0xff,0xf8,0xd9,0x01,0xfe,0x00,0x05] 
+ds_read_b96 v[5:7], v1 offset:0 +// CHECK: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b96 v[5:7], v1 offset:4 +// CHECK: [0x04,0x00,0xf8,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b96 v[5:7], v1 offset:65535 gds +// CHECK: [0xff,0xff,0xfa,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b128 v[5:8], v1 offset:65535 +// CHECK: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b128 v[252:255], v1 offset:65535 +// CHECK: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0xfc] + +ds_read_b128 v[5:8], v255 offset:65535 +// CHECK: [0xff,0xff,0xfc,0xdb,0xff,0x00,0x00,0x05] + +ds_read_b128 v[5:8], v1 +// CHECK: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b128 v[5:8], v1 offset:0 +// CHECK: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b128 v[5:8], v1 offset:4 +// CHECK: [0x04,0x00,0xfc,0xdb,0x01,0x00,0x00,0x05] + +ds_read_b128 v[5:8], v1 offset:65535 gds +// CHECK: [0xff,0xff,0xfe,0xdb,0x01,0x00,0x00,0x05] exp mrt0, v0, v0, v0, v0 // CHECK: [0x0f,0x00,0x00,0xf8,0x00,0x00,0x00,0x00] @@ -23165,8 +23217,17 @@ v_cvt_i32_f64_e64 v5, ttmp[10:11] v_cvt_i32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x06,0xd3,0x7e,0x00,0x00,0x00] -v_cvt_i32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x06,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_i32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x06,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_i32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x06,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_i32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x06,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_i32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x06,0xd3,0xf7,0x00,0x00,0x00] v_cvt_i32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x06,0xd3,0x01,0x01,0x00,0x00] @@ -23690,8 +23751,17 @@ v_cvt_u32_f32_e64 v5, exec_lo v_cvt_u32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x0e,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_u32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x0e,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_u32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x0e,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_u32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x0e,0xd3,0xc1,0x00,0x00,0x00] + 
+v_cvt_u32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x0e,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_u32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x0e,0xd3,0xf7,0x00,0x00,0x00] v_cvt_u32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x0e,0xd3,0x01,0x01,0x00,0x00] @@ -23819,8 +23889,17 @@ v_cvt_i32_f32_e64 v5, exec_lo v_cvt_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x10,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x10,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x10,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x10,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_i32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x10,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x10,0xd3,0xf7,0x00,0x00,0x00] v_cvt_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x10,0xd3,0x01,0x01,0x00,0x00] @@ -24080,8 +24159,17 @@ v_cvt_f16_f32_e64 v5, exec_lo v_cvt_f16_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x14,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x14,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x14,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_f16_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x14,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_f16_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x14,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_f16_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x14,0xd3,0xf7,0x00,0x00,0x00] v_cvt_f16_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x14,0xd3,0x01,0x01,0x00,0x00] @@ -24197,9 +24285,6 @@ v_cvt_f32_f16_e64 v5, exec_lo v_cvt_f32_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x16,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_f32_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x16,0xd3,0xfd,0x00,0x00,0x00] - v_cvt_f32_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x16,0xd3,0x01,0x01,0x00,0x00] @@ -24332,8 +24417,17 @@ v_cvt_rpi_i32_f32_e64 v5, exec_lo v_cvt_rpi_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x18,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_rpi_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x18,0xd3,0xfd,0x00,0x00,0x00] 
+v_cvt_rpi_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x18,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_rpi_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x18,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_rpi_i32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x18,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_rpi_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x18,0xd3,0xf7,0x00,0x00,0x00] v_cvt_rpi_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x18,0xd3,0x01,0x01,0x00,0x00] @@ -24461,8 +24555,17 @@ v_cvt_flr_i32_f32_e64 v5, exec_lo v_cvt_flr_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x1a,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_flr_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x1a,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_flr_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x1a,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_flr_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x1a,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_flr_i32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x1a,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_flr_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x1a,0xd3,0xf7,0x00,0x00,0x00] v_cvt_flr_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x1a,0xd3,0x01,0x01,0x00,0x00] @@ -24692,8 +24795,17 @@ v_cvt_f32_f64_e64 v5, ttmp[10:11] v_cvt_f32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x1e,0xd3,0x7e,0x00,0x00,0x00] -v_cvt_f32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x1e,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_f32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x1e,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_f32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x1e,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_f32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x1e,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_f32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x1e,0xd3,0xf7,0x00,0x00,0x00] v_cvt_f32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x1e,0xd3,0x01,0x01,0x00,0x00] @@ -24833,8 +24945,17 @@ v_cvt_f64_f32_e64 v[5:6], exec_lo v_cvt_f64_f32_e64 v[5:6], exec_hi // CHECK: [0x05,0x00,0x20,0xd3,0x7f,0x00,0x00,0x00] -v_cvt_f64_f32_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x20,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_f64_f32_e64 v[5:6], 0 +// CHECK: 
[0x05,0x00,0x20,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_f64_f32_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x20,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_f64_f32_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x20,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_f64_f32_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x20,0xd3,0xf7,0x00,0x00,0x00] v_cvt_f64_f32_e64 v[5:6], v1 // CHECK: [0x05,0x00,0x20,0xd3,0x01,0x01,0x00,0x00] @@ -25472,8 +25593,17 @@ v_cvt_u32_f64_e64 v5, ttmp[10:11] v_cvt_u32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x2a,0xd3,0x7e,0x00,0x00,0x00] -v_cvt_u32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x2a,0xd3,0xfd,0x00,0x00,0x00] +v_cvt_u32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x2a,0xd3,0x80,0x00,0x00,0x00] + +v_cvt_u32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x2a,0xd3,0xc1,0x00,0x00,0x00] + +v_cvt_u32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x2a,0xd3,0xf0,0x00,0x00,0x00] + +v_cvt_u32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x2a,0xd3,0xf7,0x00,0x00,0x00] v_cvt_u32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x2a,0xd3,0x01,0x01,0x00,0x00] @@ -25703,8 +25833,17 @@ v_trunc_f64_e64 v[5:6], ttmp[10:11] v_trunc_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x2e,0xd3,0x7e,0x00,0x00,0x00] -v_trunc_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x2e,0xd3,0xfd,0x00,0x00,0x00] +v_trunc_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x2e,0xd3,0x80,0x00,0x00,0x00] + +v_trunc_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x2e,0xd3,0xc1,0x00,0x00,0x00] + +v_trunc_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x2e,0xd3,0xf0,0x00,0x00,0x00] + +v_trunc_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x2e,0xd3,0xf7,0x00,0x00,0x00] v_trunc_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x2e,0xd3,0x01,0x01,0x00,0x00] @@ -25814,8 +25953,17 @@ v_ceil_f64_e64 v[5:6], ttmp[10:11] v_ceil_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x30,0xd3,0x7e,0x00,0x00,0x00] -v_ceil_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x30,0xd3,0xfd,0x00,0x00,0x00] +v_ceil_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x30,0xd3,0x80,0x00,0x00,0x00] + +v_ceil_f64_e64 v[5:6], -1 +// CHECK: 
[0x05,0x00,0x30,0xd3,0xc1,0x00,0x00,0x00] + +v_ceil_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x30,0xd3,0xf0,0x00,0x00,0x00] + +v_ceil_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x30,0xd3,0xf7,0x00,0x00,0x00] v_ceil_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x30,0xd3,0x01,0x01,0x00,0x00] @@ -25925,8 +26073,17 @@ v_rndne_f64_e64 v[5:6], ttmp[10:11] v_rndne_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x32,0xd3,0x7e,0x00,0x00,0x00] -v_rndne_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x32,0xd3,0xfd,0x00,0x00,0x00] +v_rndne_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x32,0xd3,0x80,0x00,0x00,0x00] + +v_rndne_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x32,0xd3,0xc1,0x00,0x00,0x00] + +v_rndne_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x32,0xd3,0xf0,0x00,0x00,0x00] + +v_rndne_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x32,0xd3,0xf7,0x00,0x00,0x00] v_rndne_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x32,0xd3,0x01,0x01,0x00,0x00] @@ -26036,8 +26193,17 @@ v_floor_f64_e64 v[5:6], ttmp[10:11] v_floor_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x34,0xd3,0x7e,0x00,0x00,0x00] -v_floor_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x34,0xd3,0xfd,0x00,0x00,0x00] +v_floor_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x34,0xd3,0x80,0x00,0x00,0x00] + +v_floor_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x34,0xd3,0xc1,0x00,0x00,0x00] + +v_floor_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x34,0xd3,0xf0,0x00,0x00,0x00] + +v_floor_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x34,0xd3,0xf7,0x00,0x00,0x00] v_floor_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x34,0xd3,0x01,0x01,0x00,0x00] @@ -26177,8 +26343,17 @@ v_fract_f32_e64 v5, exec_lo v_fract_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x40,0xd3,0x7f,0x00,0x00,0x00] -v_fract_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x40,0xd3,0xfd,0x00,0x00,0x00] +v_fract_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x40,0xd3,0x80,0x00,0x00,0x00] + +v_fract_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x40,0xd3,0xc1,0x00,0x00,0x00] + +v_fract_f32_e64 v5, 0.5 +// CHECK: 
[0x05,0x00,0x40,0xd3,0xf0,0x00,0x00,0x00] + +v_fract_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x40,0xd3,0xf7,0x00,0x00,0x00] v_fract_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x40,0xd3,0x01,0x01,0x00,0x00] @@ -26318,8 +26493,17 @@ v_trunc_f32_e64 v5, exec_lo v_trunc_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x42,0xd3,0x7f,0x00,0x00,0x00] -v_trunc_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x42,0xd3,0xfd,0x00,0x00,0x00] +v_trunc_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x42,0xd3,0x80,0x00,0x00,0x00] + +v_trunc_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x42,0xd3,0xc1,0x00,0x00,0x00] + +v_trunc_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x42,0xd3,0xf0,0x00,0x00,0x00] + +v_trunc_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x42,0xd3,0xf7,0x00,0x00,0x00] v_trunc_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x42,0xd3,0x01,0x01,0x00,0x00] @@ -26459,8 +26643,17 @@ v_ceil_f32_e64 v5, exec_lo v_ceil_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x44,0xd3,0x7f,0x00,0x00,0x00] -v_ceil_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x44,0xd3,0xfd,0x00,0x00,0x00] +v_ceil_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x44,0xd3,0x80,0x00,0x00,0x00] + +v_ceil_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x44,0xd3,0xc1,0x00,0x00,0x00] + +v_ceil_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x44,0xd3,0xf0,0x00,0x00,0x00] + +v_ceil_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x44,0xd3,0xf7,0x00,0x00,0x00] v_ceil_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x44,0xd3,0x01,0x01,0x00,0x00] @@ -26600,8 +26793,17 @@ v_rndne_f32_e64 v5, exec_lo v_rndne_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x46,0xd3,0x7f,0x00,0x00,0x00] -v_rndne_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x46,0xd3,0xfd,0x00,0x00,0x00] +v_rndne_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x46,0xd3,0x80,0x00,0x00,0x00] + +v_rndne_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x46,0xd3,0xc1,0x00,0x00,0x00] + +v_rndne_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x46,0xd3,0xf0,0x00,0x00,0x00] + +v_rndne_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x46,0xd3,0xf7,0x00,0x00,0x00] v_rndne_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x46,0xd3,0x01,0x01,0x00,0x00] @@ 
-26741,8 +26943,17 @@ v_floor_f32_e64 v5, exec_lo v_floor_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x48,0xd3,0x7f,0x00,0x00,0x00] -v_floor_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x48,0xd3,0xfd,0x00,0x00,0x00] +v_floor_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x48,0xd3,0x80,0x00,0x00,0x00] + +v_floor_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x48,0xd3,0xc1,0x00,0x00,0x00] + +v_floor_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x48,0xd3,0xf0,0x00,0x00,0x00] + +v_floor_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x48,0xd3,0xf7,0x00,0x00,0x00] v_floor_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x48,0xd3,0x01,0x01,0x00,0x00] @@ -26885,11 +27096,14 @@ v_exp_f32_e64 v5, exec_hi v_exp_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x4a,0xd3,0x80,0x00,0x00,0x00] +v_exp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x4a,0xd3,0xc1,0x00,0x00,0x00] + v_exp_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x4a,0xd3,0xf0,0x00,0x00,0x00] -v_exp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x4a,0xd3,0xfd,0x00,0x00,0x00] +v_exp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4a,0xd3,0xf7,0x00,0x00,0x00] v_exp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x01,0x00,0x00] @@ -27026,11 +27240,14 @@ v_log_clamp_f32_e64 v5, exec_hi v_log_clamp_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x4c,0xd3,0x80,0x00,0x00,0x00] +v_log_clamp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x4c,0xd3,0xc1,0x00,0x00,0x00] + v_log_clamp_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x4c,0xd3,0xf0,0x00,0x00,0x00] -v_log_clamp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x4c,0xd3,0xfd,0x00,0x00,0x00] +v_log_clamp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4c,0xd3,0xf7,0x00,0x00,0x00] v_log_clamp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x01,0x00,0x00] @@ -27167,11 +27384,14 @@ v_log_f32_e64 v5, exec_hi v_log_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x4e,0xd3,0x80,0x00,0x00,0x00] +v_log_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x4e,0xd3,0xc1,0x00,0x00,0x00] + v_log_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x4e,0xd3,0xf0,0x00,0x00,0x00] -v_log_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x4e,0xd3,0xfd,0x00,0x00,0x00] 
+v_log_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4e,0xd3,0xf7,0x00,0x00,0x00] v_log_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x01,0x00,0x00] @@ -27308,11 +27528,14 @@ v_rcp_clamp_f32_e64 v5, exec_hi v_rcp_clamp_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x50,0xd3,0x80,0x00,0x00,0x00] +v_rcp_clamp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x50,0xd3,0xc1,0x00,0x00,0x00] + v_rcp_clamp_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x50,0xd3,0xf0,0x00,0x00,0x00] -v_rcp_clamp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x50,0xd3,0xfd,0x00,0x00,0x00] +v_rcp_clamp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x50,0xd3,0xf7,0x00,0x00,0x00] v_rcp_clamp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x50,0xd3,0x01,0x01,0x00,0x00] @@ -27449,11 +27672,14 @@ v_rcp_legacy_f32_e64 v5, exec_hi v_rcp_legacy_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x52,0xd3,0x80,0x00,0x00,0x00] +v_rcp_legacy_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x52,0xd3,0xc1,0x00,0x00,0x00] + v_rcp_legacy_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x52,0xd3,0xf0,0x00,0x00,0x00] -v_rcp_legacy_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x52,0xd3,0xfd,0x00,0x00,0x00] +v_rcp_legacy_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x52,0xd3,0xf7,0x00,0x00,0x00] v_rcp_legacy_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x52,0xd3,0x01,0x01,0x00,0x00] @@ -27590,11 +27816,14 @@ v_rcp_f32_e64 v5, exec_hi v_rcp_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x54,0xd3,0x80,0x00,0x00,0x00] +v_rcp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x54,0xd3,0xc1,0x00,0x00,0x00] + v_rcp_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x54,0xd3,0xf0,0x00,0x00,0x00] -v_rcp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x54,0xd3,0xfd,0x00,0x00,0x00] +v_rcp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x54,0xd3,0xf7,0x00,0x00,0x00] v_rcp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x54,0xd3,0x01,0x01,0x00,0x00] @@ -27728,8 +27957,17 @@ v_rcp_iflag_f32_e64 v5, exec_lo v_rcp_iflag_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x56,0xd3,0x7f,0x00,0x00,0x00] -v_rcp_iflag_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x56,0xd3,0xfd,0x00,0x00,0x00] +v_rcp_iflag_f32_e64 v5, 0 +// 
CHECK: [0x05,0x00,0x56,0xd3,0x80,0x00,0x00,0x00] + +v_rcp_iflag_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x56,0xd3,0xc1,0x00,0x00,0x00] + +v_rcp_iflag_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x56,0xd3,0xf0,0x00,0x00,0x00] + +v_rcp_iflag_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x56,0xd3,0xf7,0x00,0x00,0x00] v_rcp_iflag_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x56,0xd3,0x01,0x01,0x00,0x00] @@ -27869,8 +28107,17 @@ v_rsq_clamp_f32_e64 v5, exec_lo v_rsq_clamp_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x58,0xd3,0x7f,0x00,0x00,0x00] -v_rsq_clamp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x58,0xd3,0xfd,0x00,0x00,0x00] +v_rsq_clamp_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x58,0xd3,0x80,0x00,0x00,0x00] + +v_rsq_clamp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x58,0xd3,0xc1,0x00,0x00,0x00] + +v_rsq_clamp_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x58,0xd3,0xf0,0x00,0x00,0x00] + +v_rsq_clamp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x58,0xd3,0xf7,0x00,0x00,0x00] v_rsq_clamp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x58,0xd3,0x01,0x01,0x00,0x00] @@ -28010,8 +28257,17 @@ v_rsq_legacy_f32_e64 v5, exec_lo v_rsq_legacy_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x5a,0xd3,0x7f,0x00,0x00,0x00] -v_rsq_legacy_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5a,0xd3,0xfd,0x00,0x00,0x00] +v_rsq_legacy_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x5a,0xd3,0x80,0x00,0x00,0x00] + +v_rsq_legacy_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x5a,0xd3,0xc1,0x00,0x00,0x00] + +v_rsq_legacy_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x5a,0xd3,0xf0,0x00,0x00,0x00] + +v_rsq_legacy_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5a,0xd3,0xf7,0x00,0x00,0x00] v_rsq_legacy_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x01,0x00,0x00] @@ -28151,8 +28407,17 @@ v_rsq_f32_e64 v5, exec_lo v_rsq_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x5c,0xd3,0x7f,0x00,0x00,0x00] -v_rsq_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5c,0xd3,0xfd,0x00,0x00,0x00] +v_rsq_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x5c,0xd3,0x80,0x00,0x00,0x00] + +v_rsq_f32_e64 v5, -1 +// CHECK: 
[0x05,0x00,0x5c,0xd3,0xc1,0x00,0x00,0x00] + +v_rsq_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x5c,0xd3,0xf0,0x00,0x00,0x00] + +v_rsq_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5c,0xd3,0xf7,0x00,0x00,0x00] v_rsq_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x01,0x00,0x00] @@ -28262,8 +28527,17 @@ v_rcp_f64_e64 v[5:6], ttmp[10:11] v_rcp_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x5e,0xd3,0x7e,0x00,0x00,0x00] -v_rcp_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x5e,0xd3,0xfd,0x00,0x00,0x00] +v_rcp_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x5e,0xd3,0x80,0x00,0x00,0x00] + +v_rcp_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x5e,0xd3,0xc1,0x00,0x00,0x00] + +v_rcp_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x5e,0xd3,0xf0,0x00,0x00,0x00] + +v_rcp_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x5e,0xd3,0xf7,0x00,0x00,0x00] v_rcp_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x5e,0xd3,0x01,0x01,0x00,0x00] @@ -28373,8 +28647,17 @@ v_rcp_clamp_f64_e64 v[5:6], ttmp[10:11] v_rcp_clamp_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x60,0xd3,0x7e,0x00,0x00,0x00] -v_rcp_clamp_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x60,0xd3,0xfd,0x00,0x00,0x00] +v_rcp_clamp_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x60,0xd3,0x80,0x00,0x00,0x00] + +v_rcp_clamp_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x60,0xd3,0xc1,0x00,0x00,0x00] + +v_rcp_clamp_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x60,0xd3,0xf0,0x00,0x00,0x00] + +v_rcp_clamp_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x60,0xd3,0xf7,0x00,0x00,0x00] v_rcp_clamp_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x60,0xd3,0x01,0x01,0x00,0x00] @@ -28484,8 +28767,17 @@ v_rsq_f64_e64 v[5:6], ttmp[10:11] v_rsq_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x62,0xd3,0x7e,0x00,0x00,0x00] -v_rsq_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x62,0xd3,0xfd,0x00,0x00,0x00] +v_rsq_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x62,0xd3,0x80,0x00,0x00,0x00] + +v_rsq_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x62,0xd3,0xc1,0x00,0x00,0x00] + +v_rsq_f64_e64 v[5:6], 0.5 +// CHECK: 
[0x05,0x00,0x62,0xd3,0xf0,0x00,0x00,0x00] + +v_rsq_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x62,0xd3,0xf7,0x00,0x00,0x00] v_rsq_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x62,0xd3,0x01,0x01,0x00,0x00] @@ -28595,8 +28887,17 @@ v_rsq_clamp_f64_e64 v[5:6], ttmp[10:11] v_rsq_clamp_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x64,0xd3,0x7e,0x00,0x00,0x00] -v_rsq_clamp_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x64,0xd3,0xfd,0x00,0x00,0x00] +v_rsq_clamp_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x64,0xd3,0x80,0x00,0x00,0x00] + +v_rsq_clamp_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x64,0xd3,0xc1,0x00,0x00,0x00] + +v_rsq_clamp_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x64,0xd3,0xf0,0x00,0x00,0x00] + +v_rsq_clamp_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x64,0xd3,0xf7,0x00,0x00,0x00] v_rsq_clamp_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x64,0xd3,0x01,0x01,0x00,0x00] @@ -28736,8 +29037,17 @@ v_sqrt_f32_e64 v5, exec_lo v_sqrt_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x66,0xd3,0x7f,0x00,0x00,0x00] -v_sqrt_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x66,0xd3,0xfd,0x00,0x00,0x00] +v_sqrt_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x66,0xd3,0x80,0x00,0x00,0x00] + +v_sqrt_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x66,0xd3,0xc1,0x00,0x00,0x00] + +v_sqrt_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x66,0xd3,0xf0,0x00,0x00,0x00] + +v_sqrt_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x66,0xd3,0xf7,0x00,0x00,0x00] v_sqrt_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x66,0xd3,0x01,0x01,0x00,0x00] @@ -28847,8 +29157,17 @@ v_sqrt_f64_e64 v[5:6], ttmp[10:11] v_sqrt_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x68,0xd3,0x7e,0x00,0x00,0x00] -v_sqrt_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x68,0xd3,0xfd,0x00,0x00,0x00] +v_sqrt_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x68,0xd3,0x80,0x00,0x00,0x00] + +v_sqrt_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x68,0xd3,0xc1,0x00,0x00,0x00] + +v_sqrt_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x68,0xd3,0xf0,0x00,0x00,0x00] + +v_sqrt_f64_e64 v[5:6], -4.0 +// CHECK: 
[0x05,0x00,0x68,0xd3,0xf7,0x00,0x00,0x00] v_sqrt_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x68,0xd3,0x01,0x01,0x00,0x00] @@ -28988,8 +29307,17 @@ v_sin_f32_e64 v5, exec_lo v_sin_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x6a,0xd3,0x7f,0x00,0x00,0x00] -v_sin_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x6a,0xd3,0xfd,0x00,0x00,0x00] +v_sin_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x6a,0xd3,0x80,0x00,0x00,0x00] + +v_sin_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x6a,0xd3,0xc1,0x00,0x00,0x00] + +v_sin_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x6a,0xd3,0xf0,0x00,0x00,0x00] + +v_sin_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x6a,0xd3,0xf7,0x00,0x00,0x00] v_sin_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x01,0x00,0x00] @@ -29129,8 +29457,17 @@ v_cos_f32_e64 v5, exec_lo v_cos_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x6c,0xd3,0x7f,0x00,0x00,0x00] -v_cos_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x6c,0xd3,0xfd,0x00,0x00,0x00] +v_cos_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x6c,0xd3,0x80,0x00,0x00,0x00] + +v_cos_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x6c,0xd3,0xc1,0x00,0x00,0x00] + +v_cos_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x6c,0xd3,0xf0,0x00,0x00,0x00] + +v_cos_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x6c,0xd3,0xf7,0x00,0x00,0x00] v_cos_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x01,0x00,0x00] @@ -29900,8 +30237,17 @@ v_frexp_exp_i32_f64_e64 v5, ttmp[10:11] v_frexp_exp_i32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x78,0xd3,0x7e,0x00,0x00,0x00] -v_frexp_exp_i32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x78,0xd3,0xfd,0x00,0x00,0x00] +v_frexp_exp_i32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x78,0xd3,0x80,0x00,0x00,0x00] + +v_frexp_exp_i32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x78,0xd3,0xc1,0x00,0x00,0x00] + +v_frexp_exp_i32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x78,0xd3,0xf0,0x00,0x00,0x00] + +v_frexp_exp_i32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x78,0xd3,0xf7,0x00,0x00,0x00] v_frexp_exp_i32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x78,0xd3,0x01,0x01,0x00,0x00] @@ -29999,8 +30345,17 @@ 
v_frexp_mant_f64_e64 v[5:6], ttmp[10:11] v_frexp_mant_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x7a,0xd3,0x7e,0x00,0x00,0x00] -v_frexp_mant_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x7a,0xd3,0xfd,0x00,0x00,0x00] +v_frexp_mant_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x7a,0xd3,0x80,0x00,0x00,0x00] + +v_frexp_mant_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x7a,0xd3,0xc1,0x00,0x00,0x00] + +v_frexp_mant_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x7a,0xd3,0xf0,0x00,0x00,0x00] + +v_frexp_mant_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x7a,0xd3,0xf7,0x00,0x00,0x00] v_frexp_mant_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x7a,0xd3,0x01,0x01,0x00,0x00] @@ -30110,8 +30465,17 @@ v_fract_f64_e64 v[5:6], ttmp[10:11] v_fract_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x7c,0xd3,0x7e,0x00,0x00,0x00] -v_fract_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x7c,0xd3,0xfd,0x00,0x00,0x00] +v_fract_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x7c,0xd3,0x80,0x00,0x00,0x00] + +v_fract_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x7c,0xd3,0xc1,0x00,0x00,0x00] + +v_fract_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x7c,0xd3,0xf0,0x00,0x00,0x00] + +v_fract_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x7c,0xd3,0xf7,0x00,0x00,0x00] v_fract_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x7c,0xd3,0x01,0x01,0x00,0x00] @@ -30254,11 +30618,14 @@ v_frexp_exp_i32_f32_e64 v5, exec_hi v_frexp_exp_i32_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x7e,0xd3,0x80,0x00,0x00,0x00] +v_frexp_exp_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x7e,0xd3,0xc1,0x00,0x00,0x00] + v_frexp_exp_i32_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x7e,0xd3,0xf0,0x00,0x00,0x00] -v_frexp_exp_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x7e,0xd3,0xfd,0x00,0x00,0x00] +v_frexp_exp_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x7e,0xd3,0xf7,0x00,0x00,0x00] v_frexp_exp_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x7e,0xd3,0x01,0x01,0x00,0x00] @@ -30383,11 +30750,14 @@ v_frexp_mant_f32_e64 v5, exec_hi v_frexp_mant_f32_e64 v5, 0 // CHECK: 
[0x05,0x00,0x80,0xd3,0x80,0x00,0x00,0x00] +v_frexp_mant_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x80,0xd3,0xc1,0x00,0x00,0x00] + v_frexp_mant_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x80,0xd3,0xf0,0x00,0x00,0x00] -v_frexp_mant_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x80,0xd3,0xfd,0x00,0x00,0x00] +v_frexp_mant_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x80,0xd3,0xf7,0x00,0x00,0x00] v_frexp_mant_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x80,0xd3,0x01,0x01,0x00,0x00] @@ -30599,8 +30969,17 @@ v_log_legacy_f32_e64 v5, exec_lo v_log_legacy_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x8a,0xd3,0x7f,0x00,0x00,0x00] -v_log_legacy_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x8a,0xd3,0xfd,0x00,0x00,0x00] +v_log_legacy_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x8a,0xd3,0x80,0x00,0x00,0x00] + +v_log_legacy_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x8a,0xd3,0xc1,0x00,0x00,0x00] + +v_log_legacy_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x8a,0xd3,0xf0,0x00,0x00,0x00] + +v_log_legacy_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x8a,0xd3,0xf7,0x00,0x00,0x00] v_log_legacy_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x01,0x00,0x00] @@ -30740,8 +31119,17 @@ v_exp_legacy_f32_e64 v5, exec_lo v_exp_legacy_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x8c,0xd3,0x7f,0x00,0x00,0x00] -v_exp_legacy_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x8c,0xd3,0xfd,0x00,0x00,0x00] +v_exp_legacy_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x8c,0xd3,0x80,0x00,0x00,0x00] + +v_exp_legacy_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x8c,0xd3,0xc1,0x00,0x00,0x00] + +v_exp_legacy_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x8c,0xd3,0xf0,0x00,0x00,0x00] + +v_exp_legacy_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x8c,0xd3,0xf7,0x00,0x00,0x00] v_exp_legacy_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x01,0x00,0x00] @@ -31025,92 +31413,113 @@ v_add_f32 v5, v255, v2 v_add_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x06] +v_add_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x00] + +v_add_f32_e64 v255, 0, s2 +// CHECK: 
[0xff,0x00,0x06,0xd2,0x80,0x04,0x00,0x00] + +v_add_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x06,0xd2,0xc1,0x04,0x00,0x00] + +v_add_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x06,0xd2,0xf0,0x04,0x00,0x00] + +v_add_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x06,0xd2,0xf7,0x04,0x00,0x00] + v_add_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x00] -v_add_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x06,0xd2,0x01,0x05,0x00,0x00] - v_add_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x06,0xd2,0xff,0x05,0x00,0x00] -v_add_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xcf,0x00,0x00] +v_add_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xce,0x00,0x00] + +v_add_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xd0,0x00,0x00] + +v_add_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xd2,0x00,0x00] -v_add_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd1,0x00,0x00] +v_add_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xd4,0x00,0x00] -v_add_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd3,0x00,0x00] +v_add_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xd6,0x00,0x00] -v_add_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd5,0x00,0x00] +v_add_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xd8,0x00,0x00] -v_add_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd7,0x00,0x00] +v_add_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xda,0x00,0x00] -v_add_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd9,0x00,0x00] +v_add_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xdc,0x00,0x00] -v_add_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xdb,0x00,0x00] +v_add_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xde,0x00,0x00] -v_add_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xdd,0x00,0x00] +v_add_f32_e64 v5, 0, ttmp11 +// 
CHECK: [0x05,0x00,0x06,0xd2,0x80,0xf6,0x00,0x00] -v_add_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xdf,0x00,0x00] +v_add_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xf8,0x00,0x00] -v_add_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xf7,0x00,0x00] +v_add_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xfc,0x00,0x00] -v_add_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xf9,0x00,0x00] +v_add_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xfe,0x00,0x00] -v_add_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xfd,0x00,0x00] +v_add_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x00,0x01,0x00] -v_add_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xff,0x00,0x00] +v_add_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x82,0x01,0x00] -v_add_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xfb,0x01,0x00] +v_add_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xe0,0x01,0x00] -v_add_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x02,0x00] +v_add_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xee,0x01,0x00] -v_add_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xff,0x03,0x00] +v_add_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x02,0x00] -v_add_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x20] +v_add_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0xfe,0x03,0x00] -v_add_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x40] +v_add_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x20] -v_add_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x60] +v_add_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x40] -v_add_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x06,0xd2,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x60] 
-v_add_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x06,0xd2,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x06,0xd2,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x06,0xd2,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x06,0xd2,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x06,0xd2,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x06,0xd2,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x08] +v_add_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x06,0xd2,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x10] +v_add_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x08] -v_add_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x18] +v_add_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x10] + +v_add_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x06,0xd2,0x80,0x04,0x00,0x18] v_sub_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x08] @@ -31184,92 +31593,113 @@ v_sub_f32 v5, v255, v2 v_sub_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x08] +v_sub_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x00] + +v_sub_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x08,0xd2,0x80,0x04,0x00,0x00] + +v_sub_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x08,0xd2,0xc1,0x04,0x00,0x00] + +v_sub_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x08,0xd2,0xf0,0x04,0x00,0x00] + +v_sub_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x08,0xd2,0xf7,0x04,0x00,0x00] + v_sub_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x00] -v_sub_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x08,0xd2,0x01,0x05,0x00,0x00] - v_sub_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x08,0xd2,0xff,0x05,0x00,0x00] -v_sub_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xcf,0x00,0x00] +v_sub_f32_e64 v5, 0, s103 
+// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xce,0x00,0x00] + +v_sub_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xd0,0x00,0x00] + +v_sub_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xd2,0x00,0x00] -v_sub_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd1,0x00,0x00] +v_sub_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xd4,0x00,0x00] -v_sub_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd3,0x00,0x00] +v_sub_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xd6,0x00,0x00] -v_sub_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd5,0x00,0x00] +v_sub_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xd8,0x00,0x00] -v_sub_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd7,0x00,0x00] +v_sub_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xda,0x00,0x00] -v_sub_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd9,0x00,0x00] +v_sub_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xdc,0x00,0x00] -v_sub_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xdb,0x00,0x00] +v_sub_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xde,0x00,0x00] -v_sub_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xdd,0x00,0x00] +v_sub_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xf6,0x00,0x00] -v_sub_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xdf,0x00,0x00] +v_sub_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xf8,0x00,0x00] -v_sub_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xf7,0x00,0x00] +v_sub_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xfc,0x00,0x00] -v_sub_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xf9,0x00,0x00] +v_sub_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xfe,0x00,0x00] -v_sub_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xfd,0x00,0x00] +v_sub_f32_e64 
v5, 0, 0 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x00,0x01,0x00] -v_sub_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xff,0x00,0x00] +v_sub_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x82,0x01,0x00] -v_sub_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xfb,0x01,0x00] +v_sub_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xe0,0x01,0x00] -v_sub_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x02,0x00] +v_sub_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xee,0x01,0x00] -v_sub_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xff,0x03,0x00] +v_sub_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x02,0x00] -v_sub_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x20] +v_sub_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0xfe,0x03,0x00] -v_sub_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x40] +v_sub_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x20] -v_sub_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x60] +v_sub_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x40] -v_sub_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x08,0xd2,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x60] -v_sub_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x08,0xd2,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x08,0xd2,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x08,0xd2,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x08,0xd2,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x08,0xd2,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x08,0xd2,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x08] +v_sub_f32_e64 v5, 0, s2 clamp +// CHECK: 
[0x05,0x08,0x08,0xd2,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x10] +v_sub_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x08] -v_sub_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x18] +v_sub_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x10] + +v_sub_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x08,0xd2,0x80,0x04,0x00,0x18] v_subrev_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x0a] @@ -31343,92 +31773,113 @@ v_subrev_f32 v5, v255, v2 v_subrev_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x0a] +v_subrev_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x00] + +v_subrev_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x0a,0xd2,0x80,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0xc1,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0xf0,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0xf7,0x04,0x00,0x00] + v_subrev_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x00] -v_subrev_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x0a,0xd2,0x01,0x05,0x00,0x00] - v_subrev_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x0a,0xd2,0xff,0x05,0x00,0x00] -v_subrev_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xcf,0x00,0x00] +v_subrev_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xce,0x00,0x00] + +v_subrev_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xd0,0x00,0x00] + +v_subrev_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xd2,0x00,0x00] -v_subrev_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd1,0x00,0x00] +v_subrev_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xd4,0x00,0x00] -v_subrev_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd3,0x00,0x00] +v_subrev_f32_e64 v5, 0, vcc_hi +// CHECK: 
[0x05,0x00,0x0a,0xd2,0x80,0xd6,0x00,0x00] -v_subrev_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd5,0x00,0x00] +v_subrev_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xd8,0x00,0x00] -v_subrev_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd7,0x00,0x00] +v_subrev_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xda,0x00,0x00] -v_subrev_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd9,0x00,0x00] +v_subrev_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xdc,0x00,0x00] -v_subrev_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xdb,0x00,0x00] +v_subrev_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xde,0x00,0x00] -v_subrev_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xdd,0x00,0x00] +v_subrev_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xf6,0x00,0x00] -v_subrev_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xdf,0x00,0x00] +v_subrev_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xf8,0x00,0x00] -v_subrev_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xf7,0x00,0x00] +v_subrev_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xfc,0x00,0x00] -v_subrev_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xf9,0x00,0x00] +v_subrev_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xfe,0x00,0x00] -v_subrev_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xfd,0x00,0x00] +v_subrev_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x00,0x01,0x00] -v_subrev_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xff,0x00,0x00] +v_subrev_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x82,0x01,0x00] -v_subrev_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xfb,0x01,0x00] +v_subrev_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xe0,0x01,0x00] -v_subrev_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x02,0x00] 
+v_subrev_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xee,0x01,0x00] -v_subrev_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xff,0x03,0x00] +v_subrev_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x02,0x00] -v_subrev_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x20] +v_subrev_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0xfe,0x03,0x00] -v_subrev_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x40] +v_subrev_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x20] -v_subrev_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x60] +v_subrev_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x40] -v_subrev_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x0a,0xd2,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x60] -v_subrev_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x0a,0xd2,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x0a,0xd2,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x0a,0xd2,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x0a,0xd2,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x0a,0xd2,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x0a,0xd2,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x08] +v_subrev_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x0a,0xd2,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x10] +v_subrev_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x08] -v_subrev_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x18] +v_subrev_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x10] + +v_subrev_f32_e64 v5, 0, s2 div:2 +// 
CHECK: [0x05,0x00,0x0a,0xd2,0x80,0x04,0x00,0x18] v_mac_legacy_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x0c] @@ -31502,92 +31953,113 @@ v_mac_legacy_f32 v5, v255, v2 v_mac_legacy_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x0c] +v_mac_legacy_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x00] + +v_mac_legacy_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x0c,0xd2,0x80,0x04,0x00,0x00] + +v_mac_legacy_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0xc1,0x04,0x00,0x00] + +v_mac_legacy_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0xf0,0x04,0x00,0x00] + +v_mac_legacy_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0xf7,0x04,0x00,0x00] + v_mac_legacy_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x00] -v_mac_legacy_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x0c,0xd2,0x01,0x05,0x00,0x00] - v_mac_legacy_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x0c,0xd2,0xff,0x05,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xcf,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xce,0x00,0x00] + +v_mac_legacy_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xd0,0x00,0x00] + +v_mac_legacy_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xd2,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd1,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xd4,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd3,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xd6,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd5,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xd8,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd7,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, tba_hi +// CHECK: 
[0x05,0x00,0x0c,0xd2,0x80,0xda,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd9,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xdc,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xdb,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xde,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xdd,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xf6,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xdf,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xf8,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xf7,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xfc,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xf9,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xfe,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xfd,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x00,0x01,0x00] -v_mac_legacy_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xff,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x82,0x01,0x00] -v_mac_legacy_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xfb,0x01,0x00] +v_mac_legacy_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xe0,0x01,0x00] -v_mac_legacy_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x02,0x00] +v_mac_legacy_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xee,0x01,0x00] -v_mac_legacy_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xff,0x03,0x00] +v_mac_legacy_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x02,0x00] 
-v_mac_legacy_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x20] +v_mac_legacy_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0xfe,0x03,0x00] -v_mac_legacy_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x40] +v_mac_legacy_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x20] -v_mac_legacy_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x60] +v_mac_legacy_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x40] -v_mac_legacy_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x0c,0xd2,0x01,0x05,0x00,0x00] +v_mac_legacy_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x60] -v_mac_legacy_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x0c,0xd2,0x01,0x05,0x00,0x00] +v_mac_legacy_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x0c,0xd2,0x80,0x04,0x00,0x00] -v_mac_legacy_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x0c,0xd2,0x01,0x05,0x00,0x00] +v_mac_legacy_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x0c,0xd2,0x80,0x04,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x0c,0xd2,0x01,0x05,0x00,0x00] +v_mac_legacy_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x0c,0xd2,0x80,0x04,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x08] +v_mac_legacy_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x0c,0xd2,0x80,0x04,0x00,0x00] -v_mac_legacy_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x10] +v_mac_legacy_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x08] -v_mac_legacy_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x18] +v_mac_legacy_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x10] + +v_mac_legacy_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x0c,0xd2,0x80,0x04,0x00,0x18] v_mul_legacy_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x0e] @@ -31661,92 +32133,113 @@ v_mul_legacy_f32 v5, v255, v2 
v_mul_legacy_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x0e] +v_mul_legacy_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x0e,0xd2,0x80,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0xc1,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0xf0,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0xf7,0x04,0x00,0x00] + v_mul_legacy_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x00] -v_mul_legacy_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x0e,0xd2,0x01,0x05,0x00,0x00] - v_mul_legacy_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x0e,0xd2,0xff,0x05,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xcf,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xce,0x00,0x00] + +v_mul_legacy_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xd0,0x00,0x00] + +v_mul_legacy_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xd2,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd1,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xd4,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd3,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xd6,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd5,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xd8,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd7,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xda,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd9,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tma_lo 
+// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xdc,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xdb,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xde,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xdd,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xf6,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xdf,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xf8,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xf7,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xfc,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xf9,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xfe,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xfd,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x00,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xff,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x82,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xfb,0x01,0x00] +v_mul_legacy_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xe0,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x02,0x00] +v_mul_legacy_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xee,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xff,0x03,0x00] +v_mul_legacy_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x02,0x00] -v_mul_legacy_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x20] +v_mul_legacy_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0xfe,0x03,0x00] 
-v_mul_legacy_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x40] +v_mul_legacy_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x20] -v_mul_legacy_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x60] +v_mul_legacy_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x40] -v_mul_legacy_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x0e,0xd2,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x60] -v_mul_legacy_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x0e,0xd2,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x0e,0xd2,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x0e,0xd2,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x0e,0xd2,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x0e,0xd2,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x0e,0xd2,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x08] +v_mul_legacy_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x0e,0xd2,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x10] +v_mul_legacy_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x08] -v_mul_legacy_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x18] +v_mul_legacy_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x10] + +v_mul_legacy_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x0e,0xd2,0x80,0x04,0x00,0x18] v_mul_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x10] @@ -31820,92 +32313,113 @@ v_mul_f32 v5, v255, v2 v_mul_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x10] +v_mul_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x00] + +v_mul_f32_e64 v255, 0, s2 +// CHECK: 
[0xff,0x00,0x10,0xd2,0x80,0x04,0x00,0x00] + +v_mul_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x10,0xd2,0xc1,0x04,0x00,0x00] + +v_mul_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x10,0xd2,0xf0,0x04,0x00,0x00] + +v_mul_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x10,0xd2,0xf7,0x04,0x00,0x00] + v_mul_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x00] -v_mul_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x10,0xd2,0x01,0x05,0x00,0x00] - v_mul_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x10,0xd2,0xff,0x05,0x00,0x00] -v_mul_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xcf,0x00,0x00] +v_mul_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xce,0x00,0x00] + +v_mul_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xd0,0x00,0x00] + +v_mul_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xd2,0x00,0x00] -v_mul_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd1,0x00,0x00] +v_mul_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xd4,0x00,0x00] -v_mul_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd3,0x00,0x00] +v_mul_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xd6,0x00,0x00] -v_mul_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd5,0x00,0x00] +v_mul_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xd8,0x00,0x00] -v_mul_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd7,0x00,0x00] +v_mul_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xda,0x00,0x00] -v_mul_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd9,0x00,0x00] +v_mul_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xdc,0x00,0x00] -v_mul_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xdb,0x00,0x00] +v_mul_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xde,0x00,0x00] -v_mul_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xdd,0x00,0x00] +v_mul_f32_e64 v5, 0, ttmp11 +// 
CHECK: [0x05,0x00,0x10,0xd2,0x80,0xf6,0x00,0x00] -v_mul_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xdf,0x00,0x00] +v_mul_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xf8,0x00,0x00] -v_mul_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xf7,0x00,0x00] +v_mul_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xfc,0x00,0x00] -v_mul_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xf9,0x00,0x00] +v_mul_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xfe,0x00,0x00] -v_mul_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xfd,0x00,0x00] +v_mul_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x00,0x01,0x00] -v_mul_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xff,0x00,0x00] +v_mul_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x82,0x01,0x00] -v_mul_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xfb,0x01,0x00] +v_mul_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xe0,0x01,0x00] -v_mul_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x02,0x00] +v_mul_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xee,0x01,0x00] -v_mul_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xff,0x03,0x00] +v_mul_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x02,0x00] -v_mul_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x20] +v_mul_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0xfe,0x03,0x00] -v_mul_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x40] +v_mul_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x20] -v_mul_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x60] +v_mul_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x40] -v_mul_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x10,0xd2,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x60] 
-v_mul_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x10,0xd2,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x10,0xd2,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x10,0xd2,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x10,0xd2,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x10,0xd2,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x10,0xd2,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x08] +v_mul_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x10,0xd2,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x10] +v_mul_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x08] -v_mul_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x18] +v_mul_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x10] + +v_mul_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x10,0xd2,0x80,0x04,0x00,0x18] v_mul_i32_i24 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x12] @@ -32579,92 +33093,113 @@ v_min_legacy_f32 v5, v255, v2 v_min_legacy_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x1a] +v_min_legacy_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x00] + +v_min_legacy_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x1a,0xd2,0x80,0x04,0x00,0x00] + +v_min_legacy_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0xc1,0x04,0x00,0x00] + +v_min_legacy_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0xf0,0x04,0x00,0x00] + +v_min_legacy_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0xf7,0x04,0x00,0x00] + v_min_legacy_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x00] -v_min_legacy_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x1a,0xd2,0x01,0x05,0x00,0x00] - v_min_legacy_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x1a,0xd2,0xff,0x05,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, s103 
-// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xcf,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xce,0x00,0x00] + +v_min_legacy_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xd0,0x00,0x00] + +v_min_legacy_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xd2,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd1,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xd4,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd3,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xd6,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd5,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xd8,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd7,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xda,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd9,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xdc,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xdb,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xde,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xdd,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xf6,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xdf,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xf8,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xf7,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xfc,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, m0 
-// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xf9,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xfe,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xfd,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x00,0x01,0x00] -v_min_legacy_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xff,0x00,0x00] +v_min_legacy_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x82,0x01,0x00] -v_min_legacy_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xfb,0x01,0x00] +v_min_legacy_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xe0,0x01,0x00] -v_min_legacy_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x02,0x00] +v_min_legacy_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xee,0x01,0x00] -v_min_legacy_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xff,0x03,0x00] +v_min_legacy_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x02,0x00] -v_min_legacy_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x20] +v_min_legacy_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0xfe,0x03,0x00] -v_min_legacy_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x40] +v_min_legacy_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x20] -v_min_legacy_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x60] +v_min_legacy_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x40] -v_min_legacy_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x1a,0xd2,0x01,0x05,0x00,0x00] +v_min_legacy_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x60] -v_min_legacy_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x1a,0xd2,0x01,0x05,0x00,0x00] +v_min_legacy_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x1a,0xd2,0x80,0x04,0x00,0x00] -v_min_legacy_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x1a,0xd2,0x01,0x05,0x00,0x00] 
+v_min_legacy_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x1a,0xd2,0x80,0x04,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x1a,0xd2,0x01,0x05,0x00,0x00] +v_min_legacy_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x1a,0xd2,0x80,0x04,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x08] +v_min_legacy_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x1a,0xd2,0x80,0x04,0x00,0x00] -v_min_legacy_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x10] +v_min_legacy_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x08] -v_min_legacy_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x18] +v_min_legacy_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x10] + +v_min_legacy_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x1a,0xd2,0x80,0x04,0x00,0x18] v_max_legacy_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x1c] @@ -32738,92 +33273,113 @@ v_max_legacy_f32 v5, v255, v2 v_max_legacy_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x1c] +v_max_legacy_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x00] + +v_max_legacy_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x1c,0xd2,0x80,0x04,0x00,0x00] + +v_max_legacy_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0xc1,0x04,0x00,0x00] + +v_max_legacy_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0xf0,0x04,0x00,0x00] + +v_max_legacy_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0xf7,0x04,0x00,0x00] + v_max_legacy_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x00] -v_max_legacy_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x1c,0xd2,0x01,0x05,0x00,0x00] - v_max_legacy_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x1c,0xd2,0xff,0x05,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xcf,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xce,0x00,0x00] + +v_max_legacy_f32_e64 v5, 0, 
flat_scratch_lo +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xd0,0x00,0x00] + +v_max_legacy_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xd2,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd1,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xd4,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd3,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xd6,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd5,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xd8,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd7,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xda,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd9,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xdc,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xdb,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xde,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xdd,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xf6,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xdf,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xf8,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xf7,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xfc,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xf9,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xfe,0x00,0x00] -v_max_legacy_f32_e64 v5, 
v1, exec_lo -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xfd,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x00,0x01,0x00] -v_max_legacy_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xff,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x82,0x01,0x00] -v_max_legacy_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xfb,0x01,0x00] +v_max_legacy_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xe0,0x01,0x00] -v_max_legacy_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x02,0x00] +v_max_legacy_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xee,0x01,0x00] -v_max_legacy_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xff,0x03,0x00] +v_max_legacy_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x02,0x00] -v_max_legacy_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x20] +v_max_legacy_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0xfe,0x03,0x00] -v_max_legacy_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x40] +v_max_legacy_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x20] -v_max_legacy_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x60] +v_max_legacy_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x40] -v_max_legacy_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x1c,0xd2,0x01,0x05,0x00,0x00] +v_max_legacy_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x60] -v_max_legacy_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x1c,0xd2,0x01,0x05,0x00,0x00] +v_max_legacy_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x1c,0xd2,0x80,0x04,0x00,0x00] -v_max_legacy_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x1c,0xd2,0x01,0x05,0x00,0x00] +v_max_legacy_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x1c,0xd2,0x80,0x04,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x1c,0xd2,0x01,0x05,0x00,0x00] 
+v_max_legacy_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x1c,0xd2,0x80,0x04,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x08] +v_max_legacy_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x1c,0xd2,0x80,0x04,0x00,0x00] -v_max_legacy_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x10] +v_max_legacy_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x08] -v_max_legacy_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x18] +v_max_legacy_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x10] + +v_max_legacy_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x1c,0xd2,0x80,0x04,0x00,0x18] v_min_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x1e] @@ -32897,92 +33453,113 @@ v_min_f32 v5, v255, v2 v_min_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x1e] +v_min_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x00] + +v_min_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x1e,0xd2,0x80,0x04,0x00,0x00] + +v_min_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0xc1,0x04,0x00,0x00] + +v_min_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0xf0,0x04,0x00,0x00] + +v_min_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0xf7,0x04,0x00,0x00] + v_min_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x00] -v_min_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x1e,0xd2,0x01,0x05,0x00,0x00] - v_min_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x1e,0xd2,0xff,0x05,0x00,0x00] -v_min_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xcf,0x00,0x00] +v_min_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xce,0x00,0x00] + +v_min_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xd0,0x00,0x00] + +v_min_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xd2,0x00,0x00] -v_min_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd1,0x00,0x00] +v_min_f32_e64 v5, 0, 
vcc_lo +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xd4,0x00,0x00] -v_min_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd3,0x00,0x00] +v_min_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xd6,0x00,0x00] -v_min_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd5,0x00,0x00] +v_min_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xd8,0x00,0x00] -v_min_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd7,0x00,0x00] +v_min_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xda,0x00,0x00] -v_min_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd9,0x00,0x00] +v_min_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xdc,0x00,0x00] -v_min_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xdb,0x00,0x00] +v_min_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xde,0x00,0x00] -v_min_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xdd,0x00,0x00] +v_min_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xf6,0x00,0x00] -v_min_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xdf,0x00,0x00] +v_min_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xf8,0x00,0x00] -v_min_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xf7,0x00,0x00] +v_min_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xfc,0x00,0x00] -v_min_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xf9,0x00,0x00] +v_min_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xfe,0x00,0x00] -v_min_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xfd,0x00,0x00] +v_min_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x00,0x01,0x00] -v_min_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xff,0x00,0x00] +v_min_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x82,0x01,0x00] -v_min_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xfb,0x01,0x00] +v_min_f32_e64 v5, 0, 0.5 +// CHECK: 
[0x05,0x00,0x1e,0xd2,0x80,0xe0,0x01,0x00] -v_min_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x02,0x00] +v_min_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xee,0x01,0x00] -v_min_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xff,0x03,0x00] +v_min_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x02,0x00] -v_min_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x20] +v_min_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0xfe,0x03,0x00] -v_min_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x40] +v_min_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x20] -v_min_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x60] +v_min_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x40] -v_min_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x1e,0xd2,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x60] -v_min_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x1e,0xd2,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x1e,0xd2,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x1e,0xd2,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x1e,0xd2,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x1e,0xd2,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x1e,0xd2,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x08] +v_min_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x1e,0xd2,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x10] +v_min_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x08] -v_min_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x18] +v_min_f32_e64 v5, 0, s2 mul:4 +// CHECK: 
[0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x10] + +v_min_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x1e,0xd2,0x80,0x04,0x00,0x18] v_max_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x20] @@ -33056,92 +33633,113 @@ v_max_f32 v5, v255, v2 v_max_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x20] +v_max_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x00] + +v_max_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x20,0xd2,0x80,0x04,0x00,0x00] + +v_max_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x20,0xd2,0xc1,0x04,0x00,0x00] + +v_max_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x20,0xd2,0xf0,0x04,0x00,0x00] + +v_max_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x20,0xd2,0xf7,0x04,0x00,0x00] + v_max_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x00] -v_max_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x20,0xd2,0x01,0x05,0x00,0x00] - v_max_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x20,0xd2,0xff,0x05,0x00,0x00] -v_max_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xcf,0x00,0x00] +v_max_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xce,0x00,0x00] + +v_max_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xd0,0x00,0x00] + +v_max_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xd2,0x00,0x00] -v_max_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd1,0x00,0x00] +v_max_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xd4,0x00,0x00] -v_max_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd3,0x00,0x00] +v_max_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xd6,0x00,0x00] -v_max_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd5,0x00,0x00] +v_max_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xd8,0x00,0x00] -v_max_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd7,0x00,0x00] +v_max_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xda,0x00,0x00] -v_max_f32_e64 v5, v1, tba_lo -// CHECK: 
[0x05,0x00,0x20,0xd2,0x01,0xd9,0x00,0x00] +v_max_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xdc,0x00,0x00] -v_max_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xdb,0x00,0x00] +v_max_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xde,0x00,0x00] -v_max_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xdd,0x00,0x00] +v_max_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xf6,0x00,0x00] -v_max_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xdf,0x00,0x00] +v_max_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xf8,0x00,0x00] -v_max_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xf7,0x00,0x00] +v_max_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xfc,0x00,0x00] -v_max_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xf9,0x00,0x00] +v_max_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xfe,0x00,0x00] -v_max_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xfd,0x00,0x00] +v_max_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x00,0x01,0x00] -v_max_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xff,0x00,0x00] +v_max_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x82,0x01,0x00] -v_max_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xfb,0x01,0x00] +v_max_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xe0,0x01,0x00] -v_max_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x02,0x00] +v_max_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xee,0x01,0x00] -v_max_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xff,0x03,0x00] +v_max_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x02,0x00] -v_max_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x20] +v_max_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0xfe,0x03,0x00] -v_max_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x40] +v_max_f32_e64 
v5, neg(0), s2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x20] -v_max_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x60] +v_max_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x40] -v_max_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x20,0xd2,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x60] -v_max_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x20,0xd2,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x20,0xd2,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x20,0xd2,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x20,0xd2,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x20,0xd2,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x20,0xd2,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x08] +v_max_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x20,0xd2,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x10] +v_max_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x08] -v_max_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x18] +v_max_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x10] + +v_max_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x20,0xd2,0x80,0x04,0x00,0x18] v_min_i32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x22] @@ -35315,92 +35913,113 @@ v_mac_f32 v5, v255, v2 v_mac_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x3e] +v_mac_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x00] + +v_mac_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x3e,0xd2,0x80,0x04,0x00,0x00] + +v_mac_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x3e,0xd2,0xc1,0x04,0x00,0x00] + +v_mac_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x3e,0xd2,0xf0,0x04,0x00,0x00] + +v_mac_f32_e64 v5, -4.0, s2 +// CHECK: 
[0x05,0x00,0x3e,0xd2,0xf7,0x04,0x00,0x00] + v_mac_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x00] -v_mac_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x3e,0xd2,0x01,0x05,0x00,0x00] - v_mac_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x3e,0xd2,0xff,0x05,0x00,0x00] -v_mac_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xcf,0x00,0x00] +v_mac_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xce,0x00,0x00] + +v_mac_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xd0,0x00,0x00] + +v_mac_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xd2,0x00,0x00] -v_mac_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd1,0x00,0x00] +v_mac_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xd4,0x00,0x00] -v_mac_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd3,0x00,0x00] +v_mac_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xd6,0x00,0x00] -v_mac_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd5,0x00,0x00] +v_mac_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xd8,0x00,0x00] -v_mac_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd7,0x00,0x00] +v_mac_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xda,0x00,0x00] -v_mac_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd9,0x00,0x00] +v_mac_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xdc,0x00,0x00] -v_mac_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xdb,0x00,0x00] +v_mac_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xde,0x00,0x00] -v_mac_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xdd,0x00,0x00] +v_mac_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xf6,0x00,0x00] -v_mac_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xdf,0x00,0x00] +v_mac_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xf8,0x00,0x00] -v_mac_f32_e64 v5, v1, ttmp11 -// 
CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xf7,0x00,0x00] +v_mac_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xfc,0x00,0x00] -v_mac_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xf9,0x00,0x00] +v_mac_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xfe,0x00,0x00] -v_mac_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xfd,0x00,0x00] +v_mac_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x00,0x01,0x00] -v_mac_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xff,0x00,0x00] +v_mac_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x82,0x01,0x00] -v_mac_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xfb,0x01,0x00] +v_mac_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xe0,0x01,0x00] -v_mac_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x02,0x00] +v_mac_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xee,0x01,0x00] -v_mac_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xff,0x03,0x00] +v_mac_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x02,0x00] -v_mac_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x20] +v_mac_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0xfe,0x03,0x00] -v_mac_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x40] +v_mac_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x20] -v_mac_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x60] +v_mac_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x40] -v_mac_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x3e,0xd2,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x60] -v_mac_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x3e,0xd2,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x3e,0xd2,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x3e,0xd2,0x01,0x05,0x00,0x00] 
+v_mac_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x3e,0xd2,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x08,0x3e,0xd2,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x3e,0xd2,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x08] +v_mac_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x08,0x3e,0xd2,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x10] +v_mac_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x08] -v_mac_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x18] +v_mac_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x10] + +v_mac_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x3e,0xd2,0x80,0x04,0x00,0x18] v_madmk_f32 v5, 0, 0x11213141, v3 // CHECK: [0x80,0x06,0x0a,0x40,0x41,0x31,0x21,0x11] @@ -36803,9 +37422,15 @@ v_ldexp_f32_e64 v5, 0, s2 v_ldexp_f32_e64 v255, 0, s2 // CHECK: [0xff,0x00,0x56,0xd2,0x80,0x04,0x00,0x00] +v_ldexp_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x56,0xd2,0xc1,0x04,0x00,0x00] + v_ldexp_f32_e64 v5, 0.5, s2 // CHECK: [0x05,0x00,0x56,0xd2,0xf0,0x04,0x00,0x00] +v_ldexp_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x56,0xd2,0xf7,0x04,0x00,0x00] + v_ldexp_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x56,0xd2,0x01,0x05,0x00,0x00] @@ -36863,9 +37488,6 @@ v_ldexp_f32_e64 v5, 0, 0.5 v_ldexp_f32_e64 v5, 0, -4.0 // CHECK: [0x05,0x00,0x56,0xd2,0x80,0xee,0x01,0x00] -v_ldexp_f32_e64 v5, 0, scc -// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xfa,0x01,0x00] - v_ldexp_f32_e64 v5, 0, v2 // CHECK: [0x05,0x00,0x56,0xd2,0x80,0x04,0x02,0x00] @@ -36944,80 +37566,89 @@ v_cvt_pkaccum_u8_f32 v5, v255, v2 v_cvt_pkaccum_u8_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x58] +v_cvt_pkaccum_u8_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x58,0xd2,0x80,0x04,0x00,0x00] + 
+v_cvt_pkaccum_u8_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x58,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x58,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x58,0xd2,0xf7,0x04,0x00,0x00] + v_cvt_pkaccum_u8_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x58,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x58,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pkaccum_u8_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x58,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xcf,0x00,0x00] - -v_cvt_pkaccum_u8_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd1,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xce,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd3,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xd0,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xd2,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xda,0x00,0x00] 
-v_cvt_pkaccum_u8_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xdc,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, 0 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x01,0x01,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, -1 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x83,0x01,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, 0.5 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xe1,0x01,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, -4.0 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xef,0x01,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xee,0x01,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, v2 +// CHECK: 
[0x05,0x00,0x58,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pkaccum_u8_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pkaccum_u8_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x58,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pkaccum_u8_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x58,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkaccum_u8_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x58,0xd2,0x80,0x04,0x00,0x00] v_cvt_pknorm_i16_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x5a] @@ -37091,80 +37722,101 @@ v_cvt_pknorm_i16_f32 v5, v255, v2 v_cvt_pknorm_i16_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x5a] +v_cvt_pknorm_i16_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x5a,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0xf7,0x04,0x00,0x00] + v_cvt_pknorm_i16_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x5a,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pknorm_i16_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x5a,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xcf,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xce,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xd0,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd1,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, flat_scratch_hi +// CHECK: 
[0x05,0x00,0x5a,0xd2,0x80,0xd2,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd3,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xda,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xdc,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, exec_hi -// CHECK: 
[0x05,0x00,0x5a,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xee,0x01,0x00] -v_cvt_pknorm_i16_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pknorm_i16_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pknorm_i16_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x40] +v_cvt_pknorm_i16_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pknorm_i16_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x60] +v_cvt_pknorm_i16_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pknorm_i16_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x5a,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x04,0x00,0x40] -v_cvt_pknorm_i16_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x5a,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x5a,0xd2,0x80,0x04,0x00,0x60] -v_cvt_pknorm_i16_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x5a,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x5a,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x5a,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x5a,0xd2,0x80,0x04,0x00,0x00] v_cvt_pknorm_u16_f32 v5, s1, v2 // 
CHECK: [0x01,0x04,0x0a,0x5c] @@ -37238,80 +37890,101 @@ v_cvt_pknorm_u16_f32 v5, v255, v2 v_cvt_pknorm_u16_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x5c] +v_cvt_pknorm_u16_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x5c,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0xf7,0x04,0x00,0x00] + v_cvt_pknorm_u16_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x5c,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pknorm_u16_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x5c,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xcf,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xce,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xd0,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd1,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xd2,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd3,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, tba_lo -// 
CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xda,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xdc,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xee,0x01,0x00] 
-v_cvt_pknorm_u16_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pknorm_u16_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pknorm_u16_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x40] +v_cvt_pknorm_u16_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pknorm_u16_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x60] +v_cvt_pknorm_u16_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pknorm_u16_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x5c,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x04,0x00,0x40] -v_cvt_pknorm_u16_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x5c,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x5c,0xd2,0x80,0x04,0x00,0x60] -v_cvt_pknorm_u16_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x5c,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x5c,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x5c,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x5c,0xd2,0x80,0x04,0x00,0x00] v_cvt_pkrtz_f16_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x5e] @@ -37385,80 +38058,101 @@ v_cvt_pkrtz_f16_f32 v5, v255, v2 v_cvt_pkrtz_f16_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x5e] +v_cvt_pkrtz_f16_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x5e,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x5e,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x5e,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x5e,0xd2,0xf7,0x04,0x00,0x00] + v_cvt_pkrtz_f16_f32_e64 v5, v1, s2 // CHECK: 
[0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x5e,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pkrtz_f16_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x5e,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, s103 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xcf,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, s103 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xce,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xd0,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd1,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xd2,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd3,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xda,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xdc,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, ttmp11 +// CHECK: 
[0x05,0x00,0x5e,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xee,0x01,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pkrtz_f16_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x40] +v_cvt_pkrtz_f16_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pkrtz_f16_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x60] +v_cvt_pkrtz_f16_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pkrtz_f16_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x5e,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, 0, -s2 +// CHECK: 
[0x05,0x00,0x5e,0xd2,0x80,0x04,0x00,0x40] -v_cvt_pkrtz_f16_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x5e,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x5e,0xd2,0x80,0x04,0x00,0x60] -v_cvt_pkrtz_f16_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x5e,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x5e,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x5e,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x5e,0xd2,0x80,0x04,0x00,0x00] v_cvt_pk_u16_u32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x60] @@ -37760,197 +38454,263 @@ v_cvt_pk_i16_i32_e64 v5, 0, v2 v_cvt_pk_i16_i32_e64 v5, 0, v255 // CHECK: [0x05,0x00,0x62,0xd2,0x80,0xfe,0x03,0x00] -v_mad_legacy_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x02] + +v_mad_legacy_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x80,0xd2,0x01,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x67,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x68,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x69,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x6a,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x6b,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x6c,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x6d,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x6e,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x6f,0x00,0x01,0x02] + +v_mad_legacy_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x7b,0x00,0x01,0x02] -v_mad_legacy_f32 v255, 
s1, v2, v3 -// CHECK: [0xff,0x00,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x7c,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x67,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x7e,0x00,0x01,0x02] -v_mad_legacy_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x68,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x7f,0x00,0x01,0x02] -v_mad_legacy_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x69,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x80,0x00,0x01,0x02] -v_mad_legacy_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x6a,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0xc1,0x00,0x01,0x02] -v_mad_legacy_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x6b,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0xf0,0x00,0x01,0x02] -v_mad_legacy_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x6c,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0xf7,0x00,0x01,0x02] -v_mad_legacy_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x6d,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x01,0x01,0x02] -v_mad_legacy_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x6e,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0xff,0x01,0x01,0x02] -v_mad_legacy_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x6f,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x82,0x01,0x02] -v_mad_legacy_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x7b,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0xe0,0x01,0x02] -v_mad_legacy_f32 v5, m0, v2, v3 -// CHECK: 
[0x05,0x00,0x80,0xd2,0x7c,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0xee,0x01,0x02] -v_mad_legacy_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x7e,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x02,0x02] -v_mad_legacy_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x7f,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0xfe,0x03,0x02] -v_mad_legacy_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0xfd,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x05,0x03] -v_mad_legacy_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x05,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0xc1,0x03] -v_mad_legacy_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0xff,0x05,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0xdd,0x03] -v_mad_legacy_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0xfe,0x0f,0x04] +v_mad_legacy_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x0d,0x04] -v_mad_legacy_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0xfe,0x07] +v_mad_legacy_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0xfd,0x07] -v_mad_legacy_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x24] +v_mad_legacy_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x22] -v_mad_legacy_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x44] +v_mad_legacy_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x42] -v_mad_legacy_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x84] +v_mad_legacy_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x82] -v_mad_legacy_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0xe4] +v_mad_legacy_f32 v5, -s1, neg(0), 
neg(0) +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0xe2] -v_mad_legacy_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x80,0xd2,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x80,0xd2,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x80,0xd2,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x80,0xd2,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x80,0xd2,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x80,0xd2,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x0c] +v_mad_legacy_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x0a] -v_mad_legacy_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x14] +v_mad_legacy_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x12] -v_mad_legacy_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x1c] +v_mad_legacy_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x00,0x01,0x1a] -v_mad_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x02] -v_mad_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x82,0xd2,0x01,0x00,0x01,0x02] -v_mad_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x67,0x04,0x0e,0x04] +v_mad_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x67,0x00,0x01,0x02] -v_mad_f32 v5, flat_scratch_lo, 
v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x68,0x04,0x0e,0x04] +v_mad_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x68,0x00,0x01,0x02] -v_mad_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x69,0x04,0x0e,0x04] +v_mad_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x69,0x00,0x01,0x02] -v_mad_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x6a,0x04,0x0e,0x04] +v_mad_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x6a,0x00,0x01,0x02] -v_mad_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x6b,0x04,0x0e,0x04] +v_mad_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x6b,0x00,0x01,0x02] -v_mad_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x6c,0x04,0x0e,0x04] +v_mad_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x6c,0x00,0x01,0x02] -v_mad_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x6d,0x04,0x0e,0x04] +v_mad_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x6d,0x00,0x01,0x02] -v_mad_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x6e,0x04,0x0e,0x04] +v_mad_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x6e,0x00,0x01,0x02] -v_mad_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x6f,0x04,0x0e,0x04] +v_mad_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x6f,0x00,0x01,0x02] -v_mad_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x7b,0x04,0x0e,0x04] +v_mad_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x7b,0x00,0x01,0x02] -v_mad_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x7c,0x04,0x0e,0x04] +v_mad_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x7c,0x00,0x01,0x02] -v_mad_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x7e,0x04,0x0e,0x04] +v_mad_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x7e,0x00,0x01,0x02] -v_mad_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x7f,0x04,0x0e,0x04] +v_mad_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x7f,0x00,0x01,0x02] -v_mad_f32 v5, scc, v2, v3 -// 
CHECK: [0x05,0x00,0x82,0xd2,0xfd,0x04,0x0e,0x04] +v_mad_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x80,0x00,0x01,0x02] -v_mad_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x05,0x0e,0x04] +v_mad_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0xc1,0x00,0x01,0x02] -v_mad_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0xff,0x05,0x0e,0x04] +v_mad_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0xf0,0x00,0x01,0x02] -v_mad_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0xfe,0x0f,0x04] +v_mad_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0xf7,0x00,0x01,0x02] -v_mad_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0xfe,0x07] +v_mad_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x01,0x01,0x02] -v_mad_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x24] +v_mad_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0xff,0x01,0x01,0x02] -v_mad_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x44] +v_mad_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x82,0x01,0x02] -v_mad_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x84] +v_mad_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0xe0,0x01,0x02] -v_mad_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0xe4] +v_mad_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0xee,0x01,0x02] -v_mad_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x02,0x02] -v_mad_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0xfe,0x03,0x02] -v_mad_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x05,0x03] -v_mad_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, 0.5 +// CHECK: 
[0x05,0x00,0x82,0xd2,0x01,0x00,0xc1,0x03] -v_mad_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x82,0xd2,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0xdd,0x03] -v_mad_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x0c] +v_mad_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x0d,0x04] -v_mad_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x14] +v_mad_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0xfd,0x07] -v_mad_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x1c] +v_mad_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x22] + +v_mad_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x42] + +v_mad_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x82] + +v_mad_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0xe2] + +v_mad_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x82,0xd2,0x01,0x00,0x01,0x02] + +v_mad_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x82,0xd2,0x01,0x00,0x01,0x02] + +v_mad_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x82,0xd2,0x01,0x00,0x01,0x02] + +v_mad_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x82,0xd2,0x01,0x00,0x01,0x02] + +v_mad_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x82,0xd2,0x01,0x00,0x01,0x02] + +v_mad_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x0a] + +v_mad_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x12] + +v_mad_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x00,0x01,0x1a] v_mad_i32_i24 v5, s1, 0, 0 // CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0x01,0x02] @@ -38138,389 +38898,521 @@ v_mad_u32_u24 v5, s1, 0, v3 v_mad_u32_u24 v5, s1, 0, v255 // CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0xfd,0x07] -v_cubeid_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 +// CHECK: 
[0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x02] + +v_cubeid_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x88,0xd2,0x01,0x00,0x01,0x02] + +v_cubeid_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x67,0x00,0x01,0x02] + +v_cubeid_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x68,0x00,0x01,0x02] + +v_cubeid_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x69,0x00,0x01,0x02] + +v_cubeid_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x6a,0x00,0x01,0x02] + +v_cubeid_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x6b,0x00,0x01,0x02] + +v_cubeid_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x6c,0x00,0x01,0x02] + +v_cubeid_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x6d,0x00,0x01,0x02] + +v_cubeid_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x6e,0x00,0x01,0x02] + +v_cubeid_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x6f,0x00,0x01,0x02] + +v_cubeid_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x7b,0x00,0x01,0x02] + +v_cubeid_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x7c,0x00,0x01,0x02] + +v_cubeid_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x7e,0x00,0x01,0x02] + +v_cubeid_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x7f,0x00,0x01,0x02] + +v_cubeid_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x00,0x01,0x02] + +v_cubeid_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0xc1,0x00,0x01,0x02] + +v_cubeid_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0xf0,0x00,0x01,0x02] + +v_cubeid_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0xf7,0x00,0x01,0x02] + +v_cubeid_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x01,0x01,0x02] + +v_cubeid_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0xff,0x01,0x01,0x02] + +v_cubeid_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x82,0x01,0x02] + +v_cubeid_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xe0,0x01,0x02] + +v_cubeid_f32 v5, s1, -4.0, 0 +// CHECK: 
[0x05,0x00,0x88,0xd2,0x01,0xee,0x01,0x02] + +v_cubeid_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x02,0x02] + +v_cubeid_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfe,0x03,0x02] -v_cubeid_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x05,0x03] -v_cubeid_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x67,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0xc1,0x03] -v_cubeid_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x68,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0xdd,0x03] -v_cubeid_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x69,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x0d,0x04] -v_cubeid_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x6a,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0xfd,0x07] -v_cubeid_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x6b,0x04,0x0e,0x04] +v_cubeid_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x22] -v_cubeid_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x6c,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x42] -v_cubeid_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x6d,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x82] -v_cubeid_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x6e,0x04,0x0e,0x04] +v_cubeid_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0xe2] -v_cubeid_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x6f,0x04,0x0e,0x04] +v_cubeid_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x88,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, ttmp11, v2, v3 -// CHECK: 
[0x05,0x00,0x88,0xd2,0x7b,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x88,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x7c,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x88,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x7e,0x04,0x0e,0x04] +v_cubeid_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x88,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x7f,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x88,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0xfd,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x0a] -v_cubeid_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x12] -v_cubeid_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0xff,0x05,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x00,0x01,0x1a] -v_cubeid_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfe,0x0f,0x04] +v_cubesc_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0xfe,0x07] +v_cubesc_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x24] +v_cubesc_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x67,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x44] +v_cubesc_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x68,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x84] +v_cubesc_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x69,0x00,0x01,0x02] 
-v_cubeid_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0xe4] +v_cubesc_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x6a,0x00,0x01,0x02] -v_cubeid_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x6b,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x6c,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x6d,0x00,0x01,0x02] -v_cubeid_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x6e,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x88,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x6f,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x0c] +v_cubesc_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x7b,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x14] +v_cubesc_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x7c,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x1c] +v_cubesc_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x7e,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x7f,0x00,0x01,0x02] -v_cubesc_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x80,0x00,0x01,0x02] -v_cubesc_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x67,0x04,0x0e,0x04] +v_cubesc_f32 v5, -1, 0, 0 +// CHECK: 
[0x05,0x00,0x8a,0xd2,0xc1,0x00,0x01,0x02] -v_cubesc_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x68,0x04,0x0e,0x04] +v_cubesc_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0xf0,0x00,0x01,0x02] -v_cubesc_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x69,0x04,0x0e,0x04] +v_cubesc_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0xf7,0x00,0x01,0x02] -v_cubesc_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x6a,0x04,0x0e,0x04] +v_cubesc_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x01,0x01,0x02] -v_cubesc_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x6b,0x04,0x0e,0x04] +v_cubesc_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0xff,0x01,0x01,0x02] -v_cubesc_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x6c,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x82,0x01,0x02] -v_cubesc_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x6d,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0xe0,0x01,0x02] -v_cubesc_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x6e,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0xee,0x01,0x02] -v_cubesc_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x6f,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x02,0x02] -v_cubesc_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x7b,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0xfe,0x03,0x02] -v_cubesc_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x7c,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x05,0x03] -v_cubesc_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x7e,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0xc1,0x03] -v_cubesc_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x7f,0x04,0x0e,0x04] 
+v_cubesc_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0xdd,0x03] -v_cubesc_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0xfd,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x0d,0x04] -v_cubesc_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x05,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0xfd,0x07] -v_cubesc_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0xff,0x05,0x0e,0x04] +v_cubesc_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x22] -v_cubesc_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0xfe,0x0f,0x04] +v_cubesc_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x42] -v_cubesc_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0xfe,0x07] +v_cubesc_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x82] -v_cubesc_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x24] +v_cubesc_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0xe2] -v_cubesc_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x44] +v_cubesc_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x84] +v_cubesc_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0xe4] +v_cubesc_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x8a,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, |v3| -// CHECK: 
[0x05,0x04,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x0a] -v_cubesc_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x12] -v_cubesc_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x8a,0xd2,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x1a] -v_cubesc_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x0c] +v_cubetc_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x14] +v_cubetc_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x1c] +v_cubetc_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x67,0x00,0x01,0x02] -v_cubetc_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x68,0x00,0x01,0x02] -v_cubetc_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x69,0x00,0x01,0x02] -v_cubetc_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x67,0x04,0x0e,0x04] +v_cubetc_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x6a,0x00,0x01,0x02] -v_cubetc_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x68,0x04,0x0e,0x04] +v_cubetc_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x6b,0x00,0x01,0x02] -v_cubetc_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x69,0x04,0x0e,0x04] +v_cubetc_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x6c,0x00,0x01,0x02] -v_cubetc_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x6a,0x04,0x0e,0x04] +v_cubetc_f32 v5, tba_hi, 0, 0 +// CHECK: 
[0x05,0x00,0x8c,0xd2,0x6d,0x00,0x01,0x02] -v_cubetc_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x6b,0x04,0x0e,0x04] +v_cubetc_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x6e,0x00,0x01,0x02] -v_cubetc_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x6c,0x04,0x0e,0x04] +v_cubetc_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x6f,0x00,0x01,0x02] -v_cubetc_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x6d,0x04,0x0e,0x04] +v_cubetc_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x7b,0x00,0x01,0x02] -v_cubetc_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x6e,0x04,0x0e,0x04] +v_cubetc_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x7c,0x00,0x01,0x02] -v_cubetc_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x6f,0x04,0x0e,0x04] +v_cubetc_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x7e,0x00,0x01,0x02] -v_cubetc_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x7b,0x04,0x0e,0x04] +v_cubetc_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x7f,0x00,0x01,0x02] -v_cubetc_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x7c,0x04,0x0e,0x04] +v_cubetc_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0x00,0x01,0x02] -v_cubetc_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x7e,0x04,0x0e,0x04] +v_cubetc_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0xc1,0x00,0x01,0x02] -v_cubetc_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x7f,0x04,0x0e,0x04] +v_cubetc_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0xf0,0x00,0x01,0x02] -v_cubetc_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0xfd,0x04,0x0e,0x04] +v_cubetc_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0xf7,0x00,0x01,0x02] -v_cubetc_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x05,0x0e,0x04] +v_cubetc_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x01,0x01,0x02] -v_cubetc_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0xff,0x05,0x0e,0x04] +v_cubetc_f32 v5, v255, 0, 0 +// 
CHECK: [0x05,0x00,0x8c,0xd2,0xff,0x01,0x01,0x02] -v_cubetc_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0xfe,0x0f,0x04] +v_cubetc_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x82,0x01,0x02] -v_cubetc_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0xfe,0x07] +v_cubetc_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0xe0,0x01,0x02] -v_cubetc_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x24] +v_cubetc_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0xee,0x01,0x02] -v_cubetc_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x44] +v_cubetc_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x02,0x02] -v_cubetc_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x84] +v_cubetc_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0xfe,0x03,0x02] -v_cubetc_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0xe4] +v_cubetc_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x05,0x03] -v_cubetc_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0xc1,0x03] -v_cubetc_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0xdd,0x03] -v_cubetc_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x0d,0x04] -v_cubetc_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0xfd,0x07] -v_cubetc_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x8c,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x22] -v_cubetc_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x0c] +v_cubetc_f32 v5, s1, neg(0), 0 
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x42] -v_cubetc_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x14] +v_cubetc_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x82] -v_cubetc_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x1c] +v_cubetc_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0xe2] -v_cubema_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x67,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x68,0x04,0x0e,0x04] +v_cubetc_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x69,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x8c,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x6a,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x0a] -v_cubema_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x6b,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x12] -v_cubema_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x6c,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x00,0x01,0x1a] -v_cubema_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x6d,0x04,0x0e,0x04] +v_cubema_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, tma_lo, v2, v3 -// CHECK: 
[0x05,0x00,0x8e,0xd2,0x6e,0x04,0x0e,0x04] +v_cubema_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x8e,0xd2,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x6f,0x04,0x0e,0x04] +v_cubema_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x67,0x00,0x01,0x02] -v_cubema_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x7b,0x04,0x0e,0x04] +v_cubema_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x68,0x00,0x01,0x02] -v_cubema_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x7c,0x04,0x0e,0x04] +v_cubema_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x69,0x00,0x01,0x02] -v_cubema_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x7e,0x04,0x0e,0x04] +v_cubema_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x6a,0x00,0x01,0x02] -v_cubema_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x7f,0x04,0x0e,0x04] +v_cubema_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x6b,0x00,0x01,0x02] -v_cubema_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0xfd,0x04,0x0e,0x04] +v_cubema_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x6c,0x00,0x01,0x02] -v_cubema_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x05,0x0e,0x04] +v_cubema_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x6d,0x00,0x01,0x02] -v_cubema_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0xff,0x05,0x0e,0x04] +v_cubema_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x6e,0x00,0x01,0x02] -v_cubema_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0xfe,0x0f,0x04] +v_cubema_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x6f,0x00,0x01,0x02] -v_cubema_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0xfe,0x07] +v_cubema_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x7b,0x00,0x01,0x02] -v_cubema_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x24] +v_cubema_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x7c,0x00,0x01,0x02] 
-v_cubema_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x44] +v_cubema_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x7e,0x00,0x01,0x02] -v_cubema_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x84] +v_cubema_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x7f,0x00,0x01,0x02] -v_cubema_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0xe4] +v_cubema_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x80,0x00,0x01,0x02] -v_cubema_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0xc1,0x00,0x01,0x02] -v_cubema_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0xf0,0x00,0x01,0x02] -v_cubema_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0xf7,0x00,0x01,0x02] -v_cubema_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x01,0x01,0x02] -v_cubema_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x8e,0xd2,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0xff,0x01,0x01,0x02] -v_cubema_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x0c] +v_cubema_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x82,0x01,0x02] -v_cubema_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x14] +v_cubema_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0xe0,0x01,0x02] -v_cubema_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x1c] +v_cubema_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0xee,0x01,0x02] + +v_cubema_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x02,0x02] + +v_cubema_f32 v5, s1, v255, 0 +// CHECK: 
[0x05,0x00,0x8e,0xd2,0x01,0xfe,0x03,0x02] + +v_cubema_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x05,0x03] + +v_cubema_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0xc1,0x03] + +v_cubema_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0xdd,0x03] + +v_cubema_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x0d,0x04] + +v_cubema_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0xfd,0x07] + +v_cubema_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x22] + +v_cubema_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x42] + +v_cubema_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x82] + +v_cubema_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0xe2] + +v_cubema_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x8e,0xd2,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x8e,0xd2,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x8e,0xd2,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x8e,0xd2,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x8e,0xd2,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x0a] + +v_cubema_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x12] + +v_cubema_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x00,0x01,0x1a] v_bfe_u32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0x01,0x02] @@ -38801,182 +39693,248 @@ v_bfi_b32 v5, s1, 0, v3 v_bfi_b32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0xfd,0x07] -v_fma_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x02] + +v_fma_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0x96,0xd2,0x01,0x00,0x01,0x02] + +v_fma_f32 v5, s103, 0, 0 +// CHECK: 
[0x05,0x00,0x96,0xd2,0x67,0x00,0x01,0x02] + +v_fma_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x68,0x00,0x01,0x02] + +v_fma_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x69,0x00,0x01,0x02] + +v_fma_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x6a,0x00,0x01,0x02] + +v_fma_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x6b,0x00,0x01,0x02] + +v_fma_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x6c,0x00,0x01,0x02] + +v_fma_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x6d,0x00,0x01,0x02] + +v_fma_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x6e,0x00,0x01,0x02] + +v_fma_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x6f,0x00,0x01,0x02] + +v_fma_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x7b,0x00,0x01,0x02] + +v_fma_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x7c,0x00,0x01,0x02] + +v_fma_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x7e,0x00,0x01,0x02] + +v_fma_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x7f,0x00,0x01,0x02] + +v_fma_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x00,0x01,0x02] + +v_fma_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0xc1,0x00,0x01,0x02] + +v_fma_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0xf0,0x00,0x01,0x02] -v_fma_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0xf7,0x00,0x01,0x02] -v_fma_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x67,0x04,0x0e,0x04] +v_fma_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x01,0x01,0x02] -v_fma_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x68,0x04,0x0e,0x04] +v_fma_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0xff,0x01,0x01,0x02] -v_fma_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x69,0x04,0x0e,0x04] +v_fma_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x82,0x01,0x02] -v_fma_f32 v5, vcc_lo, v2, v3 -// CHECK: 
[0x05,0x00,0x96,0xd2,0x6a,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xe0,0x01,0x02] -v_fma_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x6b,0x04,0x0e,0x04] +v_fma_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xee,0x01,0x02] -v_fma_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x6c,0x04,0x0e,0x04] +v_fma_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x02,0x02] -v_fma_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x6d,0x04,0x0e,0x04] +v_fma_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfe,0x03,0x02] -v_fma_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x6e,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x05,0x03] -v_fma_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x6f,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0xc1,0x03] -v_fma_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x7b,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0xdd,0x03] -v_fma_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x7c,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x0d,0x04] -v_fma_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x7e,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0xfd,0x07] -v_fma_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x7f,0x04,0x0e,0x04] +v_fma_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x22] -v_fma_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0xfd,0x04,0x0e,0x04] +v_fma_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x42] -v_fma_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x0e,0x04] +v_fma_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x82] -v_fma_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0xff,0x05,0x0e,0x04] +v_fma_f32 v5, 
-s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0xe2] -v_fma_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfe,0x0f,0x04] +v_fma_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0x96,0xd2,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0xfe,0x07] +v_fma_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0x96,0xd2,0x01,0x00,0x01,0x02] -v_fma_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x24] +v_fma_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0x96,0xd2,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x44] +v_fma_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0x96,0xd2,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x84] +v_fma_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0x96,0xd2,0x01,0x00,0x01,0x02] -v_fma_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0xe4] +v_fma_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x0a] -v_fma_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x12] -v_fma_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x00,0x01,0x1a] -v_fma_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x02] -v_fma_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f64 v[254:255], s[2:3], 0, 0 +// CHECK: [0xfe,0x00,0x98,0xd2,0x02,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0x96,0xd2,0x01,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[4:5], 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x04,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x0c] +v_fma_f64 v[5:6], s[102:103], 0, 
0 +// CHECK: [0x05,0x00,0x98,0xd2,0x66,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x14] +v_fma_f64 v[5:6], flat_scratch, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x68,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x1c] +v_fma_f64 v[5:6], vcc, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x6a,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], tba, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x6c,0x00,0x01,0x02] -v_fma_f64 v[254:255], s[2:3], v[2:3], v[3:4] -// CHECK: [0xfe,0x00,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], tma, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x6e,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[4:5], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x04,0x04,0x0e,0x04] +v_fma_f64 v[5:6], ttmp[10:11], 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x7a,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[102:103], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x66,0x04,0x0e,0x04] +v_fma_f64 v[5:6], exec, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x7e,0x00,0x01,0x02] -v_fma_f64 v[5:6], flat_scratch, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x68,0x04,0x0e,0x04] +v_fma_f64 v[5:6], 0, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x80,0x00,0x01,0x02] -v_fma_f64 v[5:6], vcc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x6a,0x04,0x0e,0x04] +v_fma_f64 v[5:6], -1, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0xc1,0x00,0x01,0x02] -v_fma_f64 v[5:6], tba, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x6c,0x04,0x0e,0x04] +v_fma_f64 v[5:6], 0.5, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0xf0,0x00,0x01,0x02] -v_fma_f64 v[5:6], tma, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x6e,0x04,0x0e,0x04] +v_fma_f64 v[5:6], -4.0, 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0xf7,0x00,0x01,0x02] -v_fma_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x7a,0x04,0x0e,0x04] +v_fma_f64 v[5:6], v[1:2], 0, 0 +// CHECK: 
[0x05,0x00,0x98,0xd2,0x01,0x01,0x01,0x02] -v_fma_f64 v[5:6], exec, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x7e,0x04,0x0e,0x04] +v_fma_f64 v[5:6], v[254:255], 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0xfe,0x01,0x01,0x02] -v_fma_f64 v[5:6], scc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0xfd,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], -1, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x82,0x01,0x02] -v_fma_f64 v[5:6], v[1:2], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x01,0x05,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], 0.5, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0xe0,0x01,0x02] -v_fma_f64 v[5:6], v[254:255], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0xfe,0x05,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], -4.0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0xee,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[254:255], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0xfc,0x0f,0x04] +v_fma_f64 v[5:6], s[2:3], v[2:3], 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x02,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[254:255] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0xfa,0x07] +v_fma_f64 v[5:6], s[2:3], v[254:255], 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0xfc,0x03,0x02] -v_fma_f64 v[5:6], -s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x24] +v_fma_f64 v[5:6], s[2:3], 0, -1 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x05,0x03] -v_fma_f64 v[5:6], s[2:3], -v[2:3], v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x44] +v_fma_f64 v[5:6], s[2:3], 0, 0.5 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0xc1,0x03] -v_fma_f64 v[5:6], s[2:3], v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x84] +v_fma_f64 v[5:6], s[2:3], 0, -4.0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0xdd,0x03] -v_fma_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0xe4] +v_fma_f64 v[5:6], s[2:3], 0, v[3:4] +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x0d,0x04] -v_fma_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4] -// CHECK: [0x05,0x01,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 
v[5:6], s[2:3], 0, v[254:255] +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0xf9,0x07] -v_fma_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4] -// CHECK: [0x05,0x02,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], -s[2:3], 0, 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x22] -v_fma_f64 v[5:6], s[2:3], v[2:3], |v[3:4]| -// CHECK: [0x05,0x04,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], neg(0), 0 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x42] -v_fma_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]| -// CHECK: [0x05,0x07,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], 0, neg(0) +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x82] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp -// CHECK: [0x05,0x08,0x98,0xd2,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], -s[2:3], neg(0), neg(0) +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0xe2] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2 -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x0c] +v_fma_f64 v[5:6], |s[2:3]|, 0, 0 +// CHECK: [0x05,0x01,0x98,0xd2,0x02,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4 -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x14] +v_fma_f64 v[5:6], s[2:3], |0|, 0 +// CHECK: [0x05,0x02,0x98,0xd2,0x02,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2 -// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x1c] +v_fma_f64 v[5:6], s[2:3], 0, |0| +// CHECK: [0x05,0x04,0x98,0xd2,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], |s[2:3]|, |0|, |0| +// CHECK: [0x05,0x07,0x98,0xd2,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], s[2:3], 0, 0 clamp +// CHECK: [0x05,0x08,0x98,0xd2,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], s[2:3], 0, 0 mul:2 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x0a] + +v_fma_f64 v[5:6], s[2:3], 0, 0 mul:4 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x12] + +v_fma_f64 v[5:6], s[2:3], 0, 0 div:2 +// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x00,0x01,0x1a] v_lerp_u8 v5, s1, 0, 0 // CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x00,0x01,0x02] @@ -39239,197 +40197,263 @@ v_alignbyte_b32 v5, s1, 0, 
v3 v_alignbyte_b32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0xfd,0x07] -v_mullit_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x02] + +v_mullit_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xa0,0xd2,0x01,0x00,0x01,0x02] + +v_mullit_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x67,0x00,0x01,0x02] + +v_mullit_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x68,0x00,0x01,0x02] + +v_mullit_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x69,0x00,0x01,0x02] + +v_mullit_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x6a,0x00,0x01,0x02] + +v_mullit_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x6b,0x00,0x01,0x02] + +v_mullit_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x6c,0x00,0x01,0x02] + +v_mullit_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x6d,0x00,0x01,0x02] + +v_mullit_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x6e,0x00,0x01,0x02] + +v_mullit_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x6f,0x00,0x01,0x02] + +v_mullit_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x7b,0x00,0x01,0x02] + +v_mullit_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x7c,0x00,0x01,0x02] + +v_mullit_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x7e,0x00,0x01,0x02] -v_mullit_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x7f,0x00,0x01,0x02] -v_mullit_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x67,0x04,0x0e,0x04] +v_mullit_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x80,0x00,0x01,0x02] -v_mullit_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x68,0x04,0x0e,0x04] +v_mullit_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0xc1,0x00,0x01,0x02] -v_mullit_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x69,0x04,0x0e,0x04] +v_mullit_f32 v5, 
0.5, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0xf0,0x00,0x01,0x02] -v_mullit_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x6a,0x04,0x0e,0x04] +v_mullit_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0xf7,0x00,0x01,0x02] -v_mullit_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x6b,0x04,0x0e,0x04] +v_mullit_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x01,0x01,0x02] -v_mullit_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x6c,0x04,0x0e,0x04] +v_mullit_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0xff,0x01,0x01,0x02] -v_mullit_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x6d,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x82,0x01,0x02] -v_mullit_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x6e,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0xe0,0x01,0x02] -v_mullit_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x6f,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0xee,0x01,0x02] -v_mullit_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x7b,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x02,0x02] -v_mullit_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x7c,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0xfe,0x03,0x02] -v_mullit_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x7e,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x05,0x03] -v_mullit_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x7f,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0xc1,0x03] -v_mullit_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0xfd,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0xdd,0x03] -v_mullit_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x05,0x0e,0x04] +v_mullit_f32 
v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x0d,0x04] -v_mullit_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0xff,0x05,0x0e,0x04] +v_mullit_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0xfd,0x07] -v_mullit_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0xfe,0x0f,0x04] +v_mullit_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x22] -v_mullit_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0xfe,0x07] +v_mullit_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x42] -v_mullit_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x24] +v_mullit_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x82] -v_mullit_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x44] +v_mullit_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0xe2] -v_mullit_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x84] +v_mullit_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xa0,0xd2,0x01,0x00,0x01,0x02] -v_mullit_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0xe4] +v_mullit_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xa0,0xd2,0x01,0x00,0x01,0x02] -v_mullit_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xa0,0xd2,0x01,0x00,0x01,0x02] -v_mullit_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xa0,0xd2,0x01,0x00,0x01,0x02] -v_mullit_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0xa0,0xd2,0x01,0x00,0x01,0x02] -v_mullit_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x0a] -v_mullit_f32 v5, s1, v2, v3 clamp -// CHECK: 
[0x05,0x08,0xa0,0xd2,0x01,0x04,0x0e,0x04] +v_mullit_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x12] -v_mullit_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x0c] +v_mullit_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x00,0x01,0x1a] -v_mullit_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x14] +v_min3_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x02] -v_mullit_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x1c] +v_min3_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xa2,0xd2,0x01,0x00,0x01,0x02] -v_min3_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x67,0x00,0x01,0x02] -v_min3_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x68,0x00,0x01,0x02] -v_min3_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x67,0x04,0x0e,0x04] +v_min3_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x69,0x00,0x01,0x02] -v_min3_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x68,0x04,0x0e,0x04] +v_min3_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x6a,0x00,0x01,0x02] -v_min3_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x69,0x04,0x0e,0x04] +v_min3_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x6b,0x00,0x01,0x02] -v_min3_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x6a,0x04,0x0e,0x04] +v_min3_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x6c,0x00,0x01,0x02] -v_min3_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x6b,0x04,0x0e,0x04] +v_min3_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x6d,0x00,0x01,0x02] -v_min3_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x6c,0x04,0x0e,0x04] +v_min3_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x6e,0x00,0x01,0x02] 
-v_min3_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x6d,0x04,0x0e,0x04] +v_min3_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x6f,0x00,0x01,0x02] -v_min3_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x6e,0x04,0x0e,0x04] +v_min3_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x7b,0x00,0x01,0x02] -v_min3_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x6f,0x04,0x0e,0x04] +v_min3_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x7c,0x00,0x01,0x02] -v_min3_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x7b,0x04,0x0e,0x04] +v_min3_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x7e,0x00,0x01,0x02] -v_min3_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x7c,0x04,0x0e,0x04] +v_min3_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x7f,0x00,0x01,0x02] -v_min3_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x7e,0x04,0x0e,0x04] +v_min3_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x80,0x00,0x01,0x02] -v_min3_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x7f,0x04,0x0e,0x04] +v_min3_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0xc1,0x00,0x01,0x02] -v_min3_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0xfd,0x04,0x0e,0x04] +v_min3_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0xf0,0x00,0x01,0x02] -v_min3_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x05,0x0e,0x04] +v_min3_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0xf7,0x00,0x01,0x02] -v_min3_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0xff,0x05,0x0e,0x04] +v_min3_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x01,0x01,0x02] -v_min3_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0xfe,0x0f,0x04] +v_min3_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0xff,0x01,0x01,0x02] -v_min3_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0xfe,0x07] +v_min3_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x82,0x01,0x02] -v_min3_f32 v5, -s1, v2, v3 -// CHECK: 
[0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x24] +v_min3_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0xe0,0x01,0x02] -v_min3_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x44] +v_min3_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0xee,0x01,0x02] -v_min3_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x84] +v_min3_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x02,0x02] -v_min3_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0xe4] +v_min3_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0xfe,0x03,0x02] -v_min3_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x05,0x03] -v_min3_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0xc1,0x03] -v_min3_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0xdd,0x03] -v_min3_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x0d,0x04] -v_min3_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0xa2,0xd2,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0xfd,0x07] -v_min3_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x0c] +v_min3_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x22] -v_min3_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x14] +v_min3_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x42] -v_min3_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x1c] +v_min3_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x82] + +v_min3_f32 v5, -s1, neg(0), neg(0) +// CHECK: 
[0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0xe2] + +v_min3_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xa2,0xd2,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xa2,0xd2,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xa2,0xd2,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xa2,0xd2,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0xa2,0xd2,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x0a] + +v_min3_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x12] + +v_min3_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x00,0x01,0x1a] v_min3_i32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0x01,0x02] @@ -39617,101 +40641,134 @@ v_min3_u32 v5, s1, 0, v3 v_min3_u32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0xfd,0x07] -v_max3_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x02] + +v_max3_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xa8,0xd2,0x01,0x00,0x01,0x02] + +v_max3_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x67,0x00,0x01,0x02] + +v_max3_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x68,0x00,0x01,0x02] + +v_max3_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x69,0x00,0x01,0x02] + +v_max3_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x6a,0x00,0x01,0x02] + +v_max3_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x6b,0x00,0x01,0x02] + +v_max3_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x6c,0x00,0x01,0x02] + +v_max3_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x6d,0x00,0x01,0x02] + +v_max3_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x6e,0x00,0x01,0x02] -v_max3_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, tma_hi, 0, 0 +// CHECK: 
[0x05,0x00,0xa8,0xd2,0x6f,0x00,0x01,0x02] -v_max3_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x67,0x04,0x0e,0x04] +v_max3_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x7b,0x00,0x01,0x02] -v_max3_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x68,0x04,0x0e,0x04] +v_max3_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x7c,0x00,0x01,0x02] -v_max3_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x69,0x04,0x0e,0x04] +v_max3_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x7e,0x00,0x01,0x02] -v_max3_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x6a,0x04,0x0e,0x04] +v_max3_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x7f,0x00,0x01,0x02] -v_max3_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x6b,0x04,0x0e,0x04] +v_max3_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x80,0x00,0x01,0x02] -v_max3_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x6c,0x04,0x0e,0x04] +v_max3_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0xc1,0x00,0x01,0x02] -v_max3_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x6d,0x04,0x0e,0x04] +v_max3_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0xf0,0x00,0x01,0x02] -v_max3_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x6e,0x04,0x0e,0x04] +v_max3_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0xf7,0x00,0x01,0x02] -v_max3_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x6f,0x04,0x0e,0x04] +v_max3_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x01,0x01,0x02] -v_max3_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x7b,0x04,0x0e,0x04] +v_max3_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0xff,0x01,0x01,0x02] -v_max3_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x7c,0x04,0x0e,0x04] +v_max3_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x82,0x01,0x02] -v_max3_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x7e,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0.5, 0 +// CHECK: 
[0x05,0x00,0xa8,0xd2,0x01,0xe0,0x01,0x02] -v_max3_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x7f,0x04,0x0e,0x04] +v_max3_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0xee,0x01,0x02] -v_max3_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0xfd,0x04,0x0e,0x04] +v_max3_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x02,0x02] -v_max3_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x05,0x0e,0x04] +v_max3_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0xfe,0x03,0x02] -v_max3_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0xff,0x05,0x0e,0x04] +v_max3_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x05,0x03] -v_max3_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0xfe,0x0f,0x04] +v_max3_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0xc1,0x03] -v_max3_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0xfe,0x07] +v_max3_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0xdd,0x03] -v_max3_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x24] +v_max3_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x0d,0x04] -v_max3_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x44] +v_max3_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0xfd,0x07] -v_max3_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x84] +v_max3_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x22] -v_max3_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0xe4] +v_max3_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x42] -v_max3_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x82] -v_max3_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0xe2] 
-v_max3_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xa8,0xd2,0x01,0x00,0x01,0x02] -v_max3_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xa8,0xd2,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0xa8,0xd2,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xa8,0xd2,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x0c] +v_max3_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xa8,0xd2,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x14] +v_max3_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0xa8,0xd2,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x1c] +v_max3_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x0a] + +v_max3_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x12] + +v_max3_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x00,0x01,0x1a] v_max3_i32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0x01,0x02] @@ -39899,101 +40956,134 @@ v_max3_u32 v5, s1, 0, v3 v_max3_u32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0xfd,0x07] -v_med3_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x02] + +v_med3_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xae,0xd2,0x01,0x00,0x01,0x02] + +v_med3_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x67,0x00,0x01,0x02] + +v_med3_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x68,0x00,0x01,0x02] + +v_med3_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x69,0x00,0x01,0x02] + +v_med3_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x6a,0x00,0x01,0x02] + +v_med3_f32 v5, 
vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x6b,0x00,0x01,0x02] + +v_med3_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x6c,0x00,0x01,0x02] -v_med3_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xae,0xd2,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x6d,0x00,0x01,0x02] -v_med3_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x67,0x04,0x0e,0x04] +v_med3_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x6e,0x00,0x01,0x02] -v_med3_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x68,0x04,0x0e,0x04] +v_med3_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x6f,0x00,0x01,0x02] -v_med3_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x69,0x04,0x0e,0x04] +v_med3_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x7b,0x00,0x01,0x02] -v_med3_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x6a,0x04,0x0e,0x04] +v_med3_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x7c,0x00,0x01,0x02] -v_med3_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x6b,0x04,0x0e,0x04] +v_med3_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x7e,0x00,0x01,0x02] -v_med3_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x6c,0x04,0x0e,0x04] +v_med3_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x7f,0x00,0x01,0x02] -v_med3_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x6d,0x04,0x0e,0x04] +v_med3_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x80,0x00,0x01,0x02] -v_med3_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x6e,0x04,0x0e,0x04] +v_med3_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0xc1,0x00,0x01,0x02] -v_med3_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x6f,0x04,0x0e,0x04] +v_med3_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0xf0,0x00,0x01,0x02] -v_med3_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x7b,0x04,0x0e,0x04] +v_med3_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0xf7,0x00,0x01,0x02] -v_med3_f32 v5, m0, v2, 
v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x7c,0x04,0x0e,0x04] +v_med3_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x01,0x01,0x02] -v_med3_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x7e,0x04,0x0e,0x04] +v_med3_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0xff,0x01,0x01,0x02] -v_med3_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x7f,0x04,0x0e,0x04] +v_med3_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x82,0x01,0x02] -v_med3_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0xfd,0x04,0x0e,0x04] +v_med3_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0xe0,0x01,0x02] -v_med3_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x05,0x0e,0x04] +v_med3_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0xee,0x01,0x02] -v_med3_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0xff,0x05,0x0e,0x04] +v_med3_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x02,0x02] -v_med3_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0xfe,0x0f,0x04] +v_med3_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0xfe,0x03,0x02] -v_med3_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0xfe,0x07] +v_med3_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x05,0x03] -v_med3_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x24] +v_med3_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0xc1,0x03] -v_med3_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x44] +v_med3_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0xdd,0x03] -v_med3_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x84] +v_med3_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x0d,0x04] -v_med3_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0xe4] +v_med3_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0xfd,0x07] -v_med3_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xae,0xd2,0x01,0x04,0x0e,0x04] 
+v_med3_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x22] -v_med3_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xae,0xd2,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x42] -v_med3_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xae,0xd2,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x82] -v_med3_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xae,0xd2,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0xe2] -v_med3_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0xae,0xd2,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xae,0xd2,0x01,0x00,0x01,0x02] -v_med3_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x0c] +v_med3_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xae,0xd2,0x01,0x00,0x01,0x02] -v_med3_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x14] +v_med3_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xae,0xd2,0x01,0x00,0x01,0x02] -v_med3_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x1c] +v_med3_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xae,0xd2,0x01,0x00,0x01,0x02] + +v_med3_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0xae,0xd2,0x01,0x00,0x01,0x02] + +v_med3_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x0a] + +v_med3_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x12] + +v_med3_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x00,0x01,0x1a] v_med3_i32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0x01,0x02] @@ -40565,11 +41655,14 @@ v_cvt_pk_u8_f32 v5, exec_hi, 0, 0 v_cvt_pk_u8_f32 v5, 0, 0, 0 // CHECK: [0x05,0x00,0xbc,0xd2,0x80,0x00,0x01,0x02] +v_cvt_pk_u8_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xbc,0xd2,0xc1,0x00,0x01,0x02] + v_cvt_pk_u8_f32 v5, 0.5, 0, 0 // CHECK: [0x05,0x00,0xbc,0xd2,0xf0,0x00,0x01,0x02] -v_cvt_pk_u8_f32 v5, 
scc, 0, 0 -// CHECK: [0x05,0x00,0xbc,0xd2,0xfd,0x00,0x01,0x02] +v_cvt_pk_u8_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xbc,0xd2,0xf7,0x00,0x01,0x02] v_cvt_pk_u8_f32 v5, v1, 0, 0 // CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x01,0x01,0x02] @@ -40607,182 +41700,248 @@ v_cvt_pk_u8_f32 v5, s1, 0, v3 v_cvt_pk_u8_f32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0xfd,0x07] -v_div_fixup_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x02] + +v_div_fixup_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xbe,0xd2,0x01,0x00,0x01,0x02] + +v_div_fixup_f32 v5, s103, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x67,0x00,0x01,0x02] + +v_div_fixup_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x68,0x00,0x01,0x02] + +v_div_fixup_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x69,0x00,0x01,0x02] + +v_div_fixup_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x6a,0x00,0x01,0x02] + +v_div_fixup_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x6b,0x00,0x01,0x02] + +v_div_fixup_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x6c,0x00,0x01,0x02] + +v_div_fixup_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x6d,0x00,0x01,0x02] + +v_div_fixup_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x6e,0x00,0x01,0x02] + +v_div_fixup_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x6f,0x00,0x01,0x02] + +v_div_fixup_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x7b,0x00,0x01,0x02] + +v_div_fixup_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x7c,0x00,0x01,0x02] + +v_div_fixup_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x7e,0x00,0x01,0x02] + +v_div_fixup_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x7f,0x00,0x01,0x02] + +v_div_fixup_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x80,0x00,0x01,0x02] + +v_div_fixup_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0xc1,0x00,0x01,0x02] + +v_div_fixup_f32 v5, 0.5, 0, 0 
+// CHECK: [0x05,0x00,0xbe,0xd2,0xf0,0x00,0x01,0x02] + +v_div_fixup_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0xf7,0x00,0x01,0x02] + +v_div_fixup_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x01,0x01,0x02] + +v_div_fixup_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0xff,0x01,0x01,0x02] + +v_div_fixup_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x82,0x01,0x02] + +v_div_fixup_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0xe0,0x01,0x02] -v_div_fixup_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0xee,0x01,0x02] -v_div_fixup_f32 v5, s103, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x67,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x02,0x02] -v_div_fixup_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x68,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0xfe,0x03,0x02] -v_div_fixup_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x69,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x05,0x03] -v_div_fixup_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x6a,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0xc1,0x03] -v_div_fixup_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x6b,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0xdd,0x03] -v_div_fixup_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x6c,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x0d,0x04] -v_div_fixup_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x6d,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0xfd,0x07] -v_div_fixup_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x6e,0x04,0x0e,0x04] +v_div_fixup_f32 
v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x22] -v_div_fixup_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x6f,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x42] -v_div_fixup_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x7b,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x82] -v_div_fixup_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x7c,0x04,0x0e,0x04] +v_div_fixup_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0xe2] -v_div_fixup_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x7e,0x04,0x0e,0x04] +v_div_fixup_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xbe,0xd2,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x7f,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xbe,0xd2,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0xfd,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xbe,0xd2,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x05,0x0e,0x04] +v_div_fixup_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xbe,0xd2,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0xff,0x05,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x08,0xbe,0xd2,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0xfe,0x0f,0x04] +v_div_fixup_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x0a] -v_div_fixup_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0xfe,0x07] +v_div_fixup_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x12] -v_div_fixup_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x24] +v_div_fixup_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x00,0x01,0x1a] 
-v_div_fixup_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x44] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x84] +v_div_fixup_f64 v[254:255], s[2:3], 0, 0 +// CHECK: [0xfe,0x00,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0xe4] +v_div_fixup_f64 v[5:6], s[4:5], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x04,0x00,0x01,0x02] -v_div_fixup_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[102:103], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x66,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], flat_scratch, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x68,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], vcc, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x6a,0x00,0x01,0x02] -v_div_fixup_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], tba, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x6c,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x08,0xbe,0xd2,0x01,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], tma, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x6e,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x0c] +v_div_fixup_f64 v[5:6], ttmp[10:11], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x7a,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x14] +v_div_fixup_f64 v[5:6], exec, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x7e,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x1c] +v_div_fixup_f64 v[5:6], 0, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x80,0x00,0x01,0x02] -v_div_fixup_f64 
v[5:6], s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], -1, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0xc1,0x00,0x01,0x02] -v_div_fixup_f64 v[254:255], s[2:3], v[2:3], v[3:4] -// CHECK: [0xfe,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0xf0,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[4:5], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x04,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0xf7,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[102:103], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x66,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], v[1:2], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x01,0x01,0x01,0x02] -v_div_fixup_f64 v[5:6], flat_scratch, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x68,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], v[254:255], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0xfe,0x01,0x01,0x02] -v_div_fixup_f64 v[5:6], vcc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x6a,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], -1, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x82,0x01,0x02] -v_div_fixup_f64 v[5:6], tba, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x6c,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0.5, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0xe0,0x01,0x02] -v_div_fixup_f64 v[5:6], tma, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x6e,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], -4.0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0xee,0x01,0x02] -v_div_fixup_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x7a,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], v[2:3], 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x02,0x02] -v_div_fixup_f64 v[5:6], exec, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x7e,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], v[254:255], 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0xfc,0x03,0x02] -v_div_fixup_f64 v[5:6], scc, v[2:3], v[3:4] -// CHECK: 
[0x05,0x00,0xc0,0xd2,0xfd,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, -1 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x05,0x03] -v_div_fixup_f64 v[5:6], v[1:2], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x01,0x05,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0.5 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0xc1,0x03] -v_div_fixup_f64 v[5:6], v[254:255], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0xfe,0x05,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, -4.0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0xdd,0x03] -v_div_fixup_f64 v[5:6], s[2:3], v[254:255], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0xfc,0x0f,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, v[3:4] +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x0d,0x04] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[254:255] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0xfa,0x07] +v_div_fixup_f64 v[5:6], s[2:3], 0, v[254:255] +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0xf9,0x07] -v_div_fixup_f64 v[5:6], -s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x24] +v_div_fixup_f64 v[5:6], -s[2:3], 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x22] -v_div_fixup_f64 v[5:6], s[2:3], -v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x44] +v_div_fixup_f64 v[5:6], s[2:3], neg(0), 0 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x42] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x84] +v_div_fixup_f64 v[5:6], s[2:3], 0, neg(0) +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x82] -v_div_fixup_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0xe4] +v_div_fixup_f64 v[5:6], -s[2:3], neg(0), neg(0) +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0xe2] -v_div_fixup_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4] -// CHECK: [0x05,0x01,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], |s[2:3]|, 0, 0 +// CHECK: [0x05,0x01,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4] -// CHECK: 
[0x05,0x02,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], |0|, 0 +// CHECK: [0x05,0x02,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], |v[3:4]| -// CHECK: [0x05,0x04,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, |0| +// CHECK: [0x05,0x04,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]| -// CHECK: [0x05,0x07,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], |s[2:3]|, |0|, |0| +// CHECK: [0x05,0x07,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp -// CHECK: [0x05,0x08,0xc0,0xd2,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 clamp +// CHECK: [0x05,0x08,0xc0,0xd2,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2 -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x0c] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x0a] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4 -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x14] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x12] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2 -// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x1c] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 div:2 +// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x00,0x01,0x1a] v_lshl_b64 v[5:6], 0, s2 // CHECK: [0x05,0x00,0xc2,0xd2,0x80,0x04,0x00,0x00] @@ -41024,12 +42183,36 @@ v_add_f64 v[5:6], s[4:5], s[4:5] v_add_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0xc8,0xd2,0x04,0x08,0x00,0x00] +v_add_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0xc8,0xd2,0x80,0x08,0x00,0x00] + +v_add_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0xc8,0xd2,0xc1,0x08,0x00,0x00] + +v_add_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0xc8,0xd2,0xf0,0x08,0x00,0x00] + +v_add_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0xc8,0xd2,0xf7,0x08,0x00,0x00] + v_add_f64 v[5:6], v[1:2], s[4:5] // CHECK: 
[0x05,0x00,0xc8,0xd2,0x01,0x09,0x00,0x00] v_add_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0xc8,0xd2,0xfe,0x09,0x00,0x00] +v_add_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x00,0x01,0x00] + +v_add_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x82,0x01,0x00] + +v_add_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0xe0,0x01,0x00] + +v_add_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0xee,0x01,0x00] + v_add_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x04,0x02,0x00] @@ -41072,12 +42255,36 @@ v_mul_f64 v[5:6], s[4:5], s[4:5] v_mul_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0xca,0xd2,0x04,0x08,0x00,0x00] +v_mul_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0xca,0xd2,0x80,0x08,0x00,0x00] + +v_mul_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0xca,0xd2,0xc1,0x08,0x00,0x00] + +v_mul_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0xca,0xd2,0xf0,0x08,0x00,0x00] + +v_mul_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0xca,0xd2,0xf7,0x08,0x00,0x00] + v_mul_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0xca,0xd2,0x01,0x09,0x00,0x00] v_mul_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0xca,0xd2,0xfe,0x09,0x00,0x00] +v_mul_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x00,0x01,0x00] + +v_mul_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x82,0x01,0x00] + +v_mul_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0xca,0xd2,0x04,0xe0,0x01,0x00] + +v_mul_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0xca,0xd2,0x04,0xee,0x01,0x00] + v_mul_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0xca,0xd2,0x04,0x04,0x02,0x00] @@ -41120,12 +42327,36 @@ v_min_f64 v[5:6], s[4:5], s[4:5] v_min_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0xcc,0xd2,0x04,0x08,0x00,0x00] +v_min_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0xcc,0xd2,0x80,0x08,0x00,0x00] + +v_min_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0xcc,0xd2,0xc1,0x08,0x00,0x00] + +v_min_f64 v[5:6], 0.5, s[4:5] 
+// CHECK: [0x05,0x00,0xcc,0xd2,0xf0,0x08,0x00,0x00] + +v_min_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0xcc,0xd2,0xf7,0x08,0x00,0x00] + v_min_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0xcc,0xd2,0x01,0x09,0x00,0x00] v_min_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0xcc,0xd2,0xfe,0x09,0x00,0x00] +v_min_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x00,0x01,0x00] + +v_min_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x82,0x01,0x00] + +v_min_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0xe0,0x01,0x00] + +v_min_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0xee,0x01,0x00] + v_min_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x04,0x02,0x00] @@ -41168,12 +42399,36 @@ v_max_f64 v[5:6], s[4:5], s[4:5] v_max_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0xce,0xd2,0x04,0x08,0x00,0x00] +v_max_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0xce,0xd2,0x80,0x08,0x00,0x00] + +v_max_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0xce,0xd2,0xc1,0x08,0x00,0x00] + +v_max_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0xce,0xd2,0xf0,0x08,0x00,0x00] + +v_max_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0xce,0xd2,0xf7,0x08,0x00,0x00] + v_max_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0xce,0xd2,0x01,0x09,0x00,0x00] v_max_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0xce,0xd2,0xfe,0x09,0x00,0x00] +v_max_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x00,0x01,0x00] + +v_max_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x82,0x01,0x00] + +v_max_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0xce,0xd2,0x04,0xe0,0x01,0x00] + +v_max_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0xce,0xd2,0x04,0xee,0x01,0x00] + v_max_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0xce,0xd2,0x04,0x04,0x02,0x00] @@ -41216,9 +42471,15 @@ v_ldexp_f64 v[5:6], 0, s2 v_ldexp_f64 v[254:255], 0, s2 // CHECK: [0xfe,0x00,0xd0,0xd2,0x80,0x04,0x00,0x00] +v_ldexp_f64 v[5:6], -1, s2 +// 
CHECK: [0x05,0x00,0xd0,0xd2,0xc1,0x04,0x00,0x00] + v_ldexp_f64 v[5:6], 0.5, s2 // CHECK: [0x05,0x00,0xd0,0xd2,0xf0,0x04,0x00,0x00] +v_ldexp_f64 v[5:6], -4.0, s2 +// CHECK: [0x05,0x00,0xd0,0xd2,0xf7,0x04,0x00,0x00] + v_ldexp_f64 v[5:6], v[1:2], s2 // CHECK: [0x05,0x00,0xd0,0xd2,0x01,0x05,0x00,0x00] @@ -41276,15 +42537,18 @@ v_ldexp_f64 v[5:6], 0, 0.5 v_ldexp_f64 v[5:6], 0, -4.0 // CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xee,0x01,0x00] -v_ldexp_f64 v[5:6], 0, scc -// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xfa,0x01,0x00] - v_ldexp_f64 v[5:6], 0, v2 // CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x02,0x00] v_ldexp_f64 v[5:6], 0, v255 // CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xfe,0x03,0x00] +v_ldexp_f64 v[5:6], neg(0), s2 +// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x00,0x20] + +v_ldexp_f64 v[5:6], |0|, s2 +// CHECK: [0x05,0x01,0xd0,0xd2,0x80,0x04,0x00,0x00] + v_ldexp_f64 v[5:6], 0, s2 clamp // CHECK: [0x05,0x08,0xd0,0xd2,0x80,0x04,0x00,0x00] @@ -41780,56 +43044,92 @@ v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[3:4] v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[254:255] // CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0xf9,0x07] -v_div_fmas_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v255, 0, 0, 0 +// CHECK: [0xff,0x00,0xde,0xd2,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0xc1,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0xf0,0x00,0x01,0x02] + +v_div_fmas_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0xf7,0x00,0x01,0x02] + +v_div_fmas_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x01,0x01,0x02] + +v_div_fmas_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0xff,0x01,0x01,0x02] + +v_div_fmas_f32 v5, 0, -1, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x82,0x01,0x02] + +v_div_fmas_f32 v5, 0, 0.5, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0xe0,0x01,0x02] -v_div_fmas_f32 v255, v1, v2, v3 -// CHECK: 
[0xff,0x00,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, -4.0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0xee,0x01,0x02] -v_div_fmas_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd2,0xff,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, v2, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x04,0x02,0x02] -v_div_fmas_f32 v5, v1, v255, v3 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0xff,0x0f,0x04] +v_div_fmas_f32 v5, 0, v255, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0xfe,0x03,0x02] -v_div_fmas_f32 v5, v1, v2, v255 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0xfe,0x07] +v_div_fmas_f32 v5, 0, 0, -1 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x05,0x03] -v_div_fmas_f32 v5, -v1, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x24] +v_div_fmas_f32 v5, 0, 0, 0.5 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0xc1,0x03] -v_div_fmas_f32 v5, v1, -v2, v3 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x44] +v_div_fmas_f32 v5, 0, 0, -4.0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0xdd,0x03] -v_div_fmas_f32 v5, v1, v2, -v3 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x84] +v_div_fmas_f32 v5, 0, 0, v3 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x0d,0x04] -v_div_fmas_f32 v5, -v1, -v2, -v3 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0xe4] +v_div_fmas_f32 v5, 0, 0, v255 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0xfd,0x07] -v_div_fmas_f32 v5, |v1|, v2, v3 -// CHECK: [0x05,0x01,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, neg(0), 0, 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x22] -v_div_fmas_f32 v5, v1, |v2|, v3 -// CHECK: [0x05,0x02,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, neg(0), 0 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x42] -v_div_fmas_f32 v5, v1, v2, |v3| -// CHECK: [0x05,0x04,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0, neg(0) +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x82] -v_div_fmas_f32 v5, |v1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, neg(0), neg(0), neg(0) +// CHECK: 
[0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0xe2] -v_div_fmas_f32 v5, v1, v2, v3 clamp -// CHECK: [0x05,0x08,0xde,0xd2,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, |0|, 0, 0 +// CHECK: [0x05,0x01,0xde,0xd2,0x80,0x00,0x01,0x02] -v_div_fmas_f32 v5, v1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x0c] +v_div_fmas_f32 v5, 0, |0|, 0 +// CHECK: [0x05,0x02,0xde,0xd2,0x80,0x00,0x01,0x02] -v_div_fmas_f32 v5, v1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x14] +v_div_fmas_f32 v5, 0, 0, |0| +// CHECK: [0x05,0x04,0xde,0xd2,0x80,0x00,0x01,0x02] -v_div_fmas_f32 v5, v1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x1c] +v_div_fmas_f32 v5, |0|, |0|, |0| +// CHECK: [0x05,0x07,0xde,0xd2,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0, 0, 0 clamp +// CHECK: [0x05,0x08,0xde,0xd2,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x0a] + +v_div_fmas_f32 v5, 0, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x12] + +v_div_fmas_f32 v5, 0, 0, 0 div:2 +// CHECK: [0x05,0x00,0xde,0xd2,0x80,0x00,0x01,0x1a] v_div_fmas_f64 v[5:6], vcc, vcc, vcc // CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x01] @@ -41837,18 +43137,54 @@ v_div_fmas_f64 v[5:6], vcc, vcc, vcc v_div_fmas_f64 v[254:255], vcc, vcc, vcc // CHECK: [0xfe,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x01] +v_div_fmas_f64 v[5:6], 0, vcc, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0x80,0xd4,0xa8,0x01] + +v_div_fmas_f64 v[5:6], -1, vcc, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0xc1,0xd4,0xa8,0x01] + +v_div_fmas_f64 v[5:6], 0.5, vcc, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0xf0,0xd4,0xa8,0x01] + +v_div_fmas_f64 v[5:6], -4.0, vcc, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0xf7,0xd4,0xa8,0x01] + v_div_fmas_f64 v[5:6], v[1:2], vcc, vcc // CHECK: [0x05,0x00,0xe0,0xd2,0x01,0xd5,0xa8,0x01] v_div_fmas_f64 v[5:6], v[254:255], vcc, vcc // CHECK: [0x05,0x00,0xe0,0xd2,0xfe,0xd5,0xa8,0x01] +v_div_fmas_f64 v[5:6], vcc, 0, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0x00,0xa9,0x01] + 
+v_div_fmas_f64 v[5:6], vcc, -1, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0x82,0xa9,0x01] + +v_div_fmas_f64 v[5:6], vcc, 0.5, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xe0,0xa9,0x01] + +v_div_fmas_f64 v[5:6], vcc, -4.0, vcc +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xee,0xa9,0x01] + v_div_fmas_f64 v[5:6], vcc, v[2:3], vcc // CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0x04,0xaa,0x01] v_div_fmas_f64 v[5:6], vcc, v[254:255], vcc // CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xfc,0xab,0x01] +v_div_fmas_f64 v[5:6], vcc, vcc, 0 +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0x00,0x02] + +v_div_fmas_f64 v[5:6], vcc, vcc, -1 +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0x04,0x03] + +v_div_fmas_f64 v[5:6], vcc, vcc, 0.5 +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xc0,0x03] + +v_div_fmas_f64 v[5:6], vcc, vcc, -4.0 +// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xdc,0x03] + v_div_fmas_f64 v[5:6], vcc, vcc, v[3:4] // CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0x0c,0x04] @@ -42092,9 +43428,15 @@ v_trig_preop_f64 v[5:6], 0, s2 v_trig_preop_f64 v[254:255], 0, s2 // CHECK: [0xfe,0x00,0xe8,0xd2,0x80,0x04,0x00,0x00] +v_trig_preop_f64 v[5:6], -1, s2 +// CHECK: [0x05,0x00,0xe8,0xd2,0xc1,0x04,0x00,0x00] + v_trig_preop_f64 v[5:6], 0.5, s2 // CHECK: [0x05,0x00,0xe8,0xd2,0xf0,0x04,0x00,0x00] +v_trig_preop_f64 v[5:6], -4.0, s2 +// CHECK: [0x05,0x00,0xe8,0xd2,0xf7,0x04,0x00,0x00] + v_trig_preop_f64 v[5:6], v[1:2], s2 // CHECK: [0x05,0x00,0xe8,0xd2,0x01,0x05,0x00,0x00] @@ -42152,15 +43494,18 @@ v_trig_preop_f64 v[5:6], 0, 0.5 v_trig_preop_f64 v[5:6], 0, -4.0 // CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xee,0x01,0x00] -v_trig_preop_f64 v[5:6], 0, scc -// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xfa,0x01,0x00] - v_trig_preop_f64 v[5:6], 0, v2 // CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x02,0x00] v_trig_preop_f64 v[5:6], 0, v255 // CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xfe,0x03,0x00] +v_trig_preop_f64 v[5:6], neg(0), s2 +// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x00,0x20] + +v_trig_preop_f64 v[5:6], |0|, s2 +// CHECK: 
[0x05,0x01,0xe8,0xd2,0x80,0x04,0x00,0x00] + v_trig_preop_f64 v[5:6], 0, s2 clamp // CHECK: [0x05,0x08,0xe8,0xd2,0x80,0x04,0x00,0x00] @@ -42173,6 +43518,234 @@ v_trig_preop_f64 v[5:6], 0, s2 mul:4 v_trig_preop_f64 v[5:6], 0, s2 div:2 // CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x00,0x18] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[254:255], s[12:13], s1, 0, 0 +// CHECK: [0xfe,0x0c,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[14:15], s1, 0, 0 +// CHECK: [0x05,0x0e,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[102:103], s1, 0, 0 +// CHECK: [0x05,0x66,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], flat_scratch, s1, 0, 0 +// CHECK: [0x05,0x68,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], vcc, s1, 0, 0 +// CHECK: [0x05,0x6a,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], tba, s1, 0, 0 +// CHECK: [0x05,0x6c,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], tma, s1, 0, 0 +// CHECK: [0x05,0x6e,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], ttmp[10:11], s1, 0, 0 +// CHECK: [0x05,0x7a,0xec,0xd2,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s103, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x67,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x68,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x69,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], vcc_lo, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x6a,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], vcc_hi, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x6b,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tba_lo, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x6c,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tba_hi, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x6d,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tma_lo, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x6e,0x00,0x01,0x02] + +v_mad_u64_u32 
v[5:6], s[12:13], tma_hi, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x6f,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], ttmp11, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x7b,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], m0, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x7c,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], exec_lo, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x7e,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], exec_hi, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x7f,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], 0, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x80,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], -1, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0xc1,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], 0.5, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0xf0,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], -4.0, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0xf7,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], v1, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x01,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], v255, 0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0xff,0x01,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s1, -1, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x82,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s1, 0.5, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0xe0,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s1, -4.0, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0xee,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s1, v2, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x04,0x02,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s1, v255, 0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0xfe,0x03,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, -1 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x00,0x05,0x03] + +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0.5 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x00,0xc1,0x03] + +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, -4.0 +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x00,0xdd,0x03] + +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, v[3:4] +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x00,0x0d,0x04] + +v_mad_u64_u32 v[5:6], 
s[12:13], s1, 0, v[254:255] +// CHECK: [0x05,0x0c,0xec,0xd2,0x01,0x00,0xf9,0x07] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[254:255], s[12:13], s1, 0, 0 +// CHECK: [0xfe,0x0c,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[14:15], s1, 0, 0 +// CHECK: [0x05,0x0e,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[102:103], s1, 0, 0 +// CHECK: [0x05,0x66,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], flat_scratch, s1, 0, 0 +// CHECK: [0x05,0x68,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], vcc, s1, 0, 0 +// CHECK: [0x05,0x6a,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], tba, s1, 0, 0 +// CHECK: [0x05,0x6c,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], tma, s1, 0, 0 +// CHECK: [0x05,0x6e,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], ttmp[10:11], s1, 0, 0 +// CHECK: [0x05,0x7a,0xee,0xd2,0x01,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s103, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x67,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x68,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x69,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], vcc_lo, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x6a,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], vcc_hi, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x6b,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tba_lo, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x6c,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tba_hi, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x6d,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tma_lo, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x6e,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tma_hi, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x6f,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], ttmp11, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x7b,0x00,0x01,0x02] + +v_mad_i64_i32 
v[5:6], s[12:13], m0, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x7c,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], exec_lo, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x7e,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], exec_hi, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x7f,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], 0, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x80,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], -1, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0xc1,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], 0.5, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0xf0,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], -4.0, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0xf7,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], v1, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x01,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], v255, 0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0xff,0x01,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, -1, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x82,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0.5, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0xe0,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, -4.0, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0xee,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, v2, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x04,0x02,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, v255, 0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0xfe,0x03,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, -1 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x00,0x05,0x03] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, 0.5 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x00,0xc1,0x03] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, -4.0 +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x00,0xdd,0x03] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[3:4] +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x00,0x0d,0x04] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[254:255] +// CHECK: [0x05,0x0c,0xee,0xd2,0x01,0x00,0xf9,0x07] + v_cmp_f_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x00,0x7c] @@ -42266,9 +43839,15 @@ v_cmp_f_f32_e64 tma, 0, s2 v_cmp_f_f32_e64 
ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x00,0xd0,0x80,0x04,0x00,0x00] +v_cmp_f_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x00,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_f_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x00,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_f_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x00,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_f_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x00,0xd0,0x01,0x05,0x00,0x00] @@ -42317,11 +43896,14 @@ v_cmp_f_f32_e64 s[10:11], 0, exec_hi v_cmp_f_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x00,0x01,0x00] +v_cmp_f_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x82,0x01,0x00] + v_cmp_f_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_f_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_f_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xee,0x01,0x00] v_cmp_f_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x02,0x00] @@ -42329,9 +43911,15 @@ v_cmp_f_f32_e64 s[10:11], 0, v2 v_cmp_f_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_f_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x00,0x20] + v_cmp_f_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x00,0x40] +v_cmp_f_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x00,0x60] + v_cmp_lt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x02,0x7c] @@ -42425,9 +44013,15 @@ v_cmp_lt_f32_e64 tma, 0, s2 v_cmp_lt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x02,0xd0,0x80,0x04,0x00,0x00] +v_cmp_lt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x02,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_lt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x02,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_lt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x02,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_lt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x02,0xd0,0x01,0x05,0x00,0x00] @@ -42476,11 +44070,14 @@ 
v_cmp_lt_f32_e64 s[10:11], 0, exec_hi v_cmp_lt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x00,0x01,0x00] +v_cmp_lt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x82,0x01,0x00] + v_cmp_lt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_lt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_lt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xee,0x01,0x00] v_cmp_lt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x02,0x00] @@ -42488,9 +44085,15 @@ v_cmp_lt_f32_e64 s[10:11], 0, v2 v_cmp_lt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_lt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x00,0x20] + v_cmp_lt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x00,0x40] +v_cmp_lt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x00,0x60] + v_cmp_eq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x04,0x7c] @@ -42584,9 +44187,15 @@ v_cmp_eq_f32_e64 tma, 0, s2 v_cmp_eq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x04,0xd0,0x80,0x04,0x00,0x00] +v_cmp_eq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x04,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_eq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x04,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_eq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x04,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_eq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x04,0xd0,0x01,0x05,0x00,0x00] @@ -42635,11 +44244,14 @@ v_cmp_eq_f32_e64 s[10:11], 0, exec_hi v_cmp_eq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x00,0x01,0x00] +v_cmp_eq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x82,0x01,0x00] + v_cmp_eq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_eq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_eq_f32_e64 s[10:11], 0, -4.0 +// CHECK: 
[0x0a,0x00,0x04,0xd0,0x80,0xee,0x01,0x00] v_cmp_eq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x02,0x00] @@ -42647,9 +44259,15 @@ v_cmp_eq_f32_e64 s[10:11], 0, v2 v_cmp_eq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_eq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x00,0x20] + v_cmp_eq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x00,0x40] +v_cmp_eq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x00,0x60] + v_cmp_le_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x06,0x7c] @@ -42743,9 +44361,15 @@ v_cmp_le_f32_e64 tma, 0, s2 v_cmp_le_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x06,0xd0,0x80,0x04,0x00,0x00] +v_cmp_le_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x06,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_le_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x06,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_le_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x06,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_le_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x06,0xd0,0x01,0x05,0x00,0x00] @@ -42794,11 +44418,14 @@ v_cmp_le_f32_e64 s[10:11], 0, exec_hi v_cmp_le_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x00,0x01,0x00] +v_cmp_le_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x82,0x01,0x00] + v_cmp_le_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_le_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_le_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xee,0x01,0x00] v_cmp_le_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x04,0x02,0x00] @@ -42806,9 +44433,15 @@ v_cmp_le_f32_e64 s[10:11], 0, v2 v_cmp_le_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_le_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x04,0x00,0x20] + v_cmp_le_f32_e64 s[10:11], 0, -s2 // CHECK: 
[0x0a,0x00,0x06,0xd0,0x80,0x04,0x00,0x40] +v_cmp_le_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x04,0x00,0x60] + v_cmp_gt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x08,0x7c] @@ -42902,9 +44535,15 @@ v_cmp_gt_f32_e64 tma, 0, s2 v_cmp_gt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x08,0xd0,0x80,0x04,0x00,0x00] +v_cmp_gt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x08,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_gt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x08,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_gt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x08,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_gt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x08,0xd0,0x01,0x05,0x00,0x00] @@ -42953,11 +44592,14 @@ v_cmp_gt_f32_e64 s[10:11], 0, exec_hi v_cmp_gt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x00,0x01,0x00] +v_cmp_gt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x82,0x01,0x00] + v_cmp_gt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_gt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_gt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xee,0x01,0x00] v_cmp_gt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x02,0x00] @@ -42965,9 +44607,15 @@ v_cmp_gt_f32_e64 s[10:11], 0, v2 v_cmp_gt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_gt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x00,0x20] + v_cmp_gt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x00,0x40] +v_cmp_gt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x00,0x60] + v_cmp_lg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x0a,0x7c] @@ -43061,9 +44709,15 @@ v_cmp_lg_f32_e64 tma, 0, s2 v_cmp_lg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00] +v_cmp_lg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x0a,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_lg_f32_e64 
s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x0a,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_lg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x0a,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_lg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x0a,0xd0,0x01,0x05,0x00,0x00] @@ -43112,11 +44766,14 @@ v_cmp_lg_f32_e64 s[10:11], 0, exec_hi v_cmp_lg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x00,0x01,0x00] +v_cmp_lg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x82,0x01,0x00] + v_cmp_lg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_lg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_lg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xee,0x01,0x00] v_cmp_lg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x02,0x00] @@ -43124,9 +44781,15 @@ v_cmp_lg_f32_e64 s[10:11], 0, v2 v_cmp_lg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_lg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x20] + v_cmp_lg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x40] +v_cmp_lg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x60] + v_cmp_ge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x0c,0x7c] @@ -43220,9 +44883,15 @@ v_cmp_ge_f32_e64 tma, 0, s2 v_cmp_ge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00] +v_cmp_ge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x0c,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_ge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x0c,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_ge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x0c,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_ge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x0c,0xd0,0x01,0x05,0x00,0x00] @@ -43271,11 +44940,14 @@ v_cmp_ge_f32_e64 s[10:11], 0, exec_hi v_cmp_ge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x00,0x01,0x00] +v_cmp_ge_f32_e64 s[10:11], 0, -1 +// CHECK: 
[0x0a,0x00,0x0c,0xd0,0x80,0x82,0x01,0x00] + v_cmp_ge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_ge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_ge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xee,0x01,0x00] v_cmp_ge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x02,0x00] @@ -43283,9 +44955,15 @@ v_cmp_ge_f32_e64 s[10:11], 0, v2 v_cmp_ge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_ge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x20] + v_cmp_ge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x40] +v_cmp_ge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x60] + v_cmp_o_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x0e,0x7c] @@ -43379,9 +45057,15 @@ v_cmp_o_f32_e64 tma, 0, s2 v_cmp_o_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00] +v_cmp_o_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x0e,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_o_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x0e,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_o_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x0e,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_o_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x0e,0xd0,0x01,0x05,0x00,0x00] @@ -43430,11 +45114,14 @@ v_cmp_o_f32_e64 s[10:11], 0, exec_hi v_cmp_o_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x00,0x01,0x00] +v_cmp_o_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x82,0x01,0x00] + v_cmp_o_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_o_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_o_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xee,0x01,0x00] v_cmp_o_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x02,0x00] @@ -43442,9 +45129,15 @@ v_cmp_o_f32_e64 s[10:11], 0, v2 
v_cmp_o_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_o_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x20] + v_cmp_o_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x40] +v_cmp_o_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x60] + v_cmp_u_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x10,0x7c] @@ -43538,9 +45231,15 @@ v_cmp_u_f32_e64 tma, 0, s2 v_cmp_u_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x10,0xd0,0x80,0x04,0x00,0x00] +v_cmp_u_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x10,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_u_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x10,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_u_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x10,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_u_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x10,0xd0,0x01,0x05,0x00,0x00] @@ -43589,11 +45288,14 @@ v_cmp_u_f32_e64 s[10:11], 0, exec_hi v_cmp_u_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x00,0x01,0x00] +v_cmp_u_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x82,0x01,0x00] + v_cmp_u_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_u_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_u_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xee,0x01,0x00] v_cmp_u_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x02,0x00] @@ -43601,9 +45303,15 @@ v_cmp_u_f32_e64 s[10:11], 0, v2 v_cmp_u_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_u_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x00,0x20] + v_cmp_u_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x00,0x40] +v_cmp_u_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x12,0x7c] @@ -43697,9 +45405,15 @@ v_cmp_nge_f32_e64 
tma, 0, s2 v_cmp_nge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x12,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x12,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x12,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x12,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x12,0xd0,0x01,0x05,0x00,0x00] @@ -43748,11 +45462,14 @@ v_cmp_nge_f32_e64 s[10:11], 0, exec_hi v_cmp_nge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xee,0x01,0x00] v_cmp_nge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x02,0x00] @@ -43760,9 +45477,15 @@ v_cmp_nge_f32_e64 s[10:11], 0, v2 v_cmp_nge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nlg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x14,0x7c] @@ -43856,9 +45579,15 @@ v_cmp_nlg_f32_e64 tma, 0, s2 v_cmp_nlg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x14,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nlg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x14,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nlg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x14,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nlg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x14,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nlg_f32_e64 s[10:11], v1, s2 // CHECK: 
[0x0a,0x00,0x14,0xd0,0x01,0x05,0x00,0x00] @@ -43907,11 +45636,14 @@ v_cmp_nlg_f32_e64 s[10:11], 0, exec_hi v_cmp_nlg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nlg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nlg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nlg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nlg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xee,0x01,0x00] v_cmp_nlg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x02,0x00] @@ -43919,9 +45651,15 @@ v_cmp_nlg_f32_e64 s[10:11], 0, v2 v_cmp_nlg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nlg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nlg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nlg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x00,0x60] + v_cmp_ngt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x16,0x7c] @@ -44015,9 +45753,15 @@ v_cmp_ngt_f32_e64 tma, 0, s2 v_cmp_ngt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x16,0xd0,0x80,0x04,0x00,0x00] +v_cmp_ngt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x16,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_ngt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x16,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_ngt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x16,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_ngt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x16,0xd0,0x01,0x05,0x00,0x00] @@ -44066,11 +45810,14 @@ v_cmp_ngt_f32_e64 s[10:11], 0, exec_hi v_cmp_ngt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x00,0x01,0x00] +v_cmp_ngt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x82,0x01,0x00] + v_cmp_ngt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_ngt_f32_e64 s[10:11], 0, scc -// CHECK: 
[0x0a,0x00,0x16,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_ngt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xee,0x01,0x00] v_cmp_ngt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x02,0x00] @@ -44078,9 +45825,15 @@ v_cmp_ngt_f32_e64 s[10:11], 0, v2 v_cmp_ngt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_ngt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x00,0x20] + v_cmp_ngt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x00,0x40] +v_cmp_ngt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nle_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x18,0x7c] @@ -44174,9 +45927,15 @@ v_cmp_nle_f32_e64 tma, 0, s2 v_cmp_nle_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x18,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nle_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x18,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nle_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x18,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nle_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x18,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nle_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x18,0xd0,0x01,0x05,0x00,0x00] @@ -44225,11 +45984,14 @@ v_cmp_nle_f32_e64 s[10:11], 0, exec_hi v_cmp_nle_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nle_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nle_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nle_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nle_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xee,0x01,0x00] v_cmp_nle_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x04,0x02,0x00] @@ -44237,9 +45999,15 @@ v_cmp_nle_f32_e64 s[10:11], 0, v2 v_cmp_nle_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nle_f32_e64 s[10:11], neg(0), s2 +// CHECK: 
[0x0a,0x00,0x18,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nle_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nle_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x04,0x00,0x60] + v_cmp_neq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x1a,0x7c] @@ -44333,9 +46101,15 @@ v_cmp_neq_f32_e64 tma, 0, s2 v_cmp_neq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00] +v_cmp_neq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x1a,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_neq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x1a,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_neq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x1a,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_neq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x1a,0xd0,0x01,0x05,0x00,0x00] @@ -44384,11 +46158,14 @@ v_cmp_neq_f32_e64 s[10:11], 0, exec_hi v_cmp_neq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x00,0x01,0x00] +v_cmp_neq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x82,0x01,0x00] + v_cmp_neq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_neq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_neq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xee,0x01,0x00] v_cmp_neq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x02,0x00] @@ -44396,9 +46173,15 @@ v_cmp_neq_f32_e64 s[10:11], 0, v2 v_cmp_neq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_neq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x20] + v_cmp_neq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x40] +v_cmp_neq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nlt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x1c,0x7c] @@ -44492,9 +46275,15 @@ v_cmp_nlt_f32_e64 tma, 0, s2 v_cmp_nlt_f32_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nlt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x1c,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nlt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x1c,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nlt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x1c,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nlt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x1c,0xd0,0x01,0x05,0x00,0x00] @@ -44543,11 +46332,14 @@ v_cmp_nlt_f32_e64 s[10:11], 0, exec_hi v_cmp_nlt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nlt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nlt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nlt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nlt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xee,0x01,0x00] v_cmp_nlt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x02,0x00] @@ -44555,9 +46347,15 @@ v_cmp_nlt_f32_e64 s[10:11], 0, v2 v_cmp_nlt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nlt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nlt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nlt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x60] + v_cmp_tru_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x1e,0x7c] @@ -44651,9 +46449,15 @@ v_cmp_tru_f32_e64 tma, 0, s2 v_cmp_tru_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00] +v_cmp_tru_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x1e,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_tru_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x1e,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_tru_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x1e,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_tru_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x1e,0xd0,0x01,0x05,0x00,0x00] @@ -44702,11 
+46506,14 @@ v_cmp_tru_f32_e64 s[10:11], 0, exec_hi v_cmp_tru_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x00,0x01,0x00] +v_cmp_tru_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x82,0x01,0x00] + v_cmp_tru_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_tru_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_tru_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xee,0x01,0x00] v_cmp_tru_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x02,0x00] @@ -44714,9 +46521,15 @@ v_cmp_tru_f32_e64 s[10:11], 0, v2 v_cmp_tru_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_tru_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x20] + v_cmp_tru_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x40] +v_cmp_tru_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_f_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x20,0x7c] @@ -44810,9 +46623,15 @@ v_cmpx_f_f32_e64 tma, 0, s2 v_cmpx_f_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_f_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_f_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x20,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_f_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_f_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x20,0xd0,0x01,0x05,0x00,0x00] @@ -44861,11 +46680,14 @@ v_cmpx_f_f32_e64 s[10:11], 0, exec_hi v_cmpx_f_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_f_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_f_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_f_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_f_f32_e64 s[10:11], 0, 
-4.0 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xee,0x01,0x00] v_cmpx_f_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x02,0x00] @@ -44873,9 +46695,15 @@ v_cmpx_f_f32_e64 s[10:11], 0, v2 v_cmpx_f_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_f_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_f_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_f_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_lt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x22,0x7c] @@ -44969,9 +46797,15 @@ v_cmpx_lt_f32_e64 tma, 0, s2 v_cmpx_lt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_lt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_lt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x22,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_lt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_lt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x22,0xd0,0x01,0x05,0x00,0x00] @@ -45020,11 +46854,14 @@ v_cmpx_lt_f32_e64 s[10:11], 0, exec_hi v_cmpx_lt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_lt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_lt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_lt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_lt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xee,0x01,0x00] v_cmpx_lt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x02,0x00] @@ -45032,9 +46869,15 @@ v_cmpx_lt_f32_e64 s[10:11], 0, v2 v_cmpx_lt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_lt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_lt_f32_e64 s[10:11], 0, -s2 // 
CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_lt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_eq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x24,0x7c] @@ -45128,9 +46971,15 @@ v_cmpx_eq_f32_e64 tma, 0, s2 v_cmpx_eq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_eq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_eq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x24,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_eq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_eq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x24,0xd0,0x01,0x05,0x00,0x00] @@ -45179,11 +47028,14 @@ v_cmpx_eq_f32_e64 s[10:11], 0, exec_hi v_cmpx_eq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_eq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_eq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_eq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_eq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xee,0x01,0x00] v_cmpx_eq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x02,0x00] @@ -45191,9 +47043,15 @@ v_cmpx_eq_f32_e64 s[10:11], 0, v2 v_cmpx_eq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_eq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_eq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_eq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_le_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x26,0x7c] @@ -45287,9 +47145,15 @@ v_cmpx_le_f32_e64 tma, 0, s2 v_cmpx_le_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_le_f32_e64 s[10:11], -1, s2 +// CHECK: 
[0x0a,0x00,0x26,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_le_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x26,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_le_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_le_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x26,0xd0,0x01,0x05,0x00,0x00] @@ -45338,11 +47202,14 @@ v_cmpx_le_f32_e64 s[10:11], 0, exec_hi v_cmpx_le_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_le_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_le_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_le_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_le_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xee,0x01,0x00] v_cmpx_le_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x02,0x00] @@ -45350,9 +47217,15 @@ v_cmpx_le_f32_e64 s[10:11], 0, v2 v_cmpx_le_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_le_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_le_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_le_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_gt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x28,0x7c] @@ -45446,9 +47319,15 @@ v_cmpx_gt_f32_e64 tma, 0, s2 v_cmpx_gt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_gt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_gt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x28,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_gt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_gt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x28,0xd0,0x01,0x05,0x00,0x00] @@ -45497,11 +47376,14 @@ v_cmpx_gt_f32_e64 s[10:11], 0, exec_hi v_cmpx_gt_f32_e64 s[10:11], 0, 0 // CHECK: 
[0x0a,0x00,0x28,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_gt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_gt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_gt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_gt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xee,0x01,0x00] v_cmpx_gt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x02,0x00] @@ -45509,9 +47391,15 @@ v_cmpx_gt_f32_e64 s[10:11], 0, v2 v_cmpx_gt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_gt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_gt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_gt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_lg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x2a,0x7c] @@ -45605,9 +47493,15 @@ v_cmpx_lg_f32_e64 tma, 0, s2 v_cmpx_lg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_lg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_lg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2a,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_lg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_lg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2a,0xd0,0x01,0x05,0x00,0x00] @@ -45656,11 +47550,14 @@ v_cmpx_lg_f32_e64 s[10:11], 0, exec_hi v_cmpx_lg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_lg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_lg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_lg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_lg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xee,0x01,0x00] v_cmpx_lg_f32_e64 
s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x02,0x00] @@ -45668,9 +47565,15 @@ v_cmpx_lg_f32_e64 s[10:11], 0, v2 v_cmpx_lg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_lg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_lg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_lg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_ge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x2c,0x7c] @@ -45764,9 +47667,15 @@ v_cmpx_ge_f32_e64 tma, 0, s2 v_cmpx_ge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_ge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_ge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2c,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_ge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_ge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2c,0xd0,0x01,0x05,0x00,0x00] @@ -45815,11 +47724,14 @@ v_cmpx_ge_f32_e64 s[10:11], 0, exec_hi v_cmpx_ge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_ge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_ge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_ge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_ge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xee,0x01,0x00] v_cmpx_ge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x02,0x00] @@ -45827,9 +47739,15 @@ v_cmpx_ge_f32_e64 s[10:11], 0, v2 v_cmpx_ge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_ge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_ge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_ge_f32_e64 
s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_o_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x2e,0x7c] @@ -45923,9 +47841,15 @@ v_cmpx_o_f32_e64 tma, 0, s2 v_cmpx_o_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_o_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_o_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2e,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_o_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_o_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2e,0xd0,0x01,0x05,0x00,0x00] @@ -45974,11 +47898,14 @@ v_cmpx_o_f32_e64 s[10:11], 0, exec_hi v_cmpx_o_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_o_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_o_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_o_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_o_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xee,0x01,0x00] v_cmpx_o_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x02,0x00] @@ -45986,9 +47913,15 @@ v_cmpx_o_f32_e64 s[10:11], 0, v2 v_cmpx_o_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_o_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_o_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_o_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_u_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x30,0x7c] @@ -46082,9 +48015,15 @@ v_cmpx_u_f32_e64 tma, 0, s2 v_cmpx_u_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_u_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_u_f32_e64 s[10:11], 0.5, s2 // CHECK: 
[0x0a,0x00,0x30,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_u_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_u_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x30,0xd0,0x01,0x05,0x00,0x00] @@ -46133,11 +48072,14 @@ v_cmpx_u_f32_e64 s[10:11], 0, exec_hi v_cmpx_u_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_u_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_u_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_u_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_u_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xee,0x01,0x00] v_cmpx_u_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x02,0x00] @@ -46145,9 +48087,15 @@ v_cmpx_u_f32_e64 s[10:11], 0, v2 v_cmpx_u_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_u_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_u_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_u_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x32,0x7c] @@ -46241,9 +48189,15 @@ v_cmpx_nge_f32_e64 tma, 0, s2 v_cmpx_nge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x32,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x32,0xd0,0x01,0x05,0x00,0x00] @@ -46292,11 +48246,14 @@ v_cmpx_nge_f32_e64 s[10:11], 0, exec_hi v_cmpx_nge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nge_f32_e64 s[10:11], 0, -1 +// CHECK: 
[0x0a,0x00,0x32,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x02,0x00] @@ -46304,9 +48261,15 @@ v_cmpx_nge_f32_e64 s[10:11], 0, v2 v_cmpx_nge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nlg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x34,0x7c] @@ -46400,9 +48363,15 @@ v_cmpx_nlg_f32_e64 tma, 0, s2 v_cmpx_nlg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nlg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nlg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x34,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nlg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nlg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x34,0xd0,0x01,0x05,0x00,0x00] @@ -46451,11 +48420,14 @@ v_cmpx_nlg_f32_e64 s[10:11], 0, exec_hi v_cmpx_nlg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nlg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nlg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nlg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nlg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nlg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x02,0x00] @@ 
-46463,9 +48435,15 @@ v_cmpx_nlg_f32_e64 s[10:11], 0, v2 v_cmpx_nlg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nlg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nlg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nlg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_ngt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x36,0x7c] @@ -46559,9 +48537,15 @@ v_cmpx_ngt_f32_e64 tma, 0, s2 v_cmpx_ngt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_ngt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_ngt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x36,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_ngt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_ngt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x36,0xd0,0x01,0x05,0x00,0x00] @@ -46610,11 +48594,14 @@ v_cmpx_ngt_f32_e64 s[10:11], 0, exec_hi v_cmpx_ngt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_ngt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_ngt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_ngt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_ngt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xee,0x01,0x00] v_cmpx_ngt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x02,0x00] @@ -46622,9 +48609,15 @@ v_cmpx_ngt_f32_e64 s[10:11], 0, v2 v_cmpx_ngt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_ngt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_ngt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_ngt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: 
[0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nle_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x38,0x7c] @@ -46718,9 +48711,15 @@ v_cmpx_nle_f32_e64 tma, 0, s2 v_cmpx_nle_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nle_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nle_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x38,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nle_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nle_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x38,0xd0,0x01,0x05,0x00,0x00] @@ -46769,11 +48768,14 @@ v_cmpx_nle_f32_e64 s[10:11], 0, exec_hi v_cmpx_nle_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nle_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nle_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nle_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nle_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nle_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x02,0x00] @@ -46781,9 +48783,15 @@ v_cmpx_nle_f32_e64 s[10:11], 0, v2 v_cmpx_nle_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nle_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nle_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nle_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_neq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x3a,0x7c] @@ -46877,9 +48885,15 @@ v_cmpx_neq_f32_e64 tma, 0, s2 v_cmpx_neq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_neq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_neq_f32_e64 s[10:11], 0.5, s2 // CHECK: 
[0x0a,0x00,0x3a,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_neq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_neq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3a,0xd0,0x01,0x05,0x00,0x00] @@ -46928,11 +48942,14 @@ v_cmpx_neq_f32_e64 s[10:11], 0, exec_hi v_cmpx_neq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_neq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_neq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_neq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_neq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xee,0x01,0x00] v_cmpx_neq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x02,0x00] @@ -46940,9 +48957,15 @@ v_cmpx_neq_f32_e64 s[10:11], 0, v2 v_cmpx_neq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_neq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_neq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_neq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nlt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x3c,0x7c] @@ -47036,9 +49059,15 @@ v_cmpx_nlt_f32_e64 tma, 0, s2 v_cmpx_nlt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nlt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nlt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3c,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nlt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nlt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3c,0xd0,0x01,0x05,0x00,0x00] @@ -47087,11 +49116,14 @@ v_cmpx_nlt_f32_e64 s[10:11], 0, exec_hi v_cmpx_nlt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nlt_f32_e64 
s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nlt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nlt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nlt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nlt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x02,0x00] @@ -47099,9 +49131,15 @@ v_cmpx_nlt_f32_e64 s[10:11], 0, v2 v_cmpx_nlt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nlt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nlt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nlt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_tru_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x3e,0x7c] @@ -47195,9 +49233,15 @@ v_cmpx_tru_f32_e64 tma, 0, s2 v_cmpx_tru_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_tru_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_tru_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3e,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_tru_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_tru_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3e,0xd0,0x01,0x05,0x00,0x00] @@ -47246,11 +49290,14 @@ v_cmpx_tru_f32_e64 s[10:11], 0, exec_hi v_cmpx_tru_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_tru_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_tru_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_tru_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_tru_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xee,0x01,0x00] v_cmpx_tru_f32_e64 s[10:11], 0, v2 // CHECK: 
[0x0a,0x00,0x3e,0xd0,0x80,0x04,0x02,0x00] @@ -47258,9 +49305,15 @@ v_cmpx_tru_f32_e64 s[10:11], 0, v2 v_cmpx_tru_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_tru_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_tru_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_tru_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x60] + v_cmp_f_f64 vcc, s[2:3], v[2:3] // CHECK: [0x02,0x04,0x40,0x7c] @@ -47342,9 +49395,15 @@ v_cmp_f_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_f_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x08,0x00,0x00] +v_cmp_f_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x40,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_f_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x40,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_f_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x40,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_f_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x40,0xd0,0x01,0x09,0x00,0x00] @@ -47354,9 +49413,15 @@ v_cmp_f_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_f_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x00,0x01,0x00] +v_cmp_f_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x82,0x01,0x00] + v_cmp_f_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x40,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_f_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0xee,0x01,0x00] + v_cmp_f_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x04,0x02,0x00] @@ -47453,9 +49518,15 @@ v_cmp_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_lt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x08,0x00,0x00] +v_cmp_lt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x42,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_lt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x42,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_lt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: 
[0x0a,0x00,0x42,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_lt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x42,0xd0,0x01,0x09,0x00,0x00] @@ -47465,9 +49536,15 @@ v_cmp_lt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_lt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x00,0x01,0x00] +v_cmp_lt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x82,0x01,0x00] + v_cmp_lt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x42,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_lt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0xee,0x01,0x00] + v_cmp_lt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x04,0x02,0x00] @@ -47564,9 +49641,15 @@ v_cmp_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_eq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x08,0x00,0x00] +v_cmp_eq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x44,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_eq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x44,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_eq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x44,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_eq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x44,0xd0,0x01,0x09,0x00,0x00] @@ -47576,9 +49659,15 @@ v_cmp_eq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_eq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x00,0x01,0x00] +v_cmp_eq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x82,0x01,0x00] + v_cmp_eq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x44,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_eq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0xee,0x01,0x00] + v_cmp_eq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x04,0x02,0x00] @@ -47675,9 +49764,15 @@ v_cmp_le_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_le_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x08,0x00,0x00] +v_cmp_le_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x46,0xd0,0xc1,0x08,0x00,0x00] + 
v_cmp_le_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x46,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_le_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x46,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_le_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x46,0xd0,0x01,0x09,0x00,0x00] @@ -47687,9 +49782,15 @@ v_cmp_le_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_le_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x00,0x01,0x00] +v_cmp_le_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x82,0x01,0x00] + v_cmp_le_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x46,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_le_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0xee,0x01,0x00] + v_cmp_le_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x04,0x02,0x00] @@ -47786,9 +49887,15 @@ v_cmp_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_gt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x08,0x00,0x00] +v_cmp_gt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x48,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_gt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x48,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_gt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x48,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_gt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x48,0xd0,0x01,0x09,0x00,0x00] @@ -47798,9 +49905,15 @@ v_cmp_gt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_gt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x00,0x01,0x00] +v_cmp_gt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x82,0x01,0x00] + v_cmp_gt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x48,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_gt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0xee,0x01,0x00] + v_cmp_gt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x04,0x02,0x00] @@ -47897,9 +50010,15 @@ v_cmp_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_lg_f64_e64 s[10:11], 0, s[4:5] // 
CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x08,0x00,0x00] +v_cmp_lg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x4a,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_lg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x4a,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_lg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x4a,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_lg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x4a,0xd0,0x01,0x09,0x00,0x00] @@ -47909,9 +50028,15 @@ v_cmp_lg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_lg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x00,0x01,0x00] +v_cmp_lg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x82,0x01,0x00] + v_cmp_lg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_lg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0xee,0x01,0x00] + v_cmp_lg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x04,0x02,0x00] @@ -48008,9 +50133,15 @@ v_cmp_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_ge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x08,0x00,0x00] +v_cmp_ge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x4c,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_ge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x4c,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_ge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x4c,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_ge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x4c,0xd0,0x01,0x09,0x00,0x00] @@ -48020,9 +50151,15 @@ v_cmp_ge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_ge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x00,0x01,0x00] +v_cmp_ge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x82,0x01,0x00] + v_cmp_ge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_ge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0xee,0x01,0x00] + v_cmp_ge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: 
[0x0a,0x00,0x4c,0xd0,0x04,0x04,0x02,0x00] @@ -48119,9 +50256,15 @@ v_cmp_o_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_o_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x08,0x00,0x00] +v_cmp_o_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x4e,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_o_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x4e,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_o_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x4e,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_o_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x4e,0xd0,0x01,0x09,0x00,0x00] @@ -48131,9 +50274,15 @@ v_cmp_o_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_o_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x00,0x01,0x00] +v_cmp_o_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x82,0x01,0x00] + v_cmp_o_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_o_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0xee,0x01,0x00] + v_cmp_o_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x04,0x02,0x00] @@ -48230,9 +50379,15 @@ v_cmp_u_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_u_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x08,0x00,0x00] +v_cmp_u_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x50,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_u_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x50,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_u_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x50,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_u_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x50,0xd0,0x01,0x09,0x00,0x00] @@ -48242,9 +50397,15 @@ v_cmp_u_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_u_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x00,0x01,0x00] +v_cmp_u_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x82,0x01,0x00] + v_cmp_u_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x50,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_u_f64_e64 s[10:11], 
s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0xee,0x01,0x00] + v_cmp_u_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x04,0x02,0x00] @@ -48341,9 +50502,15 @@ v_cmp_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x52,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x52,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x52,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x52,0xd0,0x01,0x09,0x00,0x00] @@ -48353,9 +50520,15 @@ v_cmp_nge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_nge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x52,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x04,0x02,0x00] @@ -48452,9 +50625,15 @@ v_cmp_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nlg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nlg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x54,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nlg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x54,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nlg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x54,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nlg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x54,0xd0,0x01,0x09,0x00,0x00] @@ -48464,9 +50643,15 @@ v_cmp_nlg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nlg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: 
[0x0a,0x00,0x54,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x54,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nlg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nlg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x04,0x02,0x00] @@ -48563,9 +50748,15 @@ v_cmp_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_ngt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x08,0x00,0x00] +v_cmp_ngt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x56,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_ngt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x56,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_ngt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x56,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_ngt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x56,0xd0,0x01,0x09,0x00,0x00] @@ -48575,9 +50766,15 @@ v_cmp_ngt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x00,0x01,0x00] +v_cmp_ngt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x82,0x01,0x00] + v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x56,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_ngt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0xee,0x01,0x00] + v_cmp_ngt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x04,0x02,0x00] @@ -48674,9 +50871,15 @@ v_cmp_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nle_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nle_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x58,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nle_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x58,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nle_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x58,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nle_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x58,0xd0,0x01,0x09,0x00,0x00] @@ -48686,9 +50889,15 @@ v_cmp_nle_f64_e64 
s[10:11], v[254:255], s[4:5] v_cmp_nle_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nle_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nle_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x58,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nle_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nle_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x04,0x02,0x00] @@ -48785,9 +50994,15 @@ v_cmp_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_neq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x08,0x00,0x00] +v_cmp_neq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x5a,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_neq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x5a,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_neq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x5a,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_neq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x5a,0xd0,0x01,0x09,0x00,0x00] @@ -48797,9 +51012,15 @@ v_cmp_neq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_neq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x00,0x01,0x00] +v_cmp_neq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x82,0x01,0x00] + v_cmp_neq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_neq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0xee,0x01,0x00] + v_cmp_neq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x04,0x02,0x00] @@ -48896,9 +51117,15 @@ v_cmp_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nlt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nlt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x5c,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nlt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x5c,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nlt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: 
[0x0a,0x00,0x5c,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nlt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x5c,0xd0,0x01,0x09,0x00,0x00] @@ -48908,9 +51135,15 @@ v_cmp_nlt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nlt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nlt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nlt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x04,0x02,0x00] @@ -49007,9 +51240,15 @@ v_cmp_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_tru_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x08,0x00,0x00] +v_cmp_tru_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x5e,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_tru_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x5e,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_tru_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x5e,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_tru_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x5e,0xd0,0x01,0x09,0x00,0x00] @@ -49019,9 +51258,15 @@ v_cmp_tru_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_tru_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x00,0x01,0x00] +v_cmp_tru_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x82,0x01,0x00] + v_cmp_tru_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_tru_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0xee,0x01,0x00] + v_cmp_tru_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x04,0x02,0x00] @@ -49118,9 +51363,15 @@ v_cmpx_f_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_f_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x60,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_f_f64_e64 s[10:11], -1, s[4:5] +// CHECK: 
[0x0a,0x00,0x60,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_f_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x60,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_f_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x60,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_f_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x60,0xd0,0x01,0x09,0x00,0x00] @@ -49130,9 +51381,15 @@ v_cmpx_f_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_f_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_f_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_f_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_f_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_f_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x04,0x02,0x00] @@ -49229,9 +51486,15 @@ v_cmpx_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_lt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x62,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_lt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x62,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_lt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x62,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_lt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x62,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_lt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x62,0xd0,0x01,0x09,0x00,0x00] @@ -49241,9 +51504,15 @@ v_cmpx_lt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_lt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_lt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_lt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x04,0x02,0x00] @@ -49340,9 +51609,15 @@ v_cmpx_eq_f64_e64 ttmp[10:11], 
s[4:5], s[4:5] v_cmpx_eq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x64,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_eq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x64,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_eq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x64,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_eq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x64,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_eq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x64,0xd0,0x01,0x09,0x00,0x00] @@ -49352,9 +51627,15 @@ v_cmpx_eq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_eq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_eq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_eq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x04,0x02,0x00] @@ -49451,9 +51732,15 @@ v_cmpx_le_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_le_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x66,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_le_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x66,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_le_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x66,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_le_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x66,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_le_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x66,0xd0,0x01,0x09,0x00,0x00] @@ -49463,9 +51750,15 @@ v_cmpx_le_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_le_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_le_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_le_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_le_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: 
[0x0a,0x00,0x66,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_le_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x04,0x02,0x00] @@ -49562,9 +51855,15 @@ v_cmpx_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_gt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x68,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_gt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x68,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_gt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x68,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_gt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x68,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_gt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x68,0xd0,0x01,0x09,0x00,0x00] @@ -49574,9 +51873,15 @@ v_cmpx_gt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_gt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_gt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_gt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x04,0x02,0x00] @@ -49673,9 +51978,15 @@ v_cmpx_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_lg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6a,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_lg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6a,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_lg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6a,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_lg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6a,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_lg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6a,0xd0,0x01,0x09,0x00,0x00] @@ -49685,9 +51996,15 @@ v_cmpx_lg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_lg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: 
[0x0a,0x00,0x6a,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_lg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_lg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x04,0x02,0x00] @@ -49784,9 +52101,15 @@ v_cmpx_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_ge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6c,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_ge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6c,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_ge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6c,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_ge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6c,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_ge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6c,0xd0,0x01,0x09,0x00,0x00] @@ -49796,9 +52119,15 @@ v_cmpx_ge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_ge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_ge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_ge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x04,0x02,0x00] @@ -49895,9 +52224,15 @@ v_cmpx_o_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_o_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6e,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_o_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6e,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_o_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6e,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_o_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6e,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_o_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6e,0xd0,0x01,0x09,0x00,0x00] @@ -49907,9 +52242,15 @@ v_cmpx_o_f64_e64 s[10:11], 
v[254:255], s[4:5] v_cmpx_o_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_o_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_o_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_o_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_o_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x04,0x02,0x00] @@ -50006,9 +52347,15 @@ v_cmpx_u_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_u_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x70,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_u_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x70,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_u_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x70,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_u_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x70,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_u_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x70,0xd0,0x01,0x09,0x00,0x00] @@ -50018,9 +52365,15 @@ v_cmpx_u_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_u_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_u_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_u_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_u_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_u_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x04,0x02,0x00] @@ -50117,9 +52470,15 @@ v_cmpx_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x72,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x72,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x72,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x72,0xd0,0xf7,0x08,0x00,0x00] 
+ v_cmpx_nge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x72,0xd0,0x01,0x09,0x00,0x00] @@ -50129,9 +52488,15 @@ v_cmpx_nge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x04,0x02,0x00] @@ -50228,9 +52593,15 @@ v_cmpx_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nlg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x74,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nlg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x74,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nlg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x74,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nlg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x74,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nlg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x74,0xd0,0x01,0x09,0x00,0x00] @@ -50240,9 +52611,15 @@ v_cmpx_nlg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nlg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nlg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nlg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x04,0x02,0x00] @@ -50339,9 +52716,15 @@ v_cmpx_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_ngt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x76,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_ngt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x76,0xd0,0xc1,0x08,0x00,0x00] + 
v_cmpx_ngt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x76,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_ngt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x76,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_ngt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x76,0xd0,0x01,0x09,0x00,0x00] @@ -50351,9 +52734,15 @@ v_cmpx_ngt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_ngt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_ngt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_ngt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x04,0x02,0x00] @@ -50450,9 +52839,15 @@ v_cmpx_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nle_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x78,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nle_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x78,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nle_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x78,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nle_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x78,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nle_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x78,0xd0,0x01,0x09,0x00,0x00] @@ -50462,9 +52857,15 @@ v_cmpx_nle_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nle_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nle_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nle_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x04,0x02,0x00] @@ -50561,9 +52962,15 @@ v_cmpx_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5] 
v_cmpx_neq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7a,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_neq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7a,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_neq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7a,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_neq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7a,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_neq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7a,0xd0,0x01,0x09,0x00,0x00] @@ -50573,9 +52980,15 @@ v_cmpx_neq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_neq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_neq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_neq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x04,0x02,0x00] @@ -50672,9 +53085,15 @@ v_cmpx_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nlt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7c,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nlt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7c,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nlt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7c,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nlt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7c,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nlt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7c,0xd0,0x01,0x09,0x00,0x00] @@ -50684,9 +53103,15 @@ v_cmpx_nlt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nlt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nlt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: 
[0x0a,0x00,0x7c,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nlt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x04,0x02,0x00] @@ -50783,9 +53208,15 @@ v_cmpx_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_tru_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7e,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_tru_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7e,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_tru_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7e,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_tru_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7e,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_tru_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7e,0xd0,0x01,0x09,0x00,0x00] @@ -50795,9 +53226,15 @@ v_cmpx_tru_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_tru_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_tru_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_tru_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x04,0x02,0x00] @@ -50906,9 +53343,15 @@ v_cmps_f_f32_e64 tma, 0, s2 v_cmps_f_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x80,0xd0,0x80,0x04,0x00,0x00] +v_cmps_f_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x80,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_f_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x80,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_f_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x80,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_f_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x80,0xd0,0x01,0x05,0x00,0x00] @@ -50957,11 +53400,14 @@ v_cmps_f_f32_e64 s[10:11], 0, exec_hi v_cmps_f_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x00,0x01,0x00] +v_cmps_f_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x82,0x01,0x00] + v_cmps_f_f32_e64 s[10:11], 0, 0.5 
// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_f_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_f_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xee,0x01,0x00] v_cmps_f_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x02,0x00] @@ -50969,9 +53415,15 @@ v_cmps_f_f32_e64 s[10:11], 0, v2 v_cmps_f_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_f_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x00,0x20] + v_cmps_f_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x00,0x40] +v_cmps_f_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x00,0x60] + v_cmps_lt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x82,0x7c] @@ -51065,9 +53517,15 @@ v_cmps_lt_f32_e64 tma, 0, s2 v_cmps_lt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x82,0xd0,0x80,0x04,0x00,0x00] +v_cmps_lt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x82,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_lt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x82,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_lt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x82,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_lt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x82,0xd0,0x01,0x05,0x00,0x00] @@ -51116,11 +53574,14 @@ v_cmps_lt_f32_e64 s[10:11], 0, exec_hi v_cmps_lt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x00,0x01,0x00] +v_cmps_lt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x82,0x01,0x00] + v_cmps_lt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_lt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_lt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xee,0x01,0x00] v_cmps_lt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x02,0x00] @@ -51128,9 +53589,15 @@ v_cmps_lt_f32_e64 s[10:11], 0, v2 v_cmps_lt_f32_e64 s[10:11], 0, v255 // CHECK: 
[0x0a,0x00,0x82,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_lt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x00,0x20] + v_cmps_lt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x00,0x40] +v_cmps_lt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x00,0x60] + v_cmps_eq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x84,0x7c] @@ -51224,9 +53691,15 @@ v_cmps_eq_f32_e64 tma, 0, s2 v_cmps_eq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x84,0xd0,0x80,0x04,0x00,0x00] +v_cmps_eq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x84,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_eq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x84,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_eq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x84,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_eq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x84,0xd0,0x01,0x05,0x00,0x00] @@ -51275,11 +53748,14 @@ v_cmps_eq_f32_e64 s[10:11], 0, exec_hi v_cmps_eq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x00,0x01,0x00] +v_cmps_eq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x82,0x01,0x00] + v_cmps_eq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_eq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_eq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xee,0x01,0x00] v_cmps_eq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x02,0x00] @@ -51287,9 +53763,15 @@ v_cmps_eq_f32_e64 s[10:11], 0, v2 v_cmps_eq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_eq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x00,0x20] + v_cmps_eq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x00,0x40] +v_cmps_eq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x00,0x60] + v_cmps_le_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x86,0x7c] @@ -51383,9 +53865,15 @@ v_cmps_le_f32_e64 
tma, 0, s2 v_cmps_le_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x86,0xd0,0x80,0x04,0x00,0x00] +v_cmps_le_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x86,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_le_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x86,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_le_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x86,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_le_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x86,0xd0,0x01,0x05,0x00,0x00] @@ -51434,11 +53922,14 @@ v_cmps_le_f32_e64 s[10:11], 0, exec_hi v_cmps_le_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x00,0x01,0x00] +v_cmps_le_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x82,0x01,0x00] + v_cmps_le_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_le_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_le_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xee,0x01,0x00] v_cmps_le_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x02,0x00] @@ -51446,9 +53937,15 @@ v_cmps_le_f32_e64 s[10:11], 0, v2 v_cmps_le_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_le_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x00,0x20] + v_cmps_le_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x00,0x40] +v_cmps_le_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x00,0x60] + v_cmps_gt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x88,0x7c] @@ -51542,9 +54039,15 @@ v_cmps_gt_f32_e64 tma, 0, s2 v_cmps_gt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x88,0xd0,0x80,0x04,0x00,0x00] +v_cmps_gt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x88,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_gt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x88,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_gt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x88,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_gt_f32_e64 s[10:11], v1, s2 // CHECK: 
[0x0a,0x00,0x88,0xd0,0x01,0x05,0x00,0x00] @@ -51593,11 +54096,14 @@ v_cmps_gt_f32_e64 s[10:11], 0, exec_hi v_cmps_gt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x00,0x01,0x00] +v_cmps_gt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x82,0x01,0x00] + v_cmps_gt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_gt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_gt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xee,0x01,0x00] v_cmps_gt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x02,0x00] @@ -51605,9 +54111,15 @@ v_cmps_gt_f32_e64 s[10:11], 0, v2 v_cmps_gt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_gt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x00,0x20] + v_cmps_gt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x00,0x40] +v_cmps_gt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x00,0x60] + v_cmps_lg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x8a,0x7c] @@ -51701,9 +54213,15 @@ v_cmps_lg_f32_e64 tma, 0, s2 v_cmps_lg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00] +v_cmps_lg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x8a,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_lg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x8a,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_lg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x8a,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_lg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x8a,0xd0,0x01,0x05,0x00,0x00] @@ -51752,11 +54270,14 @@ v_cmps_lg_f32_e64 s[10:11], 0, exec_hi v_cmps_lg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x00,0x01,0x00] +v_cmps_lg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x82,0x01,0x00] + v_cmps_lg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_lg_f32_e64 s[10:11], 0, scc -// CHECK: 
[0x0a,0x00,0x8a,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_lg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xee,0x01,0x00] v_cmps_lg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x02,0x00] @@ -51764,9 +54285,15 @@ v_cmps_lg_f32_e64 s[10:11], 0, v2 v_cmps_lg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_lg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x20] + v_cmps_lg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x40] +v_cmps_lg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x60] + v_cmps_ge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x8c,0x7c] @@ -51860,9 +54387,15 @@ v_cmps_ge_f32_e64 tma, 0, s2 v_cmps_ge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00] +v_cmps_ge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x8c,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_ge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x8c,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_ge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x8c,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_ge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x8c,0xd0,0x01,0x05,0x00,0x00] @@ -51911,11 +54444,14 @@ v_cmps_ge_f32_e64 s[10:11], 0, exec_hi v_cmps_ge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x00,0x01,0x00] +v_cmps_ge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x82,0x01,0x00] + v_cmps_ge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_ge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_ge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xee,0x01,0x00] v_cmps_ge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x04,0x02,0x00] @@ -51923,9 +54459,15 @@ v_cmps_ge_f32_e64 s[10:11], 0, v2 v_cmps_ge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_ge_f32_e64 s[10:11], neg(0), s2 +// CHECK: 
[0x0a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x20] + v_cmps_ge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x40] +v_cmps_ge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x60] + v_cmps_o_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x8e,0x7c] @@ -52019,9 +54561,15 @@ v_cmps_o_f32_e64 tma, 0, s2 v_cmps_o_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00] +v_cmps_o_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x8e,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_o_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x8e,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_o_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x8e,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_o_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x8e,0xd0,0x01,0x05,0x00,0x00] @@ -52070,11 +54618,14 @@ v_cmps_o_f32_e64 s[10:11], 0, exec_hi v_cmps_o_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x00,0x01,0x00] +v_cmps_o_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x82,0x01,0x00] + v_cmps_o_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_o_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_o_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xee,0x01,0x00] v_cmps_o_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x02,0x00] @@ -52082,9 +54633,15 @@ v_cmps_o_f32_e64 s[10:11], 0, v2 v_cmps_o_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_o_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x20] + v_cmps_o_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x40] +v_cmps_o_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x60] + v_cmps_u_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x90,0x7c] @@ -52178,9 +54735,15 @@ v_cmps_u_f32_e64 tma, 0, s2 v_cmps_u_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x90,0xd0,0x80,0x04,0x00,0x00] 
+v_cmps_u_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x90,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_u_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x90,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_u_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x90,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_u_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x90,0xd0,0x01,0x05,0x00,0x00] @@ -52229,11 +54792,14 @@ v_cmps_u_f32_e64 s[10:11], 0, exec_hi v_cmps_u_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x00,0x01,0x00] +v_cmps_u_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x82,0x01,0x00] + v_cmps_u_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_u_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_u_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xee,0x01,0x00] v_cmps_u_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x02,0x00] @@ -52241,9 +54807,15 @@ v_cmps_u_f32_e64 s[10:11], 0, v2 v_cmps_u_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_u_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x00,0x20] + v_cmps_u_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x00,0x40] +v_cmps_u_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x00,0x60] + v_cmps_nge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x92,0x7c] @@ -52337,9 +54909,15 @@ v_cmps_nge_f32_e64 tma, 0, s2 v_cmps_nge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x92,0xd0,0x80,0x04,0x00,0x00] +v_cmps_nge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x92,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_nge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x92,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_nge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x92,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_nge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x92,0xd0,0x01,0x05,0x00,0x00] @@ -52388,11 +54966,14 @@ v_cmps_nge_f32_e64 s[10:11], 0, exec_hi 
v_cmps_nge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x00,0x01,0x00] +v_cmps_nge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x82,0x01,0x00] + v_cmps_nge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_nge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_nge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xee,0x01,0x00] v_cmps_nge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x02,0x00] @@ -52400,9 +54981,15 @@ v_cmps_nge_f32_e64 s[10:11], 0, v2 v_cmps_nge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_nge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x00,0x20] + v_cmps_nge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x00,0x40] +v_cmps_nge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x00,0x60] + v_cmps_nlg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x94,0x7c] @@ -52496,9 +55083,15 @@ v_cmps_nlg_f32_e64 tma, 0, s2 v_cmps_nlg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x94,0xd0,0x80,0x04,0x00,0x00] +v_cmps_nlg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x94,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_nlg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x94,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_nlg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x94,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_nlg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x94,0xd0,0x01,0x05,0x00,0x00] @@ -52547,11 +55140,14 @@ v_cmps_nlg_f32_e64 s[10:11], 0, exec_hi v_cmps_nlg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x00,0x01,0x00] +v_cmps_nlg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x82,0x01,0x00] + v_cmps_nlg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_nlg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_nlg_f32_e64 s[10:11], 0, -4.0 +// CHECK: 
[0x0a,0x00,0x94,0xd0,0x80,0xee,0x01,0x00] v_cmps_nlg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x02,0x00] @@ -52559,9 +55155,15 @@ v_cmps_nlg_f32_e64 s[10:11], 0, v2 v_cmps_nlg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_nlg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x00,0x20] + v_cmps_nlg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x00,0x40] +v_cmps_nlg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x00,0x60] + v_cmps_ngt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x96,0x7c] @@ -52655,9 +55257,15 @@ v_cmps_ngt_f32_e64 tma, 0, s2 v_cmps_ngt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x96,0xd0,0x80,0x04,0x00,0x00] +v_cmps_ngt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x96,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_ngt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x96,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_ngt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x96,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_ngt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x96,0xd0,0x01,0x05,0x00,0x00] @@ -52706,11 +55314,14 @@ v_cmps_ngt_f32_e64 s[10:11], 0, exec_hi v_cmps_ngt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x00,0x01,0x00] +v_cmps_ngt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x82,0x01,0x00] + v_cmps_ngt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_ngt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_ngt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xee,0x01,0x00] v_cmps_ngt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x02,0x00] @@ -52718,9 +55329,15 @@ v_cmps_ngt_f32_e64 s[10:11], 0, v2 v_cmps_ngt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_ngt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x00,0x20] + v_cmps_ngt_f32_e64 s[10:11], 
0, -s2 // CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x00,0x40] +v_cmps_ngt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x00,0x60] + v_cmps_nle_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x98,0x7c] @@ -52814,9 +55431,15 @@ v_cmps_nle_f32_e64 tma, 0, s2 v_cmps_nle_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x98,0xd0,0x80,0x04,0x00,0x00] +v_cmps_nle_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x98,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_nle_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x98,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_nle_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x98,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_nle_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x98,0xd0,0x01,0x05,0x00,0x00] @@ -52865,11 +55488,14 @@ v_cmps_nle_f32_e64 s[10:11], 0, exec_hi v_cmps_nle_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x00,0x01,0x00] +v_cmps_nle_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x82,0x01,0x00] + v_cmps_nle_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_nle_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_nle_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xee,0x01,0x00] v_cmps_nle_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x02,0x00] @@ -52877,9 +55503,15 @@ v_cmps_nle_f32_e64 s[10:11], 0, v2 v_cmps_nle_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_nle_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x00,0x20] + v_cmps_nle_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x00,0x40] +v_cmps_nle_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x00,0x60] + v_cmps_neq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x9a,0x7c] @@ -52973,9 +55605,15 @@ v_cmps_neq_f32_e64 tma, 0, s2 v_cmps_neq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00] +v_cmps_neq_f32_e64 s[10:11], -1, s2 +// 
CHECK: [0x0a,0x00,0x9a,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_neq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x9a,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_neq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x9a,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_neq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x9a,0xd0,0x01,0x05,0x00,0x00] @@ -53024,11 +55662,14 @@ v_cmps_neq_f32_e64 s[10:11], 0, exec_hi v_cmps_neq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x00,0x01,0x00] +v_cmps_neq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x82,0x01,0x00] + v_cmps_neq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_neq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_neq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xee,0x01,0x00] v_cmps_neq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x02,0x00] @@ -53036,9 +55677,15 @@ v_cmps_neq_f32_e64 s[10:11], 0, v2 v_cmps_neq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_neq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x20] + v_cmps_neq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x40] +v_cmps_neq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x60] + v_cmps_nlt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x9c,0x7c] @@ -53132,9 +55779,15 @@ v_cmps_nlt_f32_e64 tma, 0, s2 v_cmps_nlt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00] +v_cmps_nlt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x9c,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_nlt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x9c,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_nlt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x9c,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_nlt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x9c,0xd0,0x01,0x05,0x00,0x00] @@ -53183,11 +55836,14 @@ v_cmps_nlt_f32_e64 s[10:11], 0, exec_hi 
v_cmps_nlt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x00,0x01,0x00] +v_cmps_nlt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x82,0x01,0x00] + v_cmps_nlt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_nlt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_nlt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xee,0x01,0x00] v_cmps_nlt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x02,0x00] @@ -53195,9 +55851,15 @@ v_cmps_nlt_f32_e64 s[10:11], 0, v2 v_cmps_nlt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_nlt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x20] + v_cmps_nlt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x40] +v_cmps_nlt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x60] + v_cmps_tru_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0x9e,0x7c] @@ -53291,9 +55953,15 @@ v_cmps_tru_f32_e64 tma, 0, s2 v_cmps_tru_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00] +v_cmps_tru_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x9e,0xd0,0xc1,0x04,0x00,0x00] + v_cmps_tru_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x9e,0xd0,0xf0,0x04,0x00,0x00] +v_cmps_tru_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x9e,0xd0,0xf7,0x04,0x00,0x00] + v_cmps_tru_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x9e,0xd0,0x01,0x05,0x00,0x00] @@ -53342,11 +56010,14 @@ v_cmps_tru_f32_e64 s[10:11], 0, exec_hi v_cmps_tru_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x00,0x01,0x00] +v_cmps_tru_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x82,0x01,0x00] + v_cmps_tru_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xe0,0x01,0x00] -v_cmps_tru_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xfa,0x01,0x00] +v_cmps_tru_f32_e64 s[10:11], 0, -4.0 +// CHECK: 
[0x0a,0x00,0x9e,0xd0,0x80,0xee,0x01,0x00] v_cmps_tru_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x02,0x00] @@ -53354,9 +56025,15 @@ v_cmps_tru_f32_e64 s[10:11], 0, v2 v_cmps_tru_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xfe,0x03,0x00] +v_cmps_tru_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x20] + v_cmps_tru_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x40] +v_cmps_tru_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_f_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xa0,0x7c] @@ -53450,9 +56127,15 @@ v_cmpsx_f_f32_e64 tma, 0, s2 v_cmpsx_f_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_f_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xa0,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_f_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xa0,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_f_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xa0,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_f_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xa0,0xd0,0x01,0x05,0x00,0x00] @@ -53501,11 +56184,14 @@ v_cmpsx_f_f32_e64 s[10:11], 0, exec_hi v_cmpsx_f_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_f_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_f_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_f_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_f_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_f_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x02,0x00] @@ -53513,9 +56199,15 @@ v_cmpsx_f_f32_e64 s[10:11], 0, v2 v_cmpsx_f_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_f_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_f_f32_e64 s[10:11], 0, -s2 // CHECK: 
[0x0a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_f_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_lt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xa2,0x7c] @@ -53609,9 +56301,15 @@ v_cmpsx_lt_f32_e64 tma, 0, s2 v_cmpsx_lt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_lt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xa2,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_lt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xa2,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_lt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xa2,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_lt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xa2,0xd0,0x01,0x05,0x00,0x00] @@ -53660,11 +56358,14 @@ v_cmpsx_lt_f32_e64 s[10:11], 0, exec_hi v_cmpsx_lt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_lt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_lt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_lt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_lt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_lt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x02,0x00] @@ -53672,9 +56373,15 @@ v_cmpsx_lt_f32_e64 s[10:11], 0, v2 v_cmpsx_lt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_lt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_lt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_lt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_eq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xa4,0x7c] @@ -53768,9 +56475,15 @@ v_cmpsx_eq_f32_e64 tma, 0, s2 v_cmpsx_eq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_eq_f32_e64 s[10:11], -1, s2 +// CHECK: 
[0x0a,0x00,0xa4,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_eq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xa4,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_eq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xa4,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_eq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xa4,0xd0,0x01,0x05,0x00,0x00] @@ -53819,11 +56532,14 @@ v_cmpsx_eq_f32_e64 s[10:11], 0, exec_hi v_cmpsx_eq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_eq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_eq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_eq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_eq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_eq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x02,0x00] @@ -53831,9 +56547,15 @@ v_cmpsx_eq_f32_e64 s[10:11], 0, v2 v_cmpsx_eq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_eq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_eq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_eq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_le_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xa6,0x7c] @@ -53927,9 +56649,15 @@ v_cmpsx_le_f32_e64 tma, 0, s2 v_cmpsx_le_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_le_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xa6,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_le_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xa6,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_le_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xa6,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_le_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xa6,0xd0,0x01,0x05,0x00,0x00] @@ -53978,11 +56706,14 @@ v_cmpsx_le_f32_e64 s[10:11], 0, exec_hi v_cmpsx_le_f32_e64 
s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_le_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_le_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_le_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_le_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_le_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x02,0x00] @@ -53990,9 +56721,15 @@ v_cmpsx_le_f32_e64 s[10:11], 0, v2 v_cmpsx_le_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_le_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_le_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_le_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_gt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xa8,0x7c] @@ -54086,9 +56823,15 @@ v_cmpsx_gt_f32_e64 tma, 0, s2 v_cmpsx_gt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_gt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xa8,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_gt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xa8,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_gt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xa8,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_gt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xa8,0xd0,0x01,0x05,0x00,0x00] @@ -54137,11 +56880,14 @@ v_cmpsx_gt_f32_e64 s[10:11], 0, exec_hi v_cmpsx_gt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_gt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_gt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_gt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_gt_f32_e64 s[10:11], 0, -4.0 +// CHECK: 
[0x0a,0x00,0xa8,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_gt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x02,0x00] @@ -54149,9 +56895,15 @@ v_cmpsx_gt_f32_e64 s[10:11], 0, v2 v_cmpsx_gt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_gt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_gt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_gt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_lg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xaa,0x7c] @@ -54245,9 +56997,15 @@ v_cmpsx_lg_f32_e64 tma, 0, s2 v_cmpsx_lg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_lg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xaa,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_lg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xaa,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_lg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xaa,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_lg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xaa,0xd0,0x01,0x05,0x00,0x00] @@ -54296,11 +57054,14 @@ v_cmpsx_lg_f32_e64 s[10:11], 0, exec_hi v_cmpsx_lg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_lg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_lg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_lg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_lg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_lg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x02,0x00] @@ -54308,9 +57069,15 @@ v_cmpsx_lg_f32_e64 s[10:11], 0, v2 v_cmpsx_lg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_lg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_lg_f32_e64 s[10:11], 
0, -s2 // CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_lg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_ge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xac,0x7c] @@ -54404,9 +57171,15 @@ v_cmpsx_ge_f32_e64 tma, 0, s2 v_cmpsx_ge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_ge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xac,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_ge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xac,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_ge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xac,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_ge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xac,0xd0,0x01,0x05,0x00,0x00] @@ -54455,11 +57228,14 @@ v_cmpsx_ge_f32_e64 s[10:11], 0, exec_hi v_cmpsx_ge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_ge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_ge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_ge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_ge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_ge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x02,0x00] @@ -54467,9 +57243,15 @@ v_cmpsx_ge_f32_e64 s[10:11], 0, v2 v_cmpsx_ge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_ge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_ge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_ge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_o_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xae,0x7c] @@ -54563,9 +57345,15 @@ v_cmpsx_o_f32_e64 tma, 0, s2 v_cmpsx_o_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_o_f32_e64 s[10:11], -1, s2 +// CHECK: 
[0x0a,0x00,0xae,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_o_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xae,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_o_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xae,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_o_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xae,0xd0,0x01,0x05,0x00,0x00] @@ -54614,11 +57402,14 @@ v_cmpsx_o_f32_e64 s[10:11], 0, exec_hi v_cmpsx_o_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_o_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_o_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_o_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_o_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_o_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x02,0x00] @@ -54626,9 +57417,15 @@ v_cmpsx_o_f32_e64 s[10:11], 0, v2 v_cmpsx_o_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_o_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_o_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_o_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_u_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xb0,0x7c] @@ -54722,9 +57519,15 @@ v_cmpsx_u_f32_e64 tma, 0, s2 v_cmpsx_u_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_u_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xb0,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_u_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xb0,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_u_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xb0,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_u_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xb0,0xd0,0x01,0x05,0x00,0x00] @@ -54773,11 +57576,14 @@ v_cmpsx_u_f32_e64 s[10:11], 0, exec_hi v_cmpsx_u_f32_e64 s[10:11], 0, 0 // CHECK: 
[0x0a,0x00,0xb0,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_u_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_u_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_u_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_u_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_u_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x02,0x00] @@ -54785,9 +57591,15 @@ v_cmpsx_u_f32_e64 s[10:11], 0, v2 v_cmpsx_u_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_u_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_u_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_u_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_nge_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xb2,0x7c] @@ -54881,9 +57693,15 @@ v_cmpsx_nge_f32_e64 tma, 0, s2 v_cmpsx_nge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_nge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xb2,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_nge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xb2,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_nge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xb2,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_nge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xb2,0xd0,0x01,0x05,0x00,0x00] @@ -54932,11 +57750,14 @@ v_cmpsx_nge_f32_e64 s[10:11], 0, exec_hi v_cmpsx_nge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_nge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_nge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_nge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_nge_f32_e64 s[10:11], 0, -4.0 +// CHECK: 
[0x0a,0x00,0xb2,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_nge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x02,0x00] @@ -54944,9 +57765,15 @@ v_cmpsx_nge_f32_e64 s[10:11], 0, v2 v_cmpsx_nge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_nge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_nge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_nge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_nlg_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xb4,0x7c] @@ -55040,9 +57867,15 @@ v_cmpsx_nlg_f32_e64 tma, 0, s2 v_cmpsx_nlg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_nlg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xb4,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_nlg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xb4,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_nlg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xb4,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_nlg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xb4,0xd0,0x01,0x05,0x00,0x00] @@ -55091,11 +57924,14 @@ v_cmpsx_nlg_f32_e64 s[10:11], 0, exec_hi v_cmpsx_nlg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_nlg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_nlg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_nlg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_nlg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_nlg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x02,0x00] @@ -55103,9 +57939,15 @@ v_cmpsx_nlg_f32_e64 s[10:11], 0, v2 v_cmpsx_nlg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_nlg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x20] + 
v_cmpsx_nlg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_nlg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_ngt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xb6,0x7c] @@ -55199,9 +58041,15 @@ v_cmpsx_ngt_f32_e64 tma, 0, s2 v_cmpsx_ngt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_ngt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xb6,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_ngt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xb6,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_ngt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xb6,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_ngt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xb6,0xd0,0x01,0x05,0x00,0x00] @@ -55250,11 +58098,14 @@ v_cmpsx_ngt_f32_e64 s[10:11], 0, exec_hi v_cmpsx_ngt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_ngt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_ngt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_ngt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_ngt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_ngt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x02,0x00] @@ -55262,9 +58113,15 @@ v_cmpsx_ngt_f32_e64 s[10:11], 0, v2 v_cmpsx_ngt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_ngt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_ngt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_ngt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_nle_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xb8,0x7c] @@ -55358,9 +58215,15 @@ v_cmpsx_nle_f32_e64 tma, 0, s2 v_cmpsx_nle_f32_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_nle_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xb8,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_nle_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xb8,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_nle_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xb8,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_nle_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xb8,0xd0,0x01,0x05,0x00,0x00] @@ -55409,11 +58272,14 @@ v_cmpsx_nle_f32_e64 s[10:11], 0, exec_hi v_cmpsx_nle_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_nle_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_nle_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_nle_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_nle_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_nle_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x02,0x00] @@ -55421,9 +58287,15 @@ v_cmpsx_nle_f32_e64 s[10:11], 0, v2 v_cmpsx_nle_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_nle_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_nle_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_nle_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_neq_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xba,0x7c] @@ -55517,9 +58389,15 @@ v_cmpsx_neq_f32_e64 tma, 0, s2 v_cmpsx_neq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_neq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xba,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_neq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xba,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_neq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xba,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_neq_f32_e64 s[10:11], v1, s2 // CHECK: 
[0x0a,0x00,0xba,0xd0,0x01,0x05,0x00,0x00] @@ -55568,11 +58446,14 @@ v_cmpsx_neq_f32_e64 s[10:11], 0, exec_hi v_cmpsx_neq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_neq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_neq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_neq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_neq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_neq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x02,0x00] @@ -55580,9 +58461,15 @@ v_cmpsx_neq_f32_e64 s[10:11], 0, v2 v_cmpsx_neq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_neq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_neq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_neq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_nlt_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xbc,0x7c] @@ -55676,9 +58563,15 @@ v_cmpsx_nlt_f32_e64 tma, 0, s2 v_cmpsx_nlt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_nlt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xbc,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_nlt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xbc,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_nlt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xbc,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_nlt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xbc,0xd0,0x01,0x05,0x00,0x00] @@ -55727,11 +58620,14 @@ v_cmpsx_nlt_f32_e64 s[10:11], 0, exec_hi v_cmpsx_nlt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_nlt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_nlt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xe0,0x01,0x00] 
-v_cmpsx_nlt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_nlt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_nlt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x02,0x00] @@ -55739,9 +58635,15 @@ v_cmpsx_nlt_f32_e64 s[10:11], 0, v2 v_cmpsx_nlt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_nlt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_nlt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_nlt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x60] + v_cmpsx_tru_f32 vcc, s1, v2 // CHECK: [0x01,0x04,0xbe,0x7c] @@ -55835,9 +58737,15 @@ v_cmpsx_tru_f32_e64 tma, 0, s2 v_cmpsx_tru_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00] +v_cmpsx_tru_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0xbe,0xd0,0xc1,0x04,0x00,0x00] + v_cmpsx_tru_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0xbe,0xd0,0xf0,0x04,0x00,0x00] +v_cmpsx_tru_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0xbe,0xd0,0xf7,0x04,0x00,0x00] + v_cmpsx_tru_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x00,0x00] @@ -55886,11 +58794,14 @@ v_cmpsx_tru_f32_e64 s[10:11], 0, exec_hi v_cmpsx_tru_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x00,0x01,0x00] +v_cmpsx_tru_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x82,0x01,0x00] + v_cmpsx_tru_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xe0,0x01,0x00] -v_cmpsx_tru_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfa,0x01,0x00] +v_cmpsx_tru_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xee,0x01,0x00] v_cmpsx_tru_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x02,0x00] @@ -55898,9 +58809,15 @@ v_cmpsx_tru_f32_e64 s[10:11], 0, v2 v_cmpsx_tru_f32_e64 s[10:11], 0, v255 // CHECK: 
[0x0a,0x00,0xbe,0xd0,0x80,0xfe,0x03,0x00] +v_cmpsx_tru_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x20] + v_cmpsx_tru_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x40] +v_cmpsx_tru_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x60] + v_cmps_f_f64 vcc, s[2:3], v[2:3] // CHECK: [0x02,0x04,0xc0,0x7c] @@ -55982,9 +58899,15 @@ v_cmps_f_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_f_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0x08,0x00,0x00] +v_cmps_f_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xc0,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_f_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xc0,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_f_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xc0,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_f_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xc0,0xd0,0x01,0x09,0x00,0x00] @@ -55994,9 +58917,15 @@ v_cmps_f_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_f_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x00,0x01,0x00] +v_cmps_f_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x82,0x01,0x00] + v_cmps_f_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_f_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0xee,0x01,0x00] + v_cmps_f_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x04,0x02,0x00] @@ -56093,9 +59022,15 @@ v_cmps_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_lt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0x08,0x00,0x00] +v_cmps_lt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xc2,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_lt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xc2,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_lt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xc2,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_lt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xc2,0xd0,0x01,0x09,0x00,0x00] 
@@ -56105,9 +59040,15 @@ v_cmps_lt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_lt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x00,0x01,0x00] +v_cmps_lt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x82,0x01,0x00] + v_cmps_lt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_lt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0xee,0x01,0x00] + v_cmps_lt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x04,0x02,0x00] @@ -56204,9 +59145,15 @@ v_cmps_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_eq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0x08,0x00,0x00] +v_cmps_eq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xc4,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_eq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xc4,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_eq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xc4,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_eq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xc4,0xd0,0x01,0x09,0x00,0x00] @@ -56216,9 +59163,15 @@ v_cmps_eq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_eq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x00,0x01,0x00] +v_cmps_eq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x82,0x01,0x00] + v_cmps_eq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_eq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0xee,0x01,0x00] + v_cmps_eq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x04,0x02,0x00] @@ -56315,9 +59268,15 @@ v_cmps_le_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_le_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0x08,0x00,0x00] +v_cmps_le_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xc6,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_le_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xc6,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_le_f64_e64 s[10:11], 
-4.0, s[4:5] +// CHECK: [0x0a,0x00,0xc6,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_le_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xc6,0xd0,0x01,0x09,0x00,0x00] @@ -56327,9 +59286,15 @@ v_cmps_le_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_le_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x00,0x01,0x00] +v_cmps_le_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x82,0x01,0x00] + v_cmps_le_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_le_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0xee,0x01,0x00] + v_cmps_le_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x04,0x02,0x00] @@ -56426,9 +59391,15 @@ v_cmps_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_gt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0x08,0x00,0x00] +v_cmps_gt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xc8,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_gt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xc8,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_gt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xc8,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_gt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xc8,0xd0,0x01,0x09,0x00,0x00] @@ -56438,9 +59409,15 @@ v_cmps_gt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_gt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x00,0x01,0x00] +v_cmps_gt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x82,0x01,0x00] + v_cmps_gt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_gt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0xee,0x01,0x00] + v_cmps_gt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x04,0x02,0x00] @@ -56537,9 +59514,15 @@ v_cmps_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_lg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xca,0xd0,0x80,0x08,0x00,0x00] +v_cmps_lg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: 
[0x0a,0x00,0xca,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_lg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xca,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_lg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xca,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_lg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xca,0xd0,0x01,0x09,0x00,0x00] @@ -56549,9 +59532,15 @@ v_cmps_lg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_lg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x00,0x01,0x00] +v_cmps_lg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x82,0x01,0x00] + v_cmps_lg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xca,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_lg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0xee,0x01,0x00] + v_cmps_lg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x04,0x02,0x00] @@ -56648,9 +59637,15 @@ v_cmps_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_ge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0x08,0x00,0x00] +v_cmps_ge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xcc,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_ge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xcc,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_ge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xcc,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_ge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xcc,0xd0,0x01,0x09,0x00,0x00] @@ -56660,9 +59655,15 @@ v_cmps_ge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_ge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x00,0x01,0x00] +v_cmps_ge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x82,0x01,0x00] + v_cmps_ge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_ge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0xee,0x01,0x00] + v_cmps_ge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x04,0x02,0x00] @@ -56759,9 +59760,15 @@ v_cmps_o_f64_e64 
ttmp[10:11], s[4:5], s[4:5] v_cmps_o_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xce,0xd0,0x80,0x08,0x00,0x00] +v_cmps_o_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xce,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_o_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xce,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_o_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xce,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_o_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xce,0xd0,0x01,0x09,0x00,0x00] @@ -56771,9 +59778,15 @@ v_cmps_o_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_o_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x00,0x01,0x00] +v_cmps_o_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x82,0x01,0x00] + v_cmps_o_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xce,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_o_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0xee,0x01,0x00] + v_cmps_o_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x04,0x02,0x00] @@ -56870,9 +59883,15 @@ v_cmps_u_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_u_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0x08,0x00,0x00] +v_cmps_u_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xd0,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_u_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xd0,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_u_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xd0,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_u_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xd0,0xd0,0x01,0x09,0x00,0x00] @@ -56882,9 +59901,15 @@ v_cmps_u_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_u_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x00,0x01,0x00] +v_cmps_u_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x82,0x01,0x00] + v_cmps_u_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_u_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0xee,0x01,0x00] 
+ v_cmps_u_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x04,0x02,0x00] @@ -56981,9 +60006,15 @@ v_cmps_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_nge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0x08,0x00,0x00] +v_cmps_nge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xd2,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_nge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xd2,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_nge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xd2,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_nge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xd2,0xd0,0x01,0x09,0x00,0x00] @@ -56993,9 +60024,15 @@ v_cmps_nge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_nge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x00,0x01,0x00] +v_cmps_nge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x82,0x01,0x00] + v_cmps_nge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_nge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0xee,0x01,0x00] + v_cmps_nge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x04,0x02,0x00] @@ -57092,9 +60129,15 @@ v_cmps_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_nlg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0x08,0x00,0x00] +v_cmps_nlg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xd4,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_nlg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xd4,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_nlg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xd4,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_nlg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xd4,0xd0,0x01,0x09,0x00,0x00] @@ -57104,9 +60147,15 @@ v_cmps_nlg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_nlg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x00,0x01,0x00] +v_cmps_nlg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x82,0x01,0x00] + 
v_cmps_nlg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_nlg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0xee,0x01,0x00] + v_cmps_nlg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x04,0x02,0x00] @@ -57203,9 +60252,15 @@ v_cmps_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_ngt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0x08,0x00,0x00] +v_cmps_ngt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xd6,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_ngt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xd6,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_ngt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xd6,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_ngt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xd6,0xd0,0x01,0x09,0x00,0x00] @@ -57215,9 +60270,15 @@ v_cmps_ngt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_ngt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x00,0x01,0x00] +v_cmps_ngt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x82,0x01,0x00] + v_cmps_ngt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_ngt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0xee,0x01,0x00] + v_cmps_ngt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x04,0x02,0x00] @@ -57314,9 +60375,15 @@ v_cmps_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_nle_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0x08,0x00,0x00] +v_cmps_nle_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xd8,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_nle_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xd8,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_nle_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xd8,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_nle_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xd8,0xd0,0x01,0x09,0x00,0x00] @@ -57326,9 +60393,15 @@ v_cmps_nle_f64_e64 s[10:11], v[254:255], s[4:5] 
v_cmps_nle_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x00,0x01,0x00] +v_cmps_nle_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x82,0x01,0x00] + v_cmps_nle_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_nle_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0xee,0x01,0x00] + v_cmps_nle_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x04,0x02,0x00] @@ -57425,9 +60498,15 @@ v_cmps_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_neq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xda,0xd0,0x80,0x08,0x00,0x00] +v_cmps_neq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xda,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_neq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xda,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_neq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xda,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_neq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xda,0xd0,0x01,0x09,0x00,0x00] @@ -57437,9 +60516,15 @@ v_cmps_neq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_neq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x00,0x01,0x00] +v_cmps_neq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x82,0x01,0x00] + v_cmps_neq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xda,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_neq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0xee,0x01,0x00] + v_cmps_neq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x04,0x02,0x00] @@ -57536,9 +60621,15 @@ v_cmps_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_nlt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0x08,0x00,0x00] +v_cmps_nlt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xdc,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_nlt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xdc,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_nlt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: 
[0x0a,0x00,0xdc,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_nlt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xdc,0xd0,0x01,0x09,0x00,0x00] @@ -57548,9 +60639,15 @@ v_cmps_nlt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_nlt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x00,0x01,0x00] +v_cmps_nlt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x82,0x01,0x00] + v_cmps_nlt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_nlt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0xee,0x01,0x00] + v_cmps_nlt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x04,0x02,0x00] @@ -57647,9 +60744,15 @@ v_cmps_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmps_tru_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xde,0xd0,0x80,0x08,0x00,0x00] +v_cmps_tru_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xde,0xd0,0xc1,0x08,0x00,0x00] + v_cmps_tru_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xde,0xd0,0xf0,0x08,0x00,0x00] +v_cmps_tru_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xde,0xd0,0xf7,0x08,0x00,0x00] + v_cmps_tru_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xde,0xd0,0x01,0x09,0x00,0x00] @@ -57659,9 +60762,15 @@ v_cmps_tru_f64_e64 s[10:11], v[254:255], s[4:5] v_cmps_tru_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x00,0x01,0x00] +v_cmps_tru_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x82,0x01,0x00] + v_cmps_tru_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xde,0xd0,0x04,0xe0,0x01,0x00] +v_cmps_tru_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0xee,0x01,0x00] + v_cmps_tru_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x04,0x02,0x00] @@ -57758,9 +60867,15 @@ v_cmpsx_f_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_f_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xe0,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_f_f64_e64 s[10:11], -1, s[4:5] +// CHECK: 
[0x0a,0x00,0xe0,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_f_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xe0,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_f_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xe0,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_f_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xe0,0xd0,0x01,0x09,0x00,0x00] @@ -57770,9 +60885,15 @@ v_cmpsx_f_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_f_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_f_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_f_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_f_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_f_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x04,0x02,0x00] @@ -57869,9 +60990,15 @@ v_cmpsx_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_lt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xe2,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_lt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xe2,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_lt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xe2,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_lt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xe2,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_lt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xe2,0xd0,0x01,0x09,0x00,0x00] @@ -57881,9 +61008,15 @@ v_cmpsx_lt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_lt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_lt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_lt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_lt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_lt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x04,0x02,0x00] @@ -57980,9 +61113,15 @@ 
v_cmpsx_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_eq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xe4,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_eq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xe4,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_eq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xe4,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_eq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xe4,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_eq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xe4,0xd0,0x01,0x09,0x00,0x00] @@ -57992,9 +61131,15 @@ v_cmpsx_eq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_eq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_eq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_eq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_eq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_eq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x04,0x02,0x00] @@ -58091,9 +61236,15 @@ v_cmpsx_le_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_le_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xe6,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_le_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xe6,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_le_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xe6,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_le_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xe6,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_le_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xe6,0xd0,0x01,0x09,0x00,0x00] @@ -58103,9 +61254,15 @@ v_cmpsx_le_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_le_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_le_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_le_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_le_f64_e64 s[10:11], 
s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_le_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x04,0x02,0x00] @@ -58202,9 +61359,15 @@ v_cmpsx_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_gt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xe8,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_gt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xe8,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_gt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xe8,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_gt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xe8,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_gt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xe8,0xd0,0x01,0x09,0x00,0x00] @@ -58214,9 +61377,15 @@ v_cmpsx_gt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_gt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_gt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_gt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_gt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_gt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x04,0x02,0x00] @@ -58313,9 +61482,15 @@ v_cmpsx_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_lg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xea,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_lg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xea,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_lg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xea,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_lg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xea,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_lg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xea,0xd0,0x01,0x09,0x00,0x00] @@ -58325,9 +61500,15 @@ v_cmpsx_lg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_lg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_lg_f64_e64 s[10:11], 
s[4:5], -1 +// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_lg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_lg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_lg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x04,0x02,0x00] @@ -58424,9 +61605,15 @@ v_cmpsx_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_ge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xec,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_ge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xec,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_ge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xec,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_ge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xec,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_ge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xec,0xd0,0x01,0x09,0x00,0x00] @@ -58436,9 +61623,15 @@ v_cmpsx_ge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_ge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_ge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_ge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_ge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_ge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x04,0x02,0x00] @@ -58535,9 +61728,15 @@ v_cmpsx_o_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_o_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xee,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_o_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xee,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_o_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xee,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_o_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xee,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_o_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xee,0xd0,0x01,0x09,0x00,0x00] @@ -58547,9 
+61746,15 @@ v_cmpsx_o_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_o_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_o_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_o_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_o_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_o_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x04,0x02,0x00] @@ -58646,9 +61851,15 @@ v_cmpsx_u_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_u_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xf0,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_u_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xf0,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_u_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xf0,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_u_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xf0,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_u_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xf0,0xd0,0x01,0x09,0x00,0x00] @@ -58658,9 +61869,15 @@ v_cmpsx_u_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_u_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_u_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_u_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_u_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_u_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x04,0x02,0x00] @@ -58757,9 +61974,15 @@ v_cmpsx_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_nge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xf2,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_nge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xf2,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_nge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xf2,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_nge_f64_e64 s[10:11], 
-4.0, s[4:5] +// CHECK: [0x0a,0x00,0xf2,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_nge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xf2,0xd0,0x01,0x09,0x00,0x00] @@ -58769,9 +61992,15 @@ v_cmpsx_nge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_nge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_nge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_nge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_nge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_nge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x04,0x02,0x00] @@ -58868,9 +62097,15 @@ v_cmpsx_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_nlg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xf4,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_nlg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xf4,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_nlg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xf4,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_nlg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xf4,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_nlg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xf4,0xd0,0x01,0x09,0x00,0x00] @@ -58880,9 +62115,15 @@ v_cmpsx_nlg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x04,0x02,0x00] @@ -58979,9 +62220,15 @@ v_cmpsx_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_ngt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xf6,0xd0,0x80,0x08,0x00,0x00] 
+v_cmpsx_ngt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xf6,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_ngt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xf6,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_ngt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xf6,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_ngt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xf6,0xd0,0x01,0x09,0x00,0x00] @@ -58991,9 +62238,15 @@ v_cmpsx_ngt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x04,0x02,0x00] @@ -59090,9 +62343,15 @@ v_cmpsx_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_nle_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xf8,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_nle_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xf8,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_nle_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xf8,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_nle_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xf8,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_nle_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xf8,0xd0,0x01,0x09,0x00,0x00] @@ -59102,9 +62361,15 @@ v_cmpsx_nle_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_nle_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_nle_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_nle_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_nle_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_nle_f64_e64 s[10:11], s[4:5], v[2:3] // 
CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x04,0x02,0x00] @@ -59201,9 +62466,15 @@ v_cmpsx_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_neq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xfa,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_neq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xfa,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_neq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xfa,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_neq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xfa,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_neq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xfa,0xd0,0x01,0x09,0x00,0x00] @@ -59213,9 +62484,15 @@ v_cmpsx_neq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_neq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_neq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_neq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_neq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_neq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x04,0x02,0x00] @@ -59312,9 +62589,15 @@ v_cmpsx_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_nlt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xfc,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_nlt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xfc,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_nlt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xfc,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_nlt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xfc,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_nlt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xfc,0xd0,0x01,0x09,0x00,0x00] @@ -59324,9 +62607,15 @@ v_cmpsx_nlt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_nlt_f64_e64 s[10:11], 
s[4:5], 0.5 // CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x04,0x02,0x00] @@ -59423,9 +62712,15 @@ v_cmpsx_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpsx_tru_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0xfe,0xd0,0x80,0x08,0x00,0x00] +v_cmpsx_tru_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0xfe,0xd0,0xc1,0x08,0x00,0x00] + v_cmpsx_tru_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0xfe,0xd0,0xf0,0x08,0x00,0x00] +v_cmpsx_tru_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0xfe,0xd0,0xf7,0x08,0x00,0x00] + v_cmpsx_tru_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0xfe,0xd0,0x01,0x09,0x00,0x00] @@ -59435,9 +62730,15 @@ v_cmpsx_tru_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpsx_tru_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x00,0x01,0x00] +v_cmpsx_tru_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x82,0x01,0x00] + v_cmpsx_tru_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xe0,0x01,0x00] +v_cmpsx_tru_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xee,0x01,0x00] + v_cmpsx_tru_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x04,0x02,0x00] diff --git a/test/MC/AMDGPU/gfx8_asm_all.s b/test/MC/AMDGPU/gfx8_asm_all.s index 0a0d42c208f91..458427e988c97 100644 --- a/test/MC/AMDGPU/gfx8_asm_all.s +++ b/test/MC/AMDGPU/gfx8_asm_all.s @@ -1,7 +1,5 @@ // RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s -// *** GENERATED BY TESTGEN, DO NOT EDIT! 
*** - ds_add_u32 v1, v2 offset:65535 // CHECK: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00] @@ -455,6 +453,9 @@ ds_max_f32 v1, v2 offset:4 ds_max_f32 v1, v2 offset:65535 gds // CHECK: [0xff,0xff,0x27,0xd8,0x01,0x02,0x00,0x00] +ds_nop +// CHECK: [0x00,0x00,0x28,0xd8,0x00,0x00,0x00,0x00] + ds_add_f32 v1, v2 offset:65535 // CHECK: [0xff,0xff,0x2a,0xd8,0x01,0x02,0x00,0x00] @@ -2678,89 +2679,89 @@ ds_max_src2_f64 v1 offset:4 ds_max_src2_f64 v1 offset:65535 gds // CHECK: [0xff,0xff,0xa7,0xd9,0x01,0x00,0x00,0x00] -ds_and_src2_b32 v1 -// CHECK: [0x00,0x00,0x12,0xd9,0x01,0x00,0x00,0x00] +ds_write_b96 v1, v[2:4] offset:65535 +// CHECK: [0xff,0xff,0xbc,0xd9,0x01,0x02,0x00,0x00] -ds_and_src2_b32 v1 gds -// CHECK: [0x00,0x00,0x13,0xd9,0x01,0x00,0x00,0x00] +ds_write_b96 v255, v[2:4] offset:65535 +// CHECK: [0xff,0xff,0xbc,0xd9,0xff,0x02,0x00,0x00] -ds_and_src2_b32 v255 offset:65535 -// CHECK: [0xff,0xff,0x12,0xd9,0xff,0x00,0x00,0x00] +ds_write_b96 v1, v[253:255] offset:65535 +// CHECK: [0xff,0xff,0xbc,0xd9,0x01,0xfd,0x00,0x00] -ds_append v5 -// CHECK: [0x00,0x00,0x7c,0xd9,0x00,0x00,0x00,0x05] +ds_write_b96 v1, v[2:4] +// CHECK: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x00,0x00] -ds_append v5 gds -// CHECK: [0x00,0x00,0x7d,0xd9,0x00,0x00,0x00,0x05] +ds_write_b96 v1, v[2:4] offset:0 +// CHECK: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x00,0x00] -ds_append v255 offset:65535 -// CHECK: [0xff,0xff,0x7c,0xd9,0x00,0x00,0x00,0xff] +ds_write_b96 v1, v[2:4] offset:4 +// CHECK: [0x04,0x00,0xbc,0xd9,0x01,0x02,0x00,0x00] -ds_consume v5 -// CHECK: [0x00,0x00,0x7a,0xd9,0x00,0x00,0x00,0x05] +ds_write_b96 v1, v[2:4] offset:65535 gds +// CHECK: [0xff,0xff,0xbd,0xd9,0x01,0x02,0x00,0x00] -ds_consume v5 gds -// CHECK: [0x00,0x00,0x7b,0xd9,0x00,0x00,0x00,0x05] +ds_write_b128 v1, v[2:5] offset:65535 +// CHECK: [0xff,0xff,0xbe,0xd9,0x01,0x02,0x00,0x00] -ds_consume v255 offset:65535 -// CHECK: [0xff,0xff,0x7a,0xd9,0x00,0x00,0x00,0xff] +ds_write_b128 v255, v[2:5] offset:65535 +// CHECK: 
[0xff,0xff,0xbe,0xd9,0xff,0x02,0x00,0x00] -ds_ordered_count v5, v1 gds -// CHECK: [0x00,0x00,0x7f,0xd9,0x01,0x00,0x00,0x05] +ds_write_b128 v1, v[252:255] offset:65535 +// CHECK: [0xff,0xff,0xbe,0xd9,0x01,0xfc,0x00,0x00] -ds_ordered_count v5, v255 offset:65535 gds -// CHECK: [0xff,0xff,0x7f,0xd9,0xff,0x00,0x00,0x05] +ds_write_b128 v1, v[2:5] +// CHECK: [0x00,0x00,0xbe,0xd9,0x01,0x02,0x00,0x00] -ds_ordered_count v5, v255 gds -// CHECK: [0x00,0x00,0x7f,0xd9,0xff,0x00,0x00,0x05] +ds_write_b128 v1, v[2:5] offset:0 +// CHECK: [0x00,0x00,0xbe,0xd9,0x01,0x02,0x00,0x00] -ds_gws_barrier v1 gds -// CHECK: [0x00,0x00,0x3b,0xd9,0x00,0x01,0x00,0x00] +ds_write_b128 v1, v[2:5] offset:4 +// CHECK: [0x04,0x00,0xbe,0xd9,0x01,0x02,0x00,0x00] -ds_gws_barrier v255 offset:65535 gds -// CHECK: [0xff,0xff,0x3b,0xd9,0x00,0xff,0x00,0x00] +ds_write_b128 v1, v[2:5] offset:65535 gds +// CHECK: [0xff,0xff,0xbf,0xd9,0x01,0x02,0x00,0x00] -ds_gws_init v1 gds -// CHECK: [0x00,0x00,0x33,0xd9,0x00,0x01,0x00,0x00] +ds_read_b96 v[5:7], v1 offset:65535 +// CHECK: [0xff,0xff,0xfc,0xd9,0x01,0x00,0x00,0x05] -ds_gws_init v255 offset:65535 gds -// CHECK: [0xff,0xff,0x33,0xd9,0x00,0xff,0x00,0x00] +ds_read_b96 v[253:255], v1 offset:65535 +// CHECK: [0xff,0xff,0xfc,0xd9,0x01,0x00,0x00,0xfd] -ds_gws_sema_br v1 gds -// CHECK: [0x00,0x00,0x37,0xd9,0x00,0x01,0x00,0x00] +ds_read_b96 v[5:7], v255 offset:65535 +// CHECK: [0xff,0xff,0xfc,0xd9,0xff,0x00,0x00,0x05] -ds_gws_sema_br v255 offset:65535 gds -// CHECK: [0xff,0xff,0x37,0xd9,0x00,0xff,0x00,0x00] +ds_read_b96 v[5:7], v1 +// CHECK: [0x00,0x00,0xfc,0xd9,0x01,0x00,0x00,0x05] -ds_gws_sema_p offset:65535 gds -// CHECK: [0xff,0xff,0x39,0xd9,0x00,0x00,0x00,0x00] +ds_read_b96 v[5:7], v1 offset:0 +// CHECK: [0x00,0x00,0xfc,0xd9,0x01,0x00,0x00,0x05] -ds_gws_sema_p gds -// CHECK: [0x00,0x00,0x39,0xd9,0x00,0x00,0x00,0x00] +ds_read_b96 v[5:7], v1 offset:4 +// CHECK: [0x04,0x00,0xfc,0xd9,0x01,0x00,0x00,0x05] -ds_gws_sema_release_all offset:65535 gds -// CHECK: 
[0xff,0xff,0x31,0xd9,0x00,0x00,0x00,0x00] +ds_read_b96 v[5:7], v1 offset:65535 gds +// CHECK: [0xff,0xff,0xfd,0xd9,0x01,0x00,0x00,0x05] -ds_gws_sema_release_all gds -// CHECK: [0x00,0x00,0x31,0xd9,0x00,0x00,0x00,0x00] +ds_read_b128 v[5:8], v1 offset:65535 +// CHECK: [0xff,0xff,0xfe,0xd9,0x01,0x00,0x00,0x05] -ds_gws_sema_v offset:65535 gds -// CHECK: [0xff,0xff,0x35,0xd9,0x00,0x00,0x00,0x00] +ds_read_b128 v[252:255], v1 offset:65535 +// CHECK: [0xff,0xff,0xfe,0xd9,0x01,0x00,0x00,0xfc] -ds_gws_sema_v gds -// CHECK: [0x00,0x00,0x35,0xd9,0x00,0x00,0x00,0x00] +ds_read_b128 v[5:8], v255 offset:65535 +// CHECK: [0xff,0xff,0xfe,0xd9,0xff,0x00,0x00,0x05] -ds_wrap_rtn_b32 v5, v255, v2, v3 gds -// CHECK: [0x00,0x00,0x69,0xd8,0xff,0x02,0x03,0x05] +ds_read_b128 v[5:8], v1 +// CHECK: [0x00,0x00,0xfe,0xd9,0x01,0x00,0x00,0x05] -ds_wrap_rtn_b32 v5, v255, v2, v255 offset:65535 -// CHECK: [0xff,0xff,0x68,0xd8,0xff,0x02,0xff,0x05] +ds_read_b128 v[5:8], v1 offset:0 +// CHECK: [0x00,0x00,0xfe,0xd9,0x01,0x00,0x00,0x05] -ds_condxchg32_rtn_b64 v[5:6], v1, v[254:255] offset:65535 gds -// CHECK: [0xff,0xff,0xfd,0xd8,0x01,0xfe,0x00,0x05] +ds_read_b128 v[5:8], v1 offset:4 +// CHECK: [0x04,0x00,0xfe,0xd9,0x01,0x00,0x00,0x05] -ds_condxchg32_rtn_b64 v[5:6], v1, v[254:255] -// CHECK: [0x00,0x00,0xfc,0xd8,0x01,0xfe,0x00,0x05] +ds_read_b128 v[5:8], v1 offset:65535 gds +// CHECK: [0xff,0xff,0xff,0xd9,0x01,0x00,0x00,0x05] exp mrt0, v0, v0, v0, v0 // CHECK: [0x0f,0x00,0x00,0xc4,0x00,0x00,0x00,0x00] @@ -23561,8 +23562,17 @@ v_cvt_i32_f64_e64 v5, ttmp[10:11] v_cvt_i32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x43,0xd1,0x7e,0x00,0x00,0x00] -v_cvt_i32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x43,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_i32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x43,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_i32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x43,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_i32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x43,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_i32_f64_e64 v5, -4.0 +// CHECK: 
[0x05,0x00,0x43,0xd1,0xf7,0x00,0x00,0x00] v_cvt_i32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x43,0xd1,0x01,0x01,0x00,0x00] @@ -24089,8 +24099,17 @@ v_cvt_u32_f32_e64 v5, exec_lo v_cvt_u32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x47,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_u32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x47,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_u32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x47,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_u32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x47,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_u32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x47,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_u32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x47,0xd1,0xf7,0x00,0x00,0x00] v_cvt_u32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x47,0xd1,0x01,0x01,0x00,0x00] @@ -24221,8 +24240,17 @@ v_cvt_i32_f32_e64 v5, exec_lo v_cvt_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x48,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x48,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x48,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x48,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_i32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x48,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x48,0xd1,0xf7,0x00,0x00,0x00] v_cvt_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x48,0xd1,0x01,0x01,0x00,0x00] @@ -24353,8 +24381,17 @@ v_cvt_f16_f32_e64 v5, exec_lo v_cvt_f16_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x4a,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x4a,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x4a,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_f16_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x4a,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_f16_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x4a,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_f16_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4a,0xd1,0xf7,0x00,0x00,0x00] v_cvt_f16_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x01,0x00,0x00] @@ -24494,8 +24531,17 
@@ v_cvt_f32_f16_e64 v5, exec_lo v_cvt_f32_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x4b,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_f32_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x4b,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_f32_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x4b,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_f32_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x4b,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_f32_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x4b,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_f32_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4b,0xd1,0xf7,0x00,0x00,0x00] v_cvt_f32_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x01,0x00,0x00] @@ -24635,8 +24681,17 @@ v_cvt_rpi_i32_f32_e64 v5, exec_lo v_cvt_rpi_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x4c,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_rpi_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x4c,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_rpi_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x4c,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_rpi_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x4c,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_rpi_i32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x4c,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_rpi_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4c,0xd1,0xf7,0x00,0x00,0x00] v_cvt_rpi_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x4c,0xd1,0x01,0x01,0x00,0x00] @@ -24767,8 +24822,17 @@ v_cvt_flr_i32_f32_e64 v5, exec_lo v_cvt_flr_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x4d,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_flr_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x4d,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_flr_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x4d,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_flr_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x4d,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_flr_i32_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x4d,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_flr_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4d,0xd1,0xf7,0x00,0x00,0x00] v_cvt_flr_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x4d,0xd1,0x01,0x01,0x00,0x00] @@ -25001,8 +25065,17 @@ v_cvt_f32_f64_e64 v5, ttmp[10:11] v_cvt_f32_f64_e64 v5, exec // CHECK: 
[0x05,0x00,0x4f,0xd1,0x7e,0x00,0x00,0x00] -v_cvt_f32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x4f,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_f32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x4f,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_f32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x4f,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_f32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x4f,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_f32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x4f,0xd1,0xf7,0x00,0x00,0x00] v_cvt_f32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x4f,0xd1,0x01,0x01,0x00,0x00] @@ -25142,8 +25215,17 @@ v_cvt_f64_f32_e64 v[5:6], exec_lo v_cvt_f64_f32_e64 v[5:6], exec_hi // CHECK: [0x05,0x00,0x50,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_f64_f32_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x50,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_f64_f32_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x50,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_f64_f32_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x50,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_f64_f32_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x50,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_f64_f32_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x50,0xd1,0xf7,0x00,0x00,0x00] v_cvt_f64_f32_e64 v[5:6], v1 // CHECK: [0x05,0x00,0x50,0xd1,0x01,0x01,0x00,0x00] @@ -25781,8 +25863,17 @@ v_cvt_u32_f64_e64 v5, ttmp[10:11] v_cvt_u32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x55,0xd1,0x7e,0x00,0x00,0x00] -v_cvt_u32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x55,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_u32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x55,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_u32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x55,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_u32_f64_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x55,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_u32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x55,0xd1,0xf7,0x00,0x00,0x00] v_cvt_u32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x55,0xd1,0x01,0x01,0x00,0x00] @@ -26015,8 +26106,17 @@ v_trunc_f64_e64 v[5:6], ttmp[10:11] v_trunc_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x57,0xd1,0x7e,0x00,0x00,0x00] -v_trunc_f64_e64 v[5:6], scc -// CHECK: 
[0x05,0x00,0x57,0xd1,0xfd,0x00,0x00,0x00] +v_trunc_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x57,0xd1,0x80,0x00,0x00,0x00] + +v_trunc_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x57,0xd1,0xc1,0x00,0x00,0x00] + +v_trunc_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x57,0xd1,0xf0,0x00,0x00,0x00] + +v_trunc_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x57,0xd1,0xf7,0x00,0x00,0x00] v_trunc_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x57,0xd1,0x01,0x01,0x00,0x00] @@ -26126,8 +26226,17 @@ v_ceil_f64_e64 v[5:6], ttmp[10:11] v_ceil_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x58,0xd1,0x7e,0x00,0x00,0x00] -v_ceil_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x58,0xd1,0xfd,0x00,0x00,0x00] +v_ceil_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x58,0xd1,0x80,0x00,0x00,0x00] + +v_ceil_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x58,0xd1,0xc1,0x00,0x00,0x00] + +v_ceil_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x58,0xd1,0xf0,0x00,0x00,0x00] + +v_ceil_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x58,0xd1,0xf7,0x00,0x00,0x00] v_ceil_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x58,0xd1,0x01,0x01,0x00,0x00] @@ -26240,11 +26349,14 @@ v_rndne_f64_e64 v[5:6], exec v_rndne_f64_e64 v[5:6], 0 // CHECK: [0x05,0x00,0x59,0xd1,0x80,0x00,0x00,0x00] +v_rndne_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x59,0xd1,0xc1,0x00,0x00,0x00] + v_rndne_f64_e64 v[5:6], 0.5 // CHECK: [0x05,0x00,0x59,0xd1,0xf0,0x00,0x00,0x00] -v_rndne_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x59,0xd1,0xfd,0x00,0x00,0x00] +v_rndne_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x59,0xd1,0xf7,0x00,0x00,0x00] v_rndne_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x59,0xd1,0x01,0x01,0x00,0x00] @@ -26354,11 +26466,14 @@ v_floor_f64_e64 v[5:6], exec v_floor_f64_e64 v[5:6], 0 // CHECK: [0x05,0x00,0x5a,0xd1,0x80,0x00,0x00,0x00] +v_floor_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x5a,0xd1,0xc1,0x00,0x00,0x00] + v_floor_f64_e64 v[5:6], 0.5 // CHECK: [0x05,0x00,0x5a,0xd1,0xf0,0x00,0x00,0x00] -v_floor_f64_e64 v[5:6], scc -// CHECK: 
[0x05,0x00,0x5a,0xd1,0xfd,0x00,0x00,0x00] +v_floor_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x5a,0xd1,0xf7,0x00,0x00,0x00] v_floor_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x5a,0xd1,0x01,0x01,0x00,0x00] @@ -26498,11 +26613,14 @@ v_fract_f32_e64 v5, exec_hi v_fract_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x5b,0xd1,0x80,0x00,0x00,0x00] +v_fract_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x5b,0xd1,0xc1,0x00,0x00,0x00] + v_fract_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x5b,0xd1,0xf0,0x00,0x00,0x00] -v_fract_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5b,0xd1,0xfd,0x00,0x00,0x00] +v_fract_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5b,0xd1,0xf7,0x00,0x00,0x00] v_fract_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x01,0x00,0x00] @@ -26642,11 +26760,14 @@ v_trunc_f32_e64 v5, exec_hi v_trunc_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x5c,0xd1,0x80,0x00,0x00,0x00] +v_trunc_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x5c,0xd1,0xc1,0x00,0x00,0x00] + v_trunc_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x5c,0xd1,0xf0,0x00,0x00,0x00] -v_trunc_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5c,0xd1,0xfd,0x00,0x00,0x00] +v_trunc_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5c,0xd1,0xf7,0x00,0x00,0x00] v_trunc_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x01,0x00,0x00] @@ -26786,11 +26907,14 @@ v_ceil_f32_e64 v5, exec_hi v_ceil_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x5d,0xd1,0x80,0x00,0x00,0x00] +v_ceil_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x5d,0xd1,0xc1,0x00,0x00,0x00] + v_ceil_f32_e64 v5, 0.5 // CHECK: [0x05,0x00,0x5d,0xd1,0xf0,0x00,0x00,0x00] -v_ceil_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5d,0xd1,0xfd,0x00,0x00,0x00] +v_ceil_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5d,0xd1,0xf7,0x00,0x00,0x00] v_ceil_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x01,0x00,0x00] @@ -26930,11 +27054,14 @@ v_rndne_f32_e64 v5, exec_hi v_rndne_f32_e64 v5, 0 // CHECK: [0x05,0x00,0x5e,0xd1,0x80,0x00,0x00,0x00] +v_rndne_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x5e,0xd1,0xc1,0x00,0x00,0x00] + v_rndne_f32_e64 v5, 0.5 // CHECK: 
[0x05,0x00,0x5e,0xd1,0xf0,0x00,0x00,0x00] -v_rndne_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5e,0xd1,0xfd,0x00,0x00,0x00] +v_rndne_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5e,0xd1,0xf7,0x00,0x00,0x00] v_rndne_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x01,0x00,0x00] @@ -27071,8 +27198,17 @@ v_floor_f32_e64 v5, exec_lo v_floor_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x5f,0xd1,0x7f,0x00,0x00,0x00] -v_floor_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x5f,0xd1,0xfd,0x00,0x00,0x00] +v_floor_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x5f,0xd1,0x80,0x00,0x00,0x00] + +v_floor_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x5f,0xd1,0xc1,0x00,0x00,0x00] + +v_floor_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x5f,0xd1,0xf0,0x00,0x00,0x00] + +v_floor_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x5f,0xd1,0xf7,0x00,0x00,0x00] v_floor_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x01,0x00,0x00] @@ -27212,8 +27348,17 @@ v_exp_f32_e64 v5, exec_lo v_exp_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x60,0xd1,0x7f,0x00,0x00,0x00] -v_exp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x60,0xd1,0xfd,0x00,0x00,0x00] +v_exp_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x60,0xd1,0x80,0x00,0x00,0x00] + +v_exp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x60,0xd1,0xc1,0x00,0x00,0x00] + +v_exp_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x60,0xd1,0xf0,0x00,0x00,0x00] + +v_exp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x60,0xd1,0xf7,0x00,0x00,0x00] v_exp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x60,0xd1,0x01,0x01,0x00,0x00] @@ -27353,8 +27498,17 @@ v_log_f32_e64 v5, exec_lo v_log_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x61,0xd1,0x7f,0x00,0x00,0x00] -v_log_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x61,0xd1,0xfd,0x00,0x00,0x00] +v_log_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x61,0xd1,0x80,0x00,0x00,0x00] + +v_log_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x61,0xd1,0xc1,0x00,0x00,0x00] + +v_log_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x61,0xd1,0xf0,0x00,0x00,0x00] + +v_log_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x61,0xd1,0xf7,0x00,0x00,0x00] v_log_f32_e64 v5, v1 // 
CHECK: [0x05,0x00,0x61,0xd1,0x01,0x01,0x00,0x00] @@ -27494,8 +27648,17 @@ v_rcp_f32_e64 v5, exec_lo v_rcp_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x62,0xd1,0x7f,0x00,0x00,0x00] -v_rcp_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x62,0xd1,0xfd,0x00,0x00,0x00] +v_rcp_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x62,0xd1,0x80,0x00,0x00,0x00] + +v_rcp_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x62,0xd1,0xc1,0x00,0x00,0x00] + +v_rcp_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x62,0xd1,0xf0,0x00,0x00,0x00] + +v_rcp_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x62,0xd1,0xf7,0x00,0x00,0x00] v_rcp_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x62,0xd1,0x01,0x01,0x00,0x00] @@ -27635,8 +27798,17 @@ v_rcp_iflag_f32_e64 v5, exec_lo v_rcp_iflag_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x63,0xd1,0x7f,0x00,0x00,0x00] -v_rcp_iflag_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x63,0xd1,0xfd,0x00,0x00,0x00] +v_rcp_iflag_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x63,0xd1,0x80,0x00,0x00,0x00] + +v_rcp_iflag_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x63,0xd1,0xc1,0x00,0x00,0x00] + +v_rcp_iflag_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x63,0xd1,0xf0,0x00,0x00,0x00] + +v_rcp_iflag_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x63,0xd1,0xf7,0x00,0x00,0x00] v_rcp_iflag_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x63,0xd1,0x01,0x01,0x00,0x00] @@ -27776,8 +27948,17 @@ v_rsq_f32_e64 v5, exec_lo v_rsq_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x64,0xd1,0x7f,0x00,0x00,0x00] -v_rsq_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x64,0xd1,0xfd,0x00,0x00,0x00] +v_rsq_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x64,0xd1,0x80,0x00,0x00,0x00] + +v_rsq_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x64,0xd1,0xc1,0x00,0x00,0x00] + +v_rsq_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x64,0xd1,0xf0,0x00,0x00,0x00] + +v_rsq_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x64,0xd1,0xf7,0x00,0x00,0x00] v_rsq_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x64,0xd1,0x01,0x01,0x00,0x00] @@ -27887,8 +28068,17 @@ v_rcp_f64_e64 v[5:6], ttmp[10:11] v_rcp_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x65,0xd1,0x7e,0x00,0x00,0x00] 
-v_rcp_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x65,0xd1,0xfd,0x00,0x00,0x00] +v_rcp_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x65,0xd1,0x80,0x00,0x00,0x00] + +v_rcp_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x65,0xd1,0xc1,0x00,0x00,0x00] + +v_rcp_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x65,0xd1,0xf0,0x00,0x00,0x00] + +v_rcp_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x65,0xd1,0xf7,0x00,0x00,0x00] v_rcp_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x65,0xd1,0x01,0x01,0x00,0x00] @@ -27998,8 +28188,17 @@ v_rsq_f64_e64 v[5:6], ttmp[10:11] v_rsq_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x66,0xd1,0x7e,0x00,0x00,0x00] -v_rsq_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x66,0xd1,0xfd,0x00,0x00,0x00] +v_rsq_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x66,0xd1,0x80,0x00,0x00,0x00] + +v_rsq_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x66,0xd1,0xc1,0x00,0x00,0x00] + +v_rsq_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x66,0xd1,0xf0,0x00,0x00,0x00] + +v_rsq_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x66,0xd1,0xf7,0x00,0x00,0x00] v_rsq_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x66,0xd1,0x01,0x01,0x00,0x00] @@ -28139,8 +28338,17 @@ v_sqrt_f32_e64 v5, exec_lo v_sqrt_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x67,0xd1,0x7f,0x00,0x00,0x00] -v_sqrt_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x67,0xd1,0xfd,0x00,0x00,0x00] +v_sqrt_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x67,0xd1,0x80,0x00,0x00,0x00] + +v_sqrt_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x67,0xd1,0xc1,0x00,0x00,0x00] + +v_sqrt_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x67,0xd1,0xf0,0x00,0x00,0x00] + +v_sqrt_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x67,0xd1,0xf7,0x00,0x00,0x00] v_sqrt_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x67,0xd1,0x01,0x01,0x00,0x00] @@ -28250,8 +28458,17 @@ v_sqrt_f64_e64 v[5:6], ttmp[10:11] v_sqrt_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x68,0xd1,0x7e,0x00,0x00,0x00] -v_sqrt_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x68,0xd1,0xfd,0x00,0x00,0x00] +v_sqrt_f64_e64 v[5:6], 0 +// CHECK: 
[0x05,0x00,0x68,0xd1,0x80,0x00,0x00,0x00] + +v_sqrt_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x68,0xd1,0xc1,0x00,0x00,0x00] + +v_sqrt_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x68,0xd1,0xf0,0x00,0x00,0x00] + +v_sqrt_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x68,0xd1,0xf7,0x00,0x00,0x00] v_sqrt_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x68,0xd1,0x01,0x01,0x00,0x00] @@ -28391,8 +28608,17 @@ v_sin_f32_e64 v5, exec_lo v_sin_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x69,0xd1,0x7f,0x00,0x00,0x00] -v_sin_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x69,0xd1,0xfd,0x00,0x00,0x00] +v_sin_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x69,0xd1,0x80,0x00,0x00,0x00] + +v_sin_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x69,0xd1,0xc1,0x00,0x00,0x00] + +v_sin_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x69,0xd1,0xf0,0x00,0x00,0x00] + +v_sin_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x69,0xd1,0xf7,0x00,0x00,0x00] v_sin_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x69,0xd1,0x01,0x01,0x00,0x00] @@ -28532,8 +28758,17 @@ v_cos_f32_e64 v5, exec_lo v_cos_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x6a,0xd1,0x7f,0x00,0x00,0x00] -v_cos_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x6a,0xd1,0xfd,0x00,0x00,0x00] +v_cos_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x6a,0xd1,0x80,0x00,0x00,0x00] + +v_cos_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x6a,0xd1,0xc1,0x00,0x00,0x00] + +v_cos_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x6a,0xd1,0xf0,0x00,0x00,0x00] + +v_cos_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x6a,0xd1,0xf7,0x00,0x00,0x00] v_cos_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x01,0x00,0x00] @@ -29303,8 +29538,17 @@ v_frexp_exp_i32_f64_e64 v5, ttmp[10:11] v_frexp_exp_i32_f64_e64 v5, exec // CHECK: [0x05,0x00,0x70,0xd1,0x7e,0x00,0x00,0x00] -v_frexp_exp_i32_f64_e64 v5, scc -// CHECK: [0x05,0x00,0x70,0xd1,0xfd,0x00,0x00,0x00] +v_frexp_exp_i32_f64_e64 v5, 0 +// CHECK: [0x05,0x00,0x70,0xd1,0x80,0x00,0x00,0x00] + +v_frexp_exp_i32_f64_e64 v5, -1 +// CHECK: [0x05,0x00,0x70,0xd1,0xc1,0x00,0x00,0x00] + +v_frexp_exp_i32_f64_e64 v5, 0.5 +// CHECK: 
[0x05,0x00,0x70,0xd1,0xf0,0x00,0x00,0x00] + +v_frexp_exp_i32_f64_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x70,0xd1,0xf7,0x00,0x00,0x00] v_frexp_exp_i32_f64_e64 v5, v[1:2] // CHECK: [0x05,0x00,0x70,0xd1,0x01,0x01,0x00,0x00] @@ -29405,8 +29649,17 @@ v_frexp_mant_f64_e64 v[5:6], ttmp[10:11] v_frexp_mant_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x71,0xd1,0x7e,0x00,0x00,0x00] -v_frexp_mant_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x71,0xd1,0xfd,0x00,0x00,0x00] +v_frexp_mant_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x71,0xd1,0x80,0x00,0x00,0x00] + +v_frexp_mant_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x71,0xd1,0xc1,0x00,0x00,0x00] + +v_frexp_mant_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x71,0xd1,0xf0,0x00,0x00,0x00] + +v_frexp_mant_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x71,0xd1,0xf7,0x00,0x00,0x00] v_frexp_mant_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x71,0xd1,0x01,0x01,0x00,0x00] @@ -29516,8 +29769,17 @@ v_fract_f64_e64 v[5:6], ttmp[10:11] v_fract_f64_e64 v[5:6], exec // CHECK: [0x05,0x00,0x72,0xd1,0x7e,0x00,0x00,0x00] -v_fract_f64_e64 v[5:6], scc -// CHECK: [0x05,0x00,0x72,0xd1,0xfd,0x00,0x00,0x00] +v_fract_f64_e64 v[5:6], 0 +// CHECK: [0x05,0x00,0x72,0xd1,0x80,0x00,0x00,0x00] + +v_fract_f64_e64 v[5:6], -1 +// CHECK: [0x05,0x00,0x72,0xd1,0xc1,0x00,0x00,0x00] + +v_fract_f64_e64 v[5:6], 0.5 +// CHECK: [0x05,0x00,0x72,0xd1,0xf0,0x00,0x00,0x00] + +v_fract_f64_e64 v[5:6], -4.0 +// CHECK: [0x05,0x00,0x72,0xd1,0xf7,0x00,0x00,0x00] v_fract_f64_e64 v[5:6], v[1:2] // CHECK: [0x05,0x00,0x72,0xd1,0x01,0x01,0x00,0x00] @@ -29657,8 +29919,17 @@ v_frexp_exp_i32_f32_e64 v5, exec_lo v_frexp_exp_i32_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x73,0xd1,0x7f,0x00,0x00,0x00] -v_frexp_exp_i32_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x73,0xd1,0xfd,0x00,0x00,0x00] +v_frexp_exp_i32_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x73,0xd1,0x80,0x00,0x00,0x00] + +v_frexp_exp_i32_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x73,0xd1,0xc1,0x00,0x00,0x00] + +v_frexp_exp_i32_f32_e64 v5, 0.5 +// CHECK: 
[0x05,0x00,0x73,0xd1,0xf0,0x00,0x00,0x00] + +v_frexp_exp_i32_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x73,0xd1,0xf7,0x00,0x00,0x00] v_frexp_exp_i32_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x73,0xd1,0x01,0x01,0x00,0x00] @@ -29789,8 +30060,17 @@ v_frexp_mant_f32_e64 v5, exec_lo v_frexp_mant_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x74,0xd1,0x7f,0x00,0x00,0x00] -v_frexp_mant_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x74,0xd1,0xfd,0x00,0x00,0x00] +v_frexp_mant_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x74,0xd1,0x80,0x00,0x00,0x00] + +v_frexp_mant_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x74,0xd1,0xc1,0x00,0x00,0x00] + +v_frexp_mant_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x74,0xd1,0xf0,0x00,0x00,0x00] + +v_frexp_mant_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x74,0xd1,0xf7,0x00,0x00,0x00] v_frexp_mant_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x74,0xd1,0x01,0x01,0x00,0x00] @@ -30284,8 +30564,17 @@ v_cvt_u16_f16_e64 v5, exec_lo v_cvt_u16_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x7b,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x7b,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x7b,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_u16_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x7b,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_u16_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x7b,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_u16_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x7b,0xd1,0xf7,0x00,0x00,0x00] v_cvt_u16_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x7b,0xd1,0x01,0x01,0x00,0x00] @@ -30416,8 +30705,17 @@ v_cvt_i16_f16_e64 v5, exec_lo v_cvt_i16_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x7c,0xd1,0x7f,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x7c,0xd1,0xfd,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x7c,0xd1,0x80,0x00,0x00,0x00] + +v_cvt_i16_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x7c,0xd1,0xc1,0x00,0x00,0x00] + +v_cvt_i16_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x7c,0xd1,0xf0,0x00,0x00,0x00] + +v_cvt_i16_f16_e64 v5, -4.0 +// CHECK: 
[0x05,0x00,0x7c,0xd1,0xf7,0x00,0x00,0x00] v_cvt_i16_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x7c,0xd1,0x01,0x01,0x00,0x00] @@ -30548,8 +30846,17 @@ v_rcp_f16_e64 v5, exec_lo v_rcp_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x7d,0xd1,0x7f,0x00,0x00,0x00] -v_rcp_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x7d,0xd1,0xfd,0x00,0x00,0x00] +v_rcp_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x7d,0xd1,0x80,0x00,0x00,0x00] + +v_rcp_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x7d,0xd1,0xc1,0x00,0x00,0x00] + +v_rcp_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x7d,0xd1,0xf0,0x00,0x00,0x00] + +v_rcp_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x7d,0xd1,0xf7,0x00,0x00,0x00] v_rcp_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x7d,0xd1,0x01,0x01,0x00,0x00] @@ -30680,8 +30987,17 @@ v_sqrt_f16_e64 v5, exec_lo v_sqrt_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x7e,0xd1,0x7f,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x7e,0xd1,0xfd,0x00,0x00,0x00] +v_sqrt_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x7e,0xd1,0x80,0x00,0x00,0x00] + +v_sqrt_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x7e,0xd1,0xc1,0x00,0x00,0x00] + +v_sqrt_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x7e,0xd1,0xf0,0x00,0x00,0x00] + +v_sqrt_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x7e,0xd1,0xf7,0x00,0x00,0x00] v_sqrt_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x7e,0xd1,0x01,0x01,0x00,0x00] @@ -30812,8 +31128,17 @@ v_rsq_f16_e64 v5, exec_lo v_rsq_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x7f,0xd1,0x7f,0x00,0x00,0x00] -v_rsq_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x7f,0xd1,0xfd,0x00,0x00,0x00] +v_rsq_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x7f,0xd1,0x80,0x00,0x00,0x00] + +v_rsq_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x7f,0xd1,0xc1,0x00,0x00,0x00] + +v_rsq_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x7f,0xd1,0xf0,0x00,0x00,0x00] + +v_rsq_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x7f,0xd1,0xf7,0x00,0x00,0x00] v_rsq_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x7f,0xd1,0x01,0x01,0x00,0x00] @@ -30944,8 +31269,17 @@ v_log_f16_e64 v5, exec_lo v_log_f16_e64 v5, exec_hi // CHECK: 
[0x05,0x00,0x80,0xd1,0x7f,0x00,0x00,0x00] -v_log_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x80,0xd1,0xfd,0x00,0x00,0x00] +v_log_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x80,0xd1,0x80,0x00,0x00,0x00] + +v_log_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x80,0xd1,0xc1,0x00,0x00,0x00] + +v_log_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x80,0xd1,0xf0,0x00,0x00,0x00] + +v_log_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x80,0xd1,0xf7,0x00,0x00,0x00] v_log_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x80,0xd1,0x01,0x01,0x00,0x00] @@ -31076,8 +31410,17 @@ v_exp_f16_e64 v5, exec_lo v_exp_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x81,0xd1,0x7f,0x00,0x00,0x00] -v_exp_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x81,0xd1,0xfd,0x00,0x00,0x00] +v_exp_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x81,0xd1,0x80,0x00,0x00,0x00] + +v_exp_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x81,0xd1,0xc1,0x00,0x00,0x00] + +v_exp_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x81,0xd1,0xf0,0x00,0x00,0x00] + +v_exp_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x81,0xd1,0xf7,0x00,0x00,0x00] v_exp_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x81,0xd1,0x01,0x01,0x00,0x00] @@ -31208,8 +31551,17 @@ v_frexp_mant_f16_e64 v5, exec_lo v_frexp_mant_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x82,0xd1,0x7f,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x82,0xd1,0xfd,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x82,0xd1,0x80,0x00,0x00,0x00] + +v_frexp_mant_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x82,0xd1,0xc1,0x00,0x00,0x00] + +v_frexp_mant_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x82,0xd1,0xf0,0x00,0x00,0x00] + +v_frexp_mant_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x82,0xd1,0xf7,0x00,0x00,0x00] v_frexp_mant_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x82,0xd1,0x01,0x01,0x00,0x00] @@ -31340,8 +31692,17 @@ v_frexp_exp_i16_f16_e64 v5, exec_lo v_frexp_exp_i16_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x83,0xd1,0x7f,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x83,0xd1,0xfd,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5, 0 +// 
CHECK: [0x05,0x00,0x83,0xd1,0x80,0x00,0x00,0x00] + +v_frexp_exp_i16_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x83,0xd1,0xc1,0x00,0x00,0x00] + +v_frexp_exp_i16_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x83,0xd1,0xf0,0x00,0x00,0x00] + +v_frexp_exp_i16_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x83,0xd1,0xf7,0x00,0x00,0x00] v_frexp_exp_i16_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x83,0xd1,0x01,0x01,0x00,0x00] @@ -31472,8 +31833,17 @@ v_floor_f16_e64 v5, exec_lo v_floor_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x84,0xd1,0x7f,0x00,0x00,0x00] -v_floor_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x84,0xd1,0xfd,0x00,0x00,0x00] +v_floor_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x84,0xd1,0x80,0x00,0x00,0x00] + +v_floor_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x84,0xd1,0xc1,0x00,0x00,0x00] + +v_floor_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x84,0xd1,0xf0,0x00,0x00,0x00] + +v_floor_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x84,0xd1,0xf7,0x00,0x00,0x00] v_floor_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x84,0xd1,0x01,0x01,0x00,0x00] @@ -31604,8 +31974,17 @@ v_ceil_f16_e64 v5, exec_lo v_ceil_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x85,0xd1,0x7f,0x00,0x00,0x00] -v_ceil_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x85,0xd1,0xfd,0x00,0x00,0x00] +v_ceil_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x85,0xd1,0x80,0x00,0x00,0x00] + +v_ceil_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x85,0xd1,0xc1,0x00,0x00,0x00] + +v_ceil_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x85,0xd1,0xf0,0x00,0x00,0x00] + +v_ceil_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x85,0xd1,0xf7,0x00,0x00,0x00] v_ceil_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x85,0xd1,0x01,0x01,0x00,0x00] @@ -31736,8 +32115,17 @@ v_trunc_f16_e64 v5, exec_lo v_trunc_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x86,0xd1,0x7f,0x00,0x00,0x00] -v_trunc_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x86,0xd1,0xfd,0x00,0x00,0x00] +v_trunc_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x86,0xd1,0x80,0x00,0x00,0x00] + +v_trunc_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x86,0xd1,0xc1,0x00,0x00,0x00] + +v_trunc_f16_e64 v5, 0.5 +// CHECK: 
[0x05,0x00,0x86,0xd1,0xf0,0x00,0x00,0x00] + +v_trunc_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x86,0xd1,0xf7,0x00,0x00,0x00] v_trunc_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x86,0xd1,0x01,0x01,0x00,0x00] @@ -31868,8 +32256,17 @@ v_rndne_f16_e64 v5, exec_lo v_rndne_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x87,0xd1,0x7f,0x00,0x00,0x00] -v_rndne_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x87,0xd1,0xfd,0x00,0x00,0x00] +v_rndne_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x87,0xd1,0x80,0x00,0x00,0x00] + +v_rndne_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x87,0xd1,0xc1,0x00,0x00,0x00] + +v_rndne_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x87,0xd1,0xf0,0x00,0x00,0x00] + +v_rndne_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x87,0xd1,0xf7,0x00,0x00,0x00] v_rndne_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x87,0xd1,0x01,0x01,0x00,0x00] @@ -32000,8 +32397,17 @@ v_fract_f16_e64 v5, exec_lo v_fract_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x88,0xd1,0x7f,0x00,0x00,0x00] -v_fract_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x88,0xd1,0xfd,0x00,0x00,0x00] +v_fract_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x88,0xd1,0x80,0x00,0x00,0x00] + +v_fract_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x88,0xd1,0xc1,0x00,0x00,0x00] + +v_fract_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x88,0xd1,0xf0,0x00,0x00,0x00] + +v_fract_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x88,0xd1,0xf7,0x00,0x00,0x00] v_fract_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x88,0xd1,0x01,0x01,0x00,0x00] @@ -32132,8 +32538,17 @@ v_sin_f16_e64 v5, exec_lo v_sin_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x89,0xd1,0x7f,0x00,0x00,0x00] -v_sin_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x89,0xd1,0xfd,0x00,0x00,0x00] +v_sin_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x89,0xd1,0x80,0x00,0x00,0x00] + +v_sin_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x89,0xd1,0xc1,0x00,0x00,0x00] + +v_sin_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x89,0xd1,0xf0,0x00,0x00,0x00] + +v_sin_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x89,0xd1,0xf7,0x00,0x00,0x00] v_sin_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x89,0xd1,0x01,0x01,0x00,0x00] @@ -32264,8 
+32679,17 @@ v_cos_f16_e64 v5, exec_lo v_cos_f16_e64 v5, exec_hi // CHECK: [0x05,0x00,0x8a,0xd1,0x7f,0x00,0x00,0x00] -v_cos_f16_e64 v5, scc -// CHECK: [0x05,0x00,0x8a,0xd1,0xfd,0x00,0x00,0x00] +v_cos_f16_e64 v5, 0 +// CHECK: [0x05,0x00,0x8a,0xd1,0x80,0x00,0x00,0x00] + +v_cos_f16_e64 v5, -1 +// CHECK: [0x05,0x00,0x8a,0xd1,0xc1,0x00,0x00,0x00] + +v_cos_f16_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x8a,0xd1,0xf0,0x00,0x00,0x00] + +v_cos_f16_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x8a,0xd1,0xf7,0x00,0x00,0x00] v_cos_f16_e64 v5, v1 // CHECK: [0x05,0x00,0x8a,0xd1,0x01,0x01,0x00,0x00] @@ -32396,8 +32820,17 @@ v_exp_legacy_f32_e64 v5, exec_lo v_exp_legacy_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x8b,0xd1,0x7f,0x00,0x00,0x00] -v_exp_legacy_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x8b,0xd1,0xfd,0x00,0x00,0x00] +v_exp_legacy_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x8b,0xd1,0x80,0x00,0x00,0x00] + +v_exp_legacy_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x8b,0xd1,0xc1,0x00,0x00,0x00] + +v_exp_legacy_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x8b,0xd1,0xf0,0x00,0x00,0x00] + +v_exp_legacy_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x8b,0xd1,0xf7,0x00,0x00,0x00] v_exp_legacy_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x01,0x00,0x00] @@ -32537,8 +32970,17 @@ v_log_legacy_f32_e64 v5, exec_lo v_log_legacy_f32_e64 v5, exec_hi // CHECK: [0x05,0x00,0x8c,0xd1,0x7f,0x00,0x00,0x00] -v_log_legacy_f32_e64 v5, scc -// CHECK: [0x05,0x00,0x8c,0xd1,0xfd,0x00,0x00,0x00] +v_log_legacy_f32_e64 v5, 0 +// CHECK: [0x05,0x00,0x8c,0xd1,0x80,0x00,0x00,0x00] + +v_log_legacy_f32_e64 v5, -1 +// CHECK: [0x05,0x00,0x8c,0xd1,0xc1,0x00,0x00,0x00] + +v_log_legacy_f32_e64 v5, 0.5 +// CHECK: [0x05,0x00,0x8c,0xd1,0xf0,0x00,0x00,0x00] + +v_log_legacy_f32_e64 v5, -4.0 +// CHECK: [0x05,0x00,0x8c,0xd1,0xf7,0x00,0x00,0x00] v_log_legacy_f32_e64 v5, v1 // CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x01,0x00,0x00] @@ -32717,92 +33159,113 @@ v_add_f32 v5, v255, v2 v_add_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x02] +v_add_f32_e64 v5, 0, s2 +// CHECK: 
[0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x00] + +v_add_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x01,0xd1,0x80,0x04,0x00,0x00] + +v_add_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x01,0xd1,0xc1,0x04,0x00,0x00] + +v_add_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x01,0xd1,0xf0,0x04,0x00,0x00] + +v_add_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x01,0xd1,0xf7,0x04,0x00,0x00] + v_add_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x00] -v_add_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x01,0xd1,0x01,0x05,0x00,0x00] - v_add_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x01,0xd1,0xff,0x05,0x00,0x00] -v_add_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xcb,0x00,0x00] +v_add_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xca,0x00,0x00] + +v_add_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xcc,0x00,0x00] -v_add_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xcd,0x00,0x00] +v_add_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xce,0x00,0x00] -v_add_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xcf,0x00,0x00] +v_add_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xd4,0x00,0x00] -v_add_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xd5,0x00,0x00] +v_add_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xd6,0x00,0x00] -v_add_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xd7,0x00,0x00] +v_add_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xd8,0x00,0x00] -v_add_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xd9,0x00,0x00] +v_add_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xda,0x00,0x00] -v_add_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xdb,0x00,0x00] +v_add_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xdc,0x00,0x00] -v_add_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xdd,0x00,0x00] +v_add_f32_e64 v5, 0, tma_hi +// 
CHECK: [0x05,0x00,0x01,0xd1,0x80,0xde,0x00,0x00] -v_add_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xdf,0x00,0x00] +v_add_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xf6,0x00,0x00] -v_add_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xf7,0x00,0x00] +v_add_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xf8,0x00,0x00] -v_add_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xf9,0x00,0x00] +v_add_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xfc,0x00,0x00] -v_add_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xfd,0x00,0x00] +v_add_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xfe,0x00,0x00] -v_add_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xff,0x00,0x00] +v_add_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x00,0x01,0x00] -v_add_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xfb,0x01,0x00] +v_add_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x82,0x01,0x00] -v_add_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x02,0x00] +v_add_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xe0,0x01,0x00] -v_add_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xff,0x03,0x00] +v_add_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xee,0x01,0x00] -v_add_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x20] +v_add_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x02,0x00] -v_add_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x40] +v_add_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0xfe,0x03,0x00] -v_add_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x60] +v_add_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x20] -v_add_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x01,0xd1,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x40] -v_add_f32_e64 
v5, v1, |s2| -// CHECK: [0x05,0x02,0x01,0xd1,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x60] -v_add_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x01,0xd1,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x01,0xd1,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x01,0xd1,0x01,0x05,0x00,0x00] +v_add_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x01,0xd1,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x08] +v_add_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x01,0xd1,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x10] +v_add_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x01,0xd1,0x80,0x04,0x00,0x00] -v_add_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x18] +v_add_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x08] + +v_add_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x10] + +v_add_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x01,0xd1,0x80,0x04,0x00,0x18] v_sub_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x04] @@ -32876,92 +33339,113 @@ v_sub_f32 v5, v255, v2 v_sub_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x04] +v_sub_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x00] + +v_sub_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x02,0xd1,0x80,0x04,0x00,0x00] + +v_sub_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x02,0xd1,0xc1,0x04,0x00,0x00] + +v_sub_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x02,0xd1,0xf0,0x04,0x00,0x00] + +v_sub_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x02,0xd1,0xf7,0x04,0x00,0x00] + v_sub_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x00] -v_sub_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x02,0xd1,0x01,0x05,0x00,0x00] - v_sub_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x02,0xd1,0xff,0x05,0x00,0x00] -v_sub_f32_e64 v5, v1, s101 -// CHECK: 
[0x05,0x00,0x02,0xd1,0x01,0xcb,0x00,0x00] +v_sub_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xca,0x00,0x00] + +v_sub_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xcc,0x00,0x00] -v_sub_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xcd,0x00,0x00] +v_sub_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xce,0x00,0x00] -v_sub_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xcf,0x00,0x00] +v_sub_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xd4,0x00,0x00] -v_sub_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xd5,0x00,0x00] +v_sub_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xd6,0x00,0x00] -v_sub_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xd7,0x00,0x00] +v_sub_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xd8,0x00,0x00] -v_sub_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xd9,0x00,0x00] +v_sub_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xda,0x00,0x00] -v_sub_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xdb,0x00,0x00] +v_sub_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xdc,0x00,0x00] -v_sub_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xdd,0x00,0x00] +v_sub_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xde,0x00,0x00] -v_sub_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xdf,0x00,0x00] +v_sub_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xf6,0x00,0x00] -v_sub_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xf7,0x00,0x00] +v_sub_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xf8,0x00,0x00] -v_sub_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xf9,0x00,0x00] +v_sub_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xfc,0x00,0x00] -v_sub_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xfd,0x00,0x00] +v_sub_f32_e64 v5, 0, exec_hi 
+// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xfe,0x00,0x00] -v_sub_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xff,0x00,0x00] +v_sub_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x00,0x01,0x00] -v_sub_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xfb,0x01,0x00] +v_sub_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x82,0x01,0x00] -v_sub_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x02,0x00] +v_sub_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xe0,0x01,0x00] -v_sub_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xff,0x03,0x00] +v_sub_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xee,0x01,0x00] -v_sub_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x20] +v_sub_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x02,0x00] -v_sub_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x40] +v_sub_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0xfe,0x03,0x00] -v_sub_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x60] +v_sub_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x20] -v_sub_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x02,0xd1,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x40] -v_sub_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x02,0xd1,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x60] -v_sub_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x02,0xd1,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x02,0xd1,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x02,0xd1,0x01,0x05,0x00,0x00] +v_sub_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x02,0xd1,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x08] +v_sub_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x02,0xd1,0x80,0x04,0x00,0x00] 
-v_sub_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x10] +v_sub_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x02,0xd1,0x80,0x04,0x00,0x00] -v_sub_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x18] +v_sub_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x08] + +v_sub_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x10] + +v_sub_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x02,0xd1,0x80,0x04,0x00,0x18] v_subrev_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x06] @@ -33035,92 +33519,113 @@ v_subrev_f32 v5, v255, v2 v_subrev_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x06] +v_subrev_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x00] + +v_subrev_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x03,0xd1,0x80,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x03,0xd1,0xc1,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x03,0xd1,0xf0,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x03,0xd1,0xf7,0x04,0x00,0x00] + v_subrev_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x00] -v_subrev_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x03,0xd1,0x01,0x05,0x00,0x00] - v_subrev_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x03,0xd1,0xff,0x05,0x00,0x00] -v_subrev_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xcb,0x00,0x00] +v_subrev_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xca,0x00,0x00] -v_subrev_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xcd,0x00,0x00] +v_subrev_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xcc,0x00,0x00] -v_subrev_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xcf,0x00,0x00] +v_subrev_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xce,0x00,0x00] -v_subrev_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xd5,0x00,0x00] +v_subrev_f32_e64 
v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xd4,0x00,0x00] -v_subrev_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xd7,0x00,0x00] +v_subrev_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xd6,0x00,0x00] -v_subrev_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xd9,0x00,0x00] +v_subrev_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xd8,0x00,0x00] -v_subrev_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xdb,0x00,0x00] +v_subrev_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xda,0x00,0x00] -v_subrev_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xdd,0x00,0x00] +v_subrev_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xdc,0x00,0x00] -v_subrev_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xdf,0x00,0x00] +v_subrev_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xde,0x00,0x00] -v_subrev_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xf7,0x00,0x00] +v_subrev_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xf6,0x00,0x00] -v_subrev_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xf9,0x00,0x00] +v_subrev_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xf8,0x00,0x00] -v_subrev_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xfd,0x00,0x00] +v_subrev_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xfc,0x00,0x00] -v_subrev_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xff,0x00,0x00] +v_subrev_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xfe,0x00,0x00] -v_subrev_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xfb,0x01,0x00] +v_subrev_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x00,0x01,0x00] -v_subrev_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x02,0x00] +v_subrev_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x82,0x01,0x00] -v_subrev_f32_e64 v5, v1, v255 -// CHECK: 
[0x05,0x00,0x03,0xd1,0x01,0xff,0x03,0x00] +v_subrev_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xe0,0x01,0x00] -v_subrev_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x20] +v_subrev_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xee,0x01,0x00] -v_subrev_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x40] +v_subrev_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x02,0x00] -v_subrev_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x60] +v_subrev_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0xfe,0x03,0x00] -v_subrev_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x03,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x20] -v_subrev_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x03,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x40] -v_subrev_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x03,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x60] -v_subrev_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x03,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x03,0xd1,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x08] +v_subrev_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x03,0xd1,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x10] +v_subrev_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x03,0xd1,0x80,0x04,0x00,0x00] -v_subrev_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x18] +v_subrev_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x03,0xd1,0x80,0x04,0x00,0x00] + +v_subrev_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x08] + +v_subrev_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x10] + 
+v_subrev_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x03,0xd1,0x80,0x04,0x00,0x18] v_mul_legacy_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x08] @@ -33194,92 +33699,113 @@ v_mul_legacy_f32 v5, v255, v2 v_mul_legacy_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x08] +v_mul_legacy_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x04,0xd1,0x80,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x04,0xd1,0xc1,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x04,0xd1,0xf0,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x04,0xd1,0xf7,0x04,0x00,0x00] + v_mul_legacy_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x00] -v_mul_legacy_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x04,0xd1,0x01,0x05,0x00,0x00] - v_mul_legacy_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x04,0xd1,0xff,0x05,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xcb,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xca,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xcd,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xcc,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xcf,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xce,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xd5,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xd4,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xd7,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xd6,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xd9,0x00,0x00] 
+v_mul_legacy_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xd8,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xdb,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xda,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xdd,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xdc,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xdf,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xde,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xf7,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xf6,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xf9,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xf8,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xfd,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xfc,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xff,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xfe,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xfb,0x01,0x00] +v_mul_legacy_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x00,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x02,0x00] +v_mul_legacy_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x82,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xff,0x03,0x00] +v_mul_legacy_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xe0,0x01,0x00] -v_mul_legacy_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x20] +v_mul_legacy_f32_e64 v5, 0, -4.0 +// CHECK: 
[0x05,0x00,0x04,0xd1,0x80,0xee,0x01,0x00] -v_mul_legacy_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x40] +v_mul_legacy_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x02,0x00] -v_mul_legacy_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x60] +v_mul_legacy_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0xfe,0x03,0x00] -v_mul_legacy_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x04,0xd1,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x20] -v_mul_legacy_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x04,0xd1,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x40] -v_mul_legacy_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x04,0xd1,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x60] -v_mul_legacy_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x04,0xd1,0x01,0x05,0x00,0x00] +v_mul_legacy_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x04,0xd1,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x08] +v_mul_legacy_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x04,0xd1,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x10] +v_mul_legacy_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x04,0xd1,0x80,0x04,0x00,0x00] -v_mul_legacy_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x18] +v_mul_legacy_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x04,0xd1,0x80,0x04,0x00,0x00] + +v_mul_legacy_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x08] + +v_mul_legacy_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x10] + +v_mul_legacy_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x04,0xd1,0x80,0x04,0x00,0x18] v_mul_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x0a] @@ -33353,92 +33879,113 
@@ v_mul_f32 v5, v255, v2 v_mul_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x0a] +v_mul_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x00] + +v_mul_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x05,0xd1,0x80,0x04,0x00,0x00] + +v_mul_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x05,0xd1,0xc1,0x04,0x00,0x00] + +v_mul_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x05,0xd1,0xf0,0x04,0x00,0x00] + +v_mul_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x05,0xd1,0xf7,0x04,0x00,0x00] + v_mul_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x00] -v_mul_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x05,0xd1,0x01,0x05,0x00,0x00] - v_mul_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x05,0xd1,0xff,0x05,0x00,0x00] -v_mul_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xcb,0x00,0x00] +v_mul_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xca,0x00,0x00] + +v_mul_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xcc,0x00,0x00] + +v_mul_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xce,0x00,0x00] -v_mul_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xcd,0x00,0x00] +v_mul_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xd4,0x00,0x00] -v_mul_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xcf,0x00,0x00] +v_mul_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xd6,0x00,0x00] -v_mul_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xd5,0x00,0x00] +v_mul_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xd8,0x00,0x00] -v_mul_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xd7,0x00,0x00] +v_mul_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xda,0x00,0x00] -v_mul_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xd9,0x00,0x00] +v_mul_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xdc,0x00,0x00] -v_mul_f32_e64 v5, v1, tba_hi -// CHECK: 
[0x05,0x00,0x05,0xd1,0x01,0xdb,0x00,0x00] +v_mul_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xde,0x00,0x00] -v_mul_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xdd,0x00,0x00] +v_mul_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xf6,0x00,0x00] -v_mul_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xdf,0x00,0x00] +v_mul_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xf8,0x00,0x00] -v_mul_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xf7,0x00,0x00] +v_mul_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xfc,0x00,0x00] -v_mul_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xf9,0x00,0x00] +v_mul_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xfe,0x00,0x00] -v_mul_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xfd,0x00,0x00] +v_mul_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x00,0x01,0x00] -v_mul_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xff,0x00,0x00] +v_mul_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x82,0x01,0x00] -v_mul_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xfb,0x01,0x00] +v_mul_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xe0,0x01,0x00] -v_mul_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x02,0x00] +v_mul_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xee,0x01,0x00] -v_mul_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xff,0x03,0x00] +v_mul_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x02,0x00] -v_mul_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x20] +v_mul_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0xfe,0x03,0x00] -v_mul_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x40] +v_mul_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x20] -v_mul_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x60] +v_mul_f32_e64 
v5, 0, -s2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x40] -v_mul_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x05,0xd1,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x60] -v_mul_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x05,0xd1,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x05,0xd1,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x05,0xd1,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x05,0xd1,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x05,0xd1,0x01,0x05,0x00,0x00] +v_mul_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x05,0xd1,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x08] +v_mul_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x05,0xd1,0x80,0x04,0x00,0x00] -v_mul_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x10] +v_mul_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x08] -v_mul_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x18] +v_mul_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x10] + +v_mul_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x05,0xd1,0x80,0x04,0x00,0x18] v_mul_i32_i24 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x0c] @@ -34112,92 +34659,113 @@ v_min_f32 v5, v255, v2 v_min_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x14] +v_min_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00] + +v_min_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00] + +v_min_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0xc1,0x04,0x00,0x00] + +v_min_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0xf0,0x04,0x00,0x00] + +v_min_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0xf7,0x04,0x00,0x00] + v_min_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x00] -v_min_f32_e64 v255, v1, s2 -// CHECK: 
[0xff,0x00,0x0a,0xd1,0x01,0x05,0x00,0x00] - v_min_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x0a,0xd1,0xff,0x05,0x00,0x00] -v_min_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xcb,0x00,0x00] +v_min_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xca,0x00,0x00] + +v_min_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xcc,0x00,0x00] + +v_min_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xce,0x00,0x00] + +v_min_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xd4,0x00,0x00] -v_min_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xcd,0x00,0x00] +v_min_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xd6,0x00,0x00] -v_min_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xcf,0x00,0x00] +v_min_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xd8,0x00,0x00] -v_min_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xd5,0x00,0x00] +v_min_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xda,0x00,0x00] -v_min_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xd7,0x00,0x00] +v_min_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xdc,0x00,0x00] -v_min_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xd9,0x00,0x00] +v_min_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xde,0x00,0x00] -v_min_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xdb,0x00,0x00] +v_min_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xf6,0x00,0x00] -v_min_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xdd,0x00,0x00] +v_min_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xf8,0x00,0x00] -v_min_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xdf,0x00,0x00] +v_min_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xfc,0x00,0x00] -v_min_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xf7,0x00,0x00] +v_min_f32_e64 v5, 0, 
exec_hi +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xfe,0x00,0x00] -v_min_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xf9,0x00,0x00] +v_min_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x00,0x01,0x00] -v_min_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xfd,0x00,0x00] +v_min_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x82,0x01,0x00] -v_min_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xff,0x00,0x00] +v_min_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xe0,0x01,0x00] -v_min_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xfb,0x01,0x00] +v_min_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xee,0x01,0x00] -v_min_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x02,0x00] +v_min_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x02,0x00] -v_min_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xff,0x03,0x00] +v_min_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0xfe,0x03,0x00] -v_min_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x20] +v_min_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x20] -v_min_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x40] +v_min_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x40] -v_min_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x60] +v_min_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x60] -v_min_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x0a,0xd1,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x0a,0xd1,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x0a,0xd1,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x0a,0xd1,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x0a,0xd1,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x0a,0xd1,0x80,0x04,0x00,0x00] 
-v_min_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x0a,0xd1,0x01,0x05,0x00,0x00] +v_min_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x0a,0xd1,0x80,0x04,0x00,0x00] -v_min_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x08] +v_min_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x08] -v_min_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x10] +v_min_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x10] -v_min_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x18] +v_min_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x0a,0xd1,0x80,0x04,0x00,0x18] v_max_f32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x16] @@ -34271,92 +34839,113 @@ v_max_f32 v5, v255, v2 v_max_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x16] +v_max_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x00] + +v_max_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x0b,0xd1,0x80,0x04,0x00,0x00] + +v_max_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0xc1,0x04,0x00,0x00] + +v_max_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0xf0,0x04,0x00,0x00] + +v_max_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0xf7,0x04,0x00,0x00] + v_max_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x00] -v_max_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x0b,0xd1,0x01,0x05,0x00,0x00] - v_max_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x0b,0xd1,0xff,0x05,0x00,0x00] -v_max_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xcb,0x00,0x00] +v_max_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xca,0x00,0x00] + +v_max_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xcc,0x00,0x00] + +v_max_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xce,0x00,0x00] + +v_max_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xd4,0x00,0x00] -v_max_f32_e64 v5, v1, flat_scratch_lo -// CHECK: 
[0x05,0x00,0x0b,0xd1,0x01,0xcd,0x00,0x00] +v_max_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xd6,0x00,0x00] -v_max_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xcf,0x00,0x00] +v_max_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xd8,0x00,0x00] -v_max_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xd5,0x00,0x00] +v_max_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xda,0x00,0x00] -v_max_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xd7,0x00,0x00] +v_max_f32_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xdc,0x00,0x00] -v_max_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xd9,0x00,0x00] +v_max_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xde,0x00,0x00] -v_max_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xdb,0x00,0x00] +v_max_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xf6,0x00,0x00] -v_max_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xdd,0x00,0x00] +v_max_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xf8,0x00,0x00] -v_max_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xdf,0x00,0x00] +v_max_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xfc,0x00,0x00] -v_max_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xf7,0x00,0x00] +v_max_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xfe,0x00,0x00] -v_max_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xf9,0x00,0x00] +v_max_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x00,0x01,0x00] -v_max_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xfd,0x00,0x00] +v_max_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x82,0x01,0x00] -v_max_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xff,0x00,0x00] +v_max_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xe0,0x01,0x00] -v_max_f32_e64 v5, v1, scc -// CHECK: 
[0x05,0x00,0x0b,0xd1,0x01,0xfb,0x01,0x00] +v_max_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xee,0x01,0x00] -v_max_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x02,0x00] +v_max_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x02,0x00] -v_max_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xff,0x03,0x00] +v_max_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0xfe,0x03,0x00] -v_max_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x20] +v_max_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x20] -v_max_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x40] +v_max_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x40] -v_max_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x60] +v_max_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x60] -v_max_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x0b,0xd1,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x0b,0xd1,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x0b,0xd1,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x0b,0xd1,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x0b,0xd1,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x0b,0xd1,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x0b,0xd1,0x01,0x05,0x00,0x00] +v_max_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x0b,0xd1,0x80,0x04,0x00,0x00] -v_max_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x08] +v_max_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x08] -v_max_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x10] +v_max_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x10] -v_max_f32_e64 v5, v1, s2 div:2 -// CHECK: 
[0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x18] +v_max_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x0b,0xd1,0x80,0x04,0x00,0x18] v_min_i32 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x18] @@ -35930,92 +36519,113 @@ v_mac_f32 v5, v255, v2 v_mac_f32 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x2c] +v_mac_f32_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x00] + +v_mac_f32_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x16,0xd1,0x80,0x04,0x00,0x00] + +v_mac_f32_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x16,0xd1,0xc1,0x04,0x00,0x00] + +v_mac_f32_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x16,0xd1,0xf0,0x04,0x00,0x00] + +v_mac_f32_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x16,0xd1,0xf7,0x04,0x00,0x00] + v_mac_f32_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x00] -v_mac_f32_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x16,0xd1,0x01,0x05,0x00,0x00] - v_mac_f32_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x16,0xd1,0xff,0x05,0x00,0x00] -v_mac_f32_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xcb,0x00,0x00] +v_mac_f32_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xca,0x00,0x00] + +v_mac_f32_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xcc,0x00,0x00] + +v_mac_f32_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xce,0x00,0x00] + +v_mac_f32_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xd4,0x00,0x00] -v_mac_f32_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xcd,0x00,0x00] +v_mac_f32_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xd6,0x00,0x00] -v_mac_f32_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xcf,0x00,0x00] +v_mac_f32_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xd8,0x00,0x00] -v_mac_f32_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xd5,0x00,0x00] +v_mac_f32_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xda,0x00,0x00] -v_mac_f32_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xd7,0x00,0x00] +v_mac_f32_e64 v5, 0, tma_lo +// CHECK: 
[0x05,0x00,0x16,0xd1,0x80,0xdc,0x00,0x00] -v_mac_f32_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xd9,0x00,0x00] +v_mac_f32_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xde,0x00,0x00] -v_mac_f32_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xdb,0x00,0x00] +v_mac_f32_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xf6,0x00,0x00] -v_mac_f32_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xdd,0x00,0x00] +v_mac_f32_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xf8,0x00,0x00] -v_mac_f32_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xdf,0x00,0x00] +v_mac_f32_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xfc,0x00,0x00] -v_mac_f32_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xf7,0x00,0x00] +v_mac_f32_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xfe,0x00,0x00] -v_mac_f32_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xf9,0x00,0x00] +v_mac_f32_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x00,0x01,0x00] -v_mac_f32_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xfd,0x00,0x00] +v_mac_f32_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x82,0x01,0x00] -v_mac_f32_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xff,0x00,0x00] +v_mac_f32_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xe0,0x01,0x00] -v_mac_f32_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xfb,0x01,0x00] +v_mac_f32_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xee,0x01,0x00] -v_mac_f32_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x02,0x00] +v_mac_f32_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x02,0x00] -v_mac_f32_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xff,0x03,0x00] +v_mac_f32_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0xfe,0x03,0x00] -v_mac_f32_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x20] +v_mac_f32_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x20] 
-v_mac_f32_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x40] +v_mac_f32_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x40] -v_mac_f32_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x60] +v_mac_f32_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x60] -v_mac_f32_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x16,0xd1,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x16,0xd1,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x16,0xd1,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x16,0xd1,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x16,0xd1,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x16,0xd1,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x16,0xd1,0x01,0x05,0x00,0x00] +v_mac_f32_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x16,0xd1,0x80,0x04,0x00,0x00] -v_mac_f32_e64 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x08] +v_mac_f32_e64 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x08] -v_mac_f32_e64 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x10] +v_mac_f32_e64 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x10] -v_mac_f32_e64 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x18] +v_mac_f32_e64 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x16,0xd1,0x80,0x04,0x00,0x18] v_madmk_f32 v5, 0, 0x11213141, v3 // CHECK: [0x80,0x06,0x0a,0x2e,0x41,0x31,0x21,0x11] @@ -36962,83 +37572,104 @@ v_add_f16 v5, v255, v2 v_add_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x3e] +v_add_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x04,0x00,0x00] + +v_add_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x1f,0xd1,0x80,0x04,0x00,0x00] + +v_add_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x1f,0xd1,0xc1,0x04,0x00,0x00] + +v_add_f16_e64 v5, 0.5, s2 +// CHECK: 
[0x05,0x00,0x1f,0xd1,0xf0,0x04,0x00,0x00] + +v_add_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x1f,0xd1,0xf7,0x04,0x00,0x00] + v_add_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x00] -v_add_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x1f,0xd1,0x01,0x05,0x00,0x00] - v_add_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x1f,0xd1,0xff,0x05,0x00,0x00] -v_add_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xcb,0x00,0x00] +v_add_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xca,0x00,0x00] + +v_add_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xcc,0x00,0x00] + +v_add_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xce,0x00,0x00] + +v_add_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xd4,0x00,0x00] -v_add_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xcd,0x00,0x00] +v_add_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xd6,0x00,0x00] -v_add_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xcf,0x00,0x00] +v_add_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xd8,0x00,0x00] -v_add_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xd5,0x00,0x00] +v_add_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xda,0x00,0x00] -v_add_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xd7,0x00,0x00] +v_add_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xdc,0x00,0x00] -v_add_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xd9,0x00,0x00] +v_add_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xde,0x00,0x00] -v_add_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xdb,0x00,0x00] +v_add_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xf6,0x00,0x00] -v_add_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xdd,0x00,0x00] +v_add_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xf8,0x00,0x00] -v_add_f16_e64 v5, v1, tma_hi -// 
CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xdf,0x00,0x00] +v_add_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xfc,0x00,0x00] -v_add_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xf7,0x00,0x00] +v_add_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xfe,0x00,0x00] -v_add_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xf9,0x00,0x00] +v_add_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x00,0x01,0x00] -v_add_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xfd,0x00,0x00] +v_add_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x82,0x01,0x00] -v_add_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xff,0x00,0x00] +v_add_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xe0,0x01,0x00] -v_add_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xfb,0x01,0x00] +v_add_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xee,0x01,0x00] -v_add_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x02,0x00] +v_add_f16_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x04,0x02,0x00] -v_add_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xff,0x03,0x00] +v_add_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0xfe,0x03,0x00] -v_add_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x20] +v_add_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x04,0x00,0x20] -v_add_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x40] +v_add_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x04,0x00,0x40] -v_add_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x60] +v_add_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x1f,0xd1,0x80,0x04,0x00,0x60] -v_add_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x1f,0xd1,0x01,0x05,0x00,0x00] +v_add_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x1f,0xd1,0x80,0x04,0x00,0x00] -v_add_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x1f,0xd1,0x01,0x05,0x00,0x00] 
+v_add_f16_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x1f,0xd1,0x80,0x04,0x00,0x00] -v_add_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x1f,0xd1,0x01,0x05,0x00,0x00] +v_add_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x1f,0xd1,0x80,0x04,0x00,0x00] -v_add_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x1f,0xd1,0x01,0x05,0x00,0x00] +v_add_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x1f,0xd1,0x80,0x04,0x00,0x00] v_sub_f16 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x40] @@ -37112,83 +37743,104 @@ v_sub_f16 v5, v255, v2 v_sub_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x40] +v_sub_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0x04,0x00,0x00] + +v_sub_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x20,0xd1,0x80,0x04,0x00,0x00] + +v_sub_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x20,0xd1,0xc1,0x04,0x00,0x00] + +v_sub_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x20,0xd1,0xf0,0x04,0x00,0x00] + +v_sub_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x20,0xd1,0xf7,0x04,0x00,0x00] + v_sub_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x00] -v_sub_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x20,0xd1,0x01,0x05,0x00,0x00] - v_sub_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x20,0xd1,0xff,0x05,0x00,0x00] -v_sub_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xcb,0x00,0x00] +v_sub_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xca,0x00,0x00] + +v_sub_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xcc,0x00,0x00] + +v_sub_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xce,0x00,0x00] -v_sub_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xcd,0x00,0x00] +v_sub_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xd4,0x00,0x00] -v_sub_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xcf,0x00,0x00] +v_sub_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xd6,0x00,0x00] -v_sub_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xd5,0x00,0x00] 
+v_sub_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xd8,0x00,0x00] -v_sub_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xd7,0x00,0x00] +v_sub_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xda,0x00,0x00] -v_sub_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xd9,0x00,0x00] +v_sub_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xdc,0x00,0x00] -v_sub_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xdb,0x00,0x00] +v_sub_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xde,0x00,0x00] -v_sub_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xdd,0x00,0x00] +v_sub_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xf6,0x00,0x00] -v_sub_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xdf,0x00,0x00] +v_sub_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xf8,0x00,0x00] -v_sub_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xf7,0x00,0x00] +v_sub_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xfc,0x00,0x00] -v_sub_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xf9,0x00,0x00] +v_sub_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xfe,0x00,0x00] -v_sub_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xfd,0x00,0x00] +v_sub_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0x00,0x01,0x00] -v_sub_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xff,0x00,0x00] +v_sub_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0x82,0x01,0x00] -v_sub_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xfb,0x01,0x00] +v_sub_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xe0,0x01,0x00] -v_sub_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x02,0x00] +v_sub_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xee,0x01,0x00] -v_sub_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xff,0x03,0x00] +v_sub_f16_e64 v5, 0, v2 +// CHECK: 
[0x05,0x00,0x20,0xd1,0x80,0x04,0x02,0x00] -v_sub_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x20] +v_sub_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0xfe,0x03,0x00] -v_sub_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x40] +v_sub_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0x04,0x00,0x20] -v_sub_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x60] +v_sub_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0x04,0x00,0x40] -v_sub_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x20,0xd1,0x01,0x05,0x00,0x00] +v_sub_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x20,0xd1,0x80,0x04,0x00,0x60] -v_sub_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x20,0xd1,0x01,0x05,0x00,0x00] +v_sub_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x20,0xd1,0x80,0x04,0x00,0x00] -v_sub_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x20,0xd1,0x01,0x05,0x00,0x00] +v_sub_f16_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x20,0xd1,0x80,0x04,0x00,0x00] -v_sub_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x20,0xd1,0x01,0x05,0x00,0x00] +v_sub_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x20,0xd1,0x80,0x04,0x00,0x00] + +v_sub_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x20,0xd1,0x80,0x04,0x00,0x00] v_subrev_f16 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x42] @@ -37262,83 +37914,104 @@ v_subrev_f16 v5, v255, v2 v_subrev_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x42] +v_subrev_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x04,0x00,0x00] + +v_subrev_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x21,0xd1,0x80,0x04,0x00,0x00] + +v_subrev_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x21,0xd1,0xc1,0x04,0x00,0x00] + +v_subrev_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x21,0xd1,0xf0,0x04,0x00,0x00] + +v_subrev_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x21,0xd1,0xf7,0x04,0x00,0x00] + v_subrev_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x00] -v_subrev_f16_e64 v255, v1, s2 -// CHECK: 
[0xff,0x00,0x21,0xd1,0x01,0x05,0x00,0x00] - v_subrev_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x21,0xd1,0xff,0x05,0x00,0x00] -v_subrev_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xcb,0x00,0x00] +v_subrev_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xca,0x00,0x00] + +v_subrev_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xcc,0x00,0x00] + +v_subrev_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xce,0x00,0x00] -v_subrev_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xcd,0x00,0x00] +v_subrev_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xd4,0x00,0x00] -v_subrev_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xcf,0x00,0x00] +v_subrev_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xd6,0x00,0x00] -v_subrev_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xd5,0x00,0x00] +v_subrev_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xd8,0x00,0x00] -v_subrev_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xd7,0x00,0x00] +v_subrev_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xda,0x00,0x00] -v_subrev_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xd9,0x00,0x00] +v_subrev_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xdc,0x00,0x00] -v_subrev_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xdb,0x00,0x00] +v_subrev_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xde,0x00,0x00] -v_subrev_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xdd,0x00,0x00] +v_subrev_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xf6,0x00,0x00] -v_subrev_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xdf,0x00,0x00] +v_subrev_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xf8,0x00,0x00] -v_subrev_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xf7,0x00,0x00] +v_subrev_f16_e64 v5, 0, exec_lo +// CHECK: 
[0x05,0x00,0x21,0xd1,0x80,0xfc,0x00,0x00] -v_subrev_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xf9,0x00,0x00] +v_subrev_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xfe,0x00,0x00] -v_subrev_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xfd,0x00,0x00] +v_subrev_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x00,0x01,0x00] -v_subrev_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xff,0x00,0x00] +v_subrev_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x82,0x01,0x00] -v_subrev_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xfb,0x01,0x00] +v_subrev_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xe0,0x01,0x00] -v_subrev_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x02,0x00] +v_subrev_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xee,0x01,0x00] -v_subrev_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xff,0x03,0x00] +v_subrev_f16_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x04,0x02,0x00] -v_subrev_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x20] +v_subrev_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0xfe,0x03,0x00] -v_subrev_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x40] +v_subrev_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x04,0x00,0x20] -v_subrev_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x60] +v_subrev_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x04,0x00,0x40] -v_subrev_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x21,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x21,0xd1,0x80,0x04,0x00,0x60] -v_subrev_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x21,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x21,0xd1,0x80,0x04,0x00,0x00] -v_subrev_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x21,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f16_e64 v5, 0, |s2| +// 
CHECK: [0x05,0x02,0x21,0xd1,0x80,0x04,0x00,0x00] -v_subrev_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x21,0xd1,0x01,0x05,0x00,0x00] +v_subrev_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x21,0xd1,0x80,0x04,0x00,0x00] + +v_subrev_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x21,0xd1,0x80,0x04,0x00,0x00] v_mul_f16 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x44] @@ -37412,83 +38085,104 @@ v_mul_f16 v5, v255, v2 v_mul_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x44] +v_mul_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0x04,0x00,0x00] + +v_mul_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x22,0xd1,0x80,0x04,0x00,0x00] + +v_mul_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x22,0xd1,0xc1,0x04,0x00,0x00] + +v_mul_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x22,0xd1,0xf0,0x04,0x00,0x00] + +v_mul_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x22,0xd1,0xf7,0x04,0x00,0x00] + v_mul_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x00] -v_mul_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x22,0xd1,0x01,0x05,0x00,0x00] - v_mul_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x22,0xd1,0xff,0x05,0x00,0x00] -v_mul_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xcb,0x00,0x00] +v_mul_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xca,0x00,0x00] + +v_mul_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xcc,0x00,0x00] -v_mul_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xcd,0x00,0x00] +v_mul_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xce,0x00,0x00] -v_mul_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xcf,0x00,0x00] +v_mul_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xd4,0x00,0x00] -v_mul_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xd5,0x00,0x00] +v_mul_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xd6,0x00,0x00] -v_mul_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xd7,0x00,0x00] +v_mul_f16_e64 v5, 0, 
tba_lo +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xd8,0x00,0x00] -v_mul_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xd9,0x00,0x00] +v_mul_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xda,0x00,0x00] -v_mul_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xdb,0x00,0x00] +v_mul_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xdc,0x00,0x00] -v_mul_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xdd,0x00,0x00] +v_mul_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xde,0x00,0x00] -v_mul_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xdf,0x00,0x00] +v_mul_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xf6,0x00,0x00] -v_mul_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xf7,0x00,0x00] +v_mul_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xf8,0x00,0x00] -v_mul_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xf9,0x00,0x00] +v_mul_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xfc,0x00,0x00] -v_mul_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xfd,0x00,0x00] +v_mul_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xfe,0x00,0x00] -v_mul_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xff,0x00,0x00] +v_mul_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0x00,0x01,0x00] -v_mul_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xfb,0x01,0x00] +v_mul_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0x82,0x01,0x00] -v_mul_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x02,0x00] +v_mul_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xe0,0x01,0x00] -v_mul_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xff,0x03,0x00] +v_mul_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xee,0x01,0x00] -v_mul_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x20] +v_mul_f16_e64 v5, 0, v2 +// CHECK: 
[0x05,0x00,0x22,0xd1,0x80,0x04,0x02,0x00] -v_mul_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x40] +v_mul_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0xfe,0x03,0x00] -v_mul_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x60] +v_mul_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0x04,0x00,0x20] -v_mul_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x22,0xd1,0x01,0x05,0x00,0x00] +v_mul_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0x04,0x00,0x40] -v_mul_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x22,0xd1,0x01,0x05,0x00,0x00] +v_mul_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x22,0xd1,0x80,0x04,0x00,0x60] -v_mul_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x22,0xd1,0x01,0x05,0x00,0x00] +v_mul_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x22,0xd1,0x80,0x04,0x00,0x00] -v_mul_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x22,0xd1,0x01,0x05,0x00,0x00] +v_mul_f16_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x22,0xd1,0x80,0x04,0x00,0x00] + +v_mul_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x22,0xd1,0x80,0x04,0x00,0x00] + +v_mul_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x22,0xd1,0x80,0x04,0x00,0x00] v_mac_f16 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x46] @@ -37562,83 +38256,104 @@ v_mac_f16 v5, v255, v2 v_mac_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x46] +v_mac_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x04,0x00,0x00] + +v_mac_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x23,0xd1,0x80,0x04,0x00,0x00] + +v_mac_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x23,0xd1,0xc1,0x04,0x00,0x00] + +v_mac_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x23,0xd1,0xf0,0x04,0x00,0x00] + +v_mac_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x23,0xd1,0xf7,0x04,0x00,0x00] + v_mac_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x00] -v_mac_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x23,0xd1,0x01,0x05,0x00,0x00] - v_mac_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x23,0xd1,0xff,0x05,0x00,0x00] 
-v_mac_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xcb,0x00,0x00] +v_mac_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xca,0x00,0x00] + +v_mac_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xcc,0x00,0x00] -v_mac_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xcd,0x00,0x00] +v_mac_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xce,0x00,0x00] -v_mac_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xcf,0x00,0x00] +v_mac_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xd4,0x00,0x00] -v_mac_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xd5,0x00,0x00] +v_mac_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xd6,0x00,0x00] -v_mac_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xd7,0x00,0x00] +v_mac_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xd8,0x00,0x00] -v_mac_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xd9,0x00,0x00] +v_mac_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xda,0x00,0x00] -v_mac_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xdb,0x00,0x00] +v_mac_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xdc,0x00,0x00] -v_mac_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xdd,0x00,0x00] +v_mac_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xde,0x00,0x00] -v_mac_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xdf,0x00,0x00] +v_mac_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xf6,0x00,0x00] -v_mac_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xf7,0x00,0x00] +v_mac_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xf8,0x00,0x00] -v_mac_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xf9,0x00,0x00] +v_mac_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xfc,0x00,0x00] -v_mac_f16_e64 v5, v1, exec_lo -// CHECK: 
[0x05,0x00,0x23,0xd1,0x01,0xfd,0x00,0x00] +v_mac_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xfe,0x00,0x00] -v_mac_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xff,0x00,0x00] +v_mac_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x00,0x01,0x00] -v_mac_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xfb,0x01,0x00] +v_mac_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x82,0x01,0x00] -v_mac_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x02,0x00] +v_mac_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xe0,0x01,0x00] -v_mac_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xff,0x03,0x00] +v_mac_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xee,0x01,0x00] -v_mac_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x20] +v_mac_f16_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x04,0x02,0x00] -v_mac_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x40] +v_mac_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0xfe,0x03,0x00] -v_mac_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x60] +v_mac_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x04,0x00,0x20] -v_mac_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x23,0xd1,0x01,0x05,0x00,0x00] +v_mac_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x04,0x00,0x40] -v_mac_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x23,0xd1,0x01,0x05,0x00,0x00] +v_mac_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x23,0xd1,0x80,0x04,0x00,0x60] -v_mac_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x23,0xd1,0x01,0x05,0x00,0x00] +v_mac_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x23,0xd1,0x80,0x04,0x00,0x00] -v_mac_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x23,0xd1,0x01,0x05,0x00,0x00] +v_mac_f16_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x23,0xd1,0x80,0x04,0x00,0x00] + +v_mac_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x23,0xd1,0x80,0x04,0x00,0x00] + +v_mac_f16_e64 
v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x23,0xd1,0x80,0x04,0x00,0x00] v_madmk_f16 v5, 0, 0x1121, v3 // CHECK: [0x80,0x06,0x0a,0x48,0x21,0x11,0x00,0x00] @@ -38816,83 +39531,104 @@ v_max_f16 v5, v255, v2 v_max_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x5a] +v_max_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x04,0x00,0x00] + +v_max_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x2d,0xd1,0x80,0x04,0x00,0x00] + +v_max_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0xc1,0x04,0x00,0x00] + +v_max_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0xf0,0x04,0x00,0x00] + +v_max_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0xf7,0x04,0x00,0x00] + v_max_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x00] -v_max_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x2d,0xd1,0x01,0x05,0x00,0x00] - v_max_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x2d,0xd1,0xff,0x05,0x00,0x00] -v_max_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xcb,0x00,0x00] +v_max_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xca,0x00,0x00] -v_max_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xcd,0x00,0x00] +v_max_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xcc,0x00,0x00] -v_max_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xcf,0x00,0x00] +v_max_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xce,0x00,0x00] -v_max_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xd5,0x00,0x00] +v_max_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xd4,0x00,0x00] -v_max_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xd7,0x00,0x00] +v_max_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xd6,0x00,0x00] -v_max_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xd9,0x00,0x00] +v_max_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xd8,0x00,0x00] -v_max_f16_e64 v5, v1, tba_hi -// CHECK: 
[0x05,0x00,0x2d,0xd1,0x01,0xdb,0x00,0x00] +v_max_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xda,0x00,0x00] -v_max_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xdd,0x00,0x00] +v_max_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xdc,0x00,0x00] -v_max_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xdf,0x00,0x00] +v_max_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xde,0x00,0x00] -v_max_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xf7,0x00,0x00] +v_max_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xf6,0x00,0x00] -v_max_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xf9,0x00,0x00] +v_max_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xf8,0x00,0x00] -v_max_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xfd,0x00,0x00] +v_max_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xfc,0x00,0x00] -v_max_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xff,0x00,0x00] +v_max_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xfe,0x00,0x00] -v_max_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xfb,0x01,0x00] +v_max_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x00,0x01,0x00] -v_max_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x02,0x00] +v_max_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x82,0x01,0x00] -v_max_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xff,0x03,0x00] +v_max_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xe0,0x01,0x00] -v_max_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x20] +v_max_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xee,0x01,0x00] -v_max_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x40] +v_max_f16_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x04,0x02,0x00] -v_max_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x60] +v_max_f16_e64 
v5, 0, v255 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0xfe,0x03,0x00] -v_max_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x2d,0xd1,0x01,0x05,0x00,0x00] +v_max_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x04,0x00,0x20] -v_max_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x2d,0xd1,0x01,0x05,0x00,0x00] +v_max_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x04,0x00,0x40] -v_max_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x2d,0xd1,0x01,0x05,0x00,0x00] +v_max_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x2d,0xd1,0x80,0x04,0x00,0x60] -v_max_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x2d,0xd1,0x01,0x05,0x00,0x00] +v_max_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x2d,0xd1,0x80,0x04,0x00,0x00] + +v_max_f16_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x2d,0xd1,0x80,0x04,0x00,0x00] + +v_max_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x2d,0xd1,0x80,0x04,0x00,0x00] + +v_max_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x2d,0xd1,0x80,0x04,0x00,0x00] v_min_f16 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x5c] @@ -38966,83 +39702,104 @@ v_min_f16 v5, v255, v2 v_min_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x5c] +v_min_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00] + +v_min_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00] + +v_min_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0xc1,0x04,0x00,0x00] + +v_min_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0xf0,0x04,0x00,0x00] + +v_min_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0xf7,0x04,0x00,0x00] + v_min_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x00] -v_min_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x2e,0xd1,0x01,0x05,0x00,0x00] - v_min_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x2e,0xd1,0xff,0x05,0x00,0x00] -v_min_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xcb,0x00,0x00] +v_min_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xca,0x00,0x00] -v_min_f16_e64 v5, v1, flat_scratch_lo -// CHECK: 
[0x05,0x00,0x2e,0xd1,0x01,0xcd,0x00,0x00] +v_min_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xcc,0x00,0x00] -v_min_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xcf,0x00,0x00] +v_min_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xce,0x00,0x00] -v_min_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xd5,0x00,0x00] +v_min_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xd4,0x00,0x00] -v_min_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xd7,0x00,0x00] +v_min_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xd6,0x00,0x00] -v_min_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xd9,0x00,0x00] +v_min_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xd8,0x00,0x00] -v_min_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xdb,0x00,0x00] +v_min_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xda,0x00,0x00] -v_min_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xdd,0x00,0x00] +v_min_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xdc,0x00,0x00] -v_min_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xdf,0x00,0x00] +v_min_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xde,0x00,0x00] -v_min_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xf7,0x00,0x00] +v_min_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xf6,0x00,0x00] -v_min_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xf9,0x00,0x00] +v_min_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xf8,0x00,0x00] -v_min_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xfd,0x00,0x00] +v_min_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xfc,0x00,0x00] -v_min_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xff,0x00,0x00] +v_min_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xfe,0x00,0x00] -v_min_f16_e64 v5, v1, scc -// CHECK: 
[0x05,0x00,0x2e,0xd1,0x01,0xfb,0x01,0x00] +v_min_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x00,0x01,0x00] -v_min_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x02,0x00] +v_min_f16_e64 v5, 0, -1 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x82,0x01,0x00] -v_min_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xff,0x03,0x00] +v_min_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xe0,0x01,0x00] -v_min_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x20] +v_min_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xee,0x01,0x00] -v_min_f16_e64 v5, v1, -s2 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x40] +v_min_f16_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x04,0x02,0x00] -v_min_f16_e64 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x60] +v_min_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0xfe,0x03,0x00] -v_min_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x2e,0xd1,0x01,0x05,0x00,0x00] +v_min_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x04,0x00,0x20] -v_min_f16_e64 v5, v1, |s2| -// CHECK: [0x05,0x02,0x2e,0xd1,0x01,0x05,0x00,0x00] +v_min_f16_e64 v5, 0, -s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x04,0x00,0x40] -v_min_f16_e64 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x2e,0xd1,0x01,0x05,0x00,0x00] +v_min_f16_e64 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x2e,0xd1,0x80,0x04,0x00,0x60] -v_min_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x2e,0xd1,0x01,0x05,0x00,0x00] +v_min_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x2e,0xd1,0x80,0x04,0x00,0x00] + +v_min_f16_e64 v5, 0, |s2| +// CHECK: [0x05,0x02,0x2e,0xd1,0x80,0x04,0x00,0x00] + +v_min_f16_e64 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x2e,0xd1,0x80,0x04,0x00,0x00] + +v_min_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x2e,0xd1,0x80,0x04,0x00,0x00] v_max_u16 v5, s1, v2 // CHECK: [0x01,0x04,0x0a,0x5e] @@ -39716,275 +40473,350 @@ v_ldexp_f16 v5, v255, v2 v_ldexp_f16 v5, s1, v255 // CHECK: [0x01,0xfe,0x0b,0x66] 
+v_ldexp_f16_e64 v5, 0, s2 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0x04,0x00,0x00] + +v_ldexp_f16_e64 v255, 0, s2 +// CHECK: [0xff,0x00,0x33,0xd1,0x80,0x04,0x00,0x00] + +v_ldexp_f16_e64 v5, -1, s2 +// CHECK: [0x05,0x00,0x33,0xd1,0xc1,0x04,0x00,0x00] + +v_ldexp_f16_e64 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x33,0xd1,0xf0,0x04,0x00,0x00] + +v_ldexp_f16_e64 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x33,0xd1,0xf7,0x04,0x00,0x00] + v_ldexp_f16_e64 v5, v1, s2 // CHECK: [0x05,0x00,0x33,0xd1,0x01,0x05,0x00,0x00] -v_ldexp_f16_e64 v255, v1, s2 -// CHECK: [0xff,0x00,0x33,0xd1,0x01,0x05,0x00,0x00] - v_ldexp_f16_e64 v5, v255, s2 // CHECK: [0x05,0x00,0x33,0xd1,0xff,0x05,0x00,0x00] -v_ldexp_f16_e64 v5, v1, s101 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xcb,0x00,0x00] +v_ldexp_f16_e64 v5, 0, s101 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xca,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xcc,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xce,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xd4,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xd6,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xd8,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xda,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xdc,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xde,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xf6,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, m0 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xf8,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xfc,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xfe,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, 0 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0x00,0x01,0x00] + +v_ldexp_f16_e64 v5, 0, -1 +// CHECK: 
[0x05,0x00,0x33,0xd1,0x80,0x82,0x01,0x00] + +v_ldexp_f16_e64 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xe0,0x01,0x00] + +v_ldexp_f16_e64 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xee,0x01,0x00] + +v_ldexp_f16_e64 v5, 0, v2 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0x04,0x02,0x00] + +v_ldexp_f16_e64 v5, 0, v255 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0xfe,0x03,0x00] + +v_ldexp_f16_e64 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x33,0xd1,0x80,0x04,0x00,0x20] + +v_ldexp_f16_e64 v5, |0|, s2 +// CHECK: [0x05,0x01,0x33,0xd1,0x80,0x04,0x00,0x00] + +v_ldexp_f16_e64 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x33,0xd1,0x80,0x04,0x00,0x00] -v_ldexp_f16_e64 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xcd,0x00,0x00] +v_mad_legacy_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xcf,0x00,0x00] +v_mad_legacy_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xd5,0x00,0x00] +v_mad_legacy_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x65,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xd7,0x00,0x00] +v_mad_legacy_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x66,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xd9,0x00,0x00] +v_mad_legacy_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x67,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xdb,0x00,0x00] +v_mad_legacy_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x6a,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xdd,0x00,0x00] +v_mad_legacy_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x6b,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xdf,0x00,0x00] +v_mad_legacy_f32 v5, tba_lo, 0, 0 +// 
CHECK: [0x05,0x00,0xc0,0xd1,0x6c,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xf7,0x00,0x00] +v_mad_legacy_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x6d,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, m0 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xf9,0x00,0x00] +v_mad_legacy_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x6e,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xfd,0x00,0x00] +v_mad_legacy_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x6f,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xff,0x00,0x00] +v_mad_legacy_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x7b,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, 0 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x01,0x01,0x00] +v_mad_legacy_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x7c,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, -1 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x83,0x01,0x00] +v_mad_legacy_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x7e,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, 0.5 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xe1,0x01,0x00] +v_mad_legacy_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x7f,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, -4.0 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xef,0x01,0x00] +v_mad_legacy_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x80,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, scc -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xfb,0x01,0x00] +v_mad_legacy_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0xc1,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, v2 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x05,0x02,0x00] +v_mad_legacy_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0xf0,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, v1, v255 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xff,0x03,0x00] +v_mad_legacy_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0xf7,0x00,0x01,0x02] -v_ldexp_f16_e64 v5, -v1, s2 -// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x05,0x00,0x20] 
+v_mad_legacy_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x01,0x01,0x02] -v_ldexp_f16_e64 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x33,0xd1,0x01,0x05,0x00,0x00] +v_mad_legacy_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0xff,0x01,0x01,0x02] -v_ldexp_f16_e64 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x33,0xd1,0x01,0x05,0x00,0x00] +v_mad_legacy_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x82,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0xe0,0x01,0x02] -v_mad_legacy_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0xee,0x01,0x02] -v_mad_legacy_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x65,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x02,0x02] -v_mad_legacy_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x66,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0xfe,0x03,0x02] -v_mad_legacy_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x67,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x05,0x03] -v_mad_legacy_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x6a,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0xc1,0x03] -v_mad_legacy_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x6b,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0xdd,0x03] -v_mad_legacy_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x6c,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x0d,0x04] -v_mad_legacy_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x6d,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, v255 +// CHECK: 
[0x05,0x00,0xc0,0xd1,0x01,0x00,0xfd,0x07] -v_mad_legacy_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x6e,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x22] -v_mad_legacy_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x6f,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x42] -v_mad_legacy_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x7b,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x82] -v_mad_legacy_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x7c,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0xe2] -v_mad_legacy_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x7e,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x7f,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0xfd,0x04,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x05,0x0e,0x04] +v_mad_legacy_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0xff,0x05,0x0e,0x04] +v_mad_legacy_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xc0,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0xfe,0x0f,0x04] +v_mad_legacy_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x0a] -v_mad_legacy_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0xfe,0x07] +v_mad_legacy_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x12] 
-v_mad_legacy_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x24] +v_mad_legacy_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x00,0x01,0x1a] -v_mad_legacy_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x44] +v_mad_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x84] +v_mad_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_legacy_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0xe4] +v_mad_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x65,0x00,0x01,0x02] -v_mad_legacy_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x66,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x67,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x6a,0x00,0x01,0x02] -v_mad_legacy_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x6b,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xc0,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x6c,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x0c] +v_mad_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x6d,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x14] +v_mad_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x6e,0x00,0x01,0x02] -v_mad_legacy_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x1c] +v_mad_f32 v5, 
tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x6f,0x00,0x01,0x02] -v_mad_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x7b,0x00,0x01,0x02] -v_mad_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x7c,0x00,0x01,0x02] -v_mad_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x65,0x04,0x0e,0x04] +v_mad_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x7e,0x00,0x01,0x02] -v_mad_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x66,0x04,0x0e,0x04] +v_mad_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x7f,0x00,0x01,0x02] -v_mad_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x67,0x04,0x0e,0x04] +v_mad_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x80,0x00,0x01,0x02] -v_mad_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x6a,0x04,0x0e,0x04] +v_mad_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0xc1,0x00,0x01,0x02] -v_mad_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x6b,0x04,0x0e,0x04] +v_mad_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0xf0,0x00,0x01,0x02] -v_mad_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x6c,0x04,0x0e,0x04] +v_mad_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0xf7,0x00,0x01,0x02] -v_mad_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x6d,0x04,0x0e,0x04] +v_mad_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x01,0x01,0x02] -v_mad_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x6e,0x04,0x0e,0x04] +v_mad_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0xff,0x01,0x01,0x02] -v_mad_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x6f,0x04,0x0e,0x04] +v_mad_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x82,0x01,0x02] -v_mad_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x7b,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0.5, 0 +// CHECK: 
[0x05,0x00,0xc1,0xd1,0x01,0xe0,0x01,0x02] -v_mad_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x7c,0x04,0x0e,0x04] +v_mad_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0xee,0x01,0x02] -v_mad_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x7e,0x04,0x0e,0x04] +v_mad_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x02,0x02] -v_mad_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x7f,0x04,0x0e,0x04] +v_mad_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0xfe,0x03,0x02] -v_mad_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0xfd,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x05,0x03] -v_mad_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x05,0x0e,0x04] +v_mad_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0xc1,0x03] -v_mad_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0xff,0x05,0x0e,0x04] +v_mad_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0xdd,0x03] -v_mad_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0xfe,0x0f,0x04] +v_mad_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x0d,0x04] -v_mad_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0xfe,0x07] +v_mad_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0xfd,0x07] -v_mad_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x24] +v_mad_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x22] -v_mad_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x44] +v_mad_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x42] -v_mad_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x84] +v_mad_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x82] -v_mad_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0xe4] +v_mad_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0xe2] -v_mad_f32 v5, |s1|, 
v2, v3 -// CHECK: [0x05,0x01,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xc1,0xd1,0x01,0x04,0x0e,0x04] +v_mad_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xc1,0xd1,0x01,0x00,0x01,0x02] -v_mad_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x0c] +v_mad_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x0a] -v_mad_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x14] +v_mad_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x12] -v_mad_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x1c] +v_mad_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x00,0x01,0x1a] v_mad_i32_i24 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0x01,0x02] @@ -40172,389 +41004,521 @@ v_mad_u32_u24 v5, s1, 0, v3 v_mad_u32_u24 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0xfd,0x07] -v_cubeid_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x02] + +v_cubeid_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xc4,0xd1,0x01,0x00,0x01,0x02] + +v_cubeid_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x65,0x00,0x01,0x02] + +v_cubeid_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x66,0x00,0x01,0x02] + +v_cubeid_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: 
[0x05,0x00,0xc4,0xd1,0x67,0x00,0x01,0x02] + +v_cubeid_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x6a,0x00,0x01,0x02] + +v_cubeid_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x6b,0x00,0x01,0x02] + +v_cubeid_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x6c,0x00,0x01,0x02] + +v_cubeid_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x6d,0x00,0x01,0x02] + +v_cubeid_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x6e,0x00,0x01,0x02] + +v_cubeid_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x6f,0x00,0x01,0x02] + +v_cubeid_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x7b,0x00,0x01,0x02] + +v_cubeid_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x7c,0x00,0x01,0x02] + +v_cubeid_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x7e,0x00,0x01,0x02] + +v_cubeid_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x7f,0x00,0x01,0x02] + +v_cubeid_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x80,0x00,0x01,0x02] + +v_cubeid_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0xc1,0x00,0x01,0x02] + +v_cubeid_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0xf0,0x00,0x01,0x02] + +v_cubeid_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0xf7,0x00,0x01,0x02] + +v_cubeid_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x01,0x01,0x02] + +v_cubeid_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0xff,0x01,0x01,0x02] + +v_cubeid_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x82,0x01,0x02] + +v_cubeid_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0xe0,0x01,0x02] + +v_cubeid_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0xee,0x01,0x02] + +v_cubeid_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x02,0x02] + +v_cubeid_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0xfe,0x03,0x02] + +v_cubeid_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x05,0x03] + +v_cubeid_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0xc1,0x03] + +v_cubeid_f32 
v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0xdd,0x03] + +v_cubeid_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x0d,0x04] + +v_cubeid_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0xfd,0x07] + +v_cubeid_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x22] + +v_cubeid_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x42] + +v_cubeid_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x82] + +v_cubeid_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0xe2] -v_cubeid_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubeid_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xc4,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x65,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xc4,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x66,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xc4,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x67,0x04,0x0e,0x04] +v_cubeid_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xc4,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x6a,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xc4,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x6b,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x0a] -v_cubeid_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x6c,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x12] -v_cubeid_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x6d,0x04,0x0e,0x04] +v_cubeid_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x00,0x01,0x1a] -v_cubeid_f32 v5, tma_lo, v2, v3 
-// CHECK: [0x05,0x00,0xc4,0xd1,0x6e,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x6f,0x04,0x0e,0x04] +v_cubesc_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubeid_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x7b,0x04,0x0e,0x04] +v_cubesc_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x65,0x00,0x01,0x02] -v_cubeid_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x7c,0x04,0x0e,0x04] +v_cubesc_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x66,0x00,0x01,0x02] -v_cubeid_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x7e,0x04,0x0e,0x04] +v_cubesc_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x67,0x00,0x01,0x02] -v_cubeid_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x7f,0x04,0x0e,0x04] +v_cubesc_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x6a,0x00,0x01,0x02] -v_cubeid_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0xfd,0x04,0x0e,0x04] +v_cubesc_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x6b,0x00,0x01,0x02] -v_cubeid_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x05,0x0e,0x04] +v_cubesc_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x6c,0x00,0x01,0x02] -v_cubeid_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0xff,0x05,0x0e,0x04] +v_cubesc_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x6d,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0xfe,0x0f,0x04] +v_cubesc_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x6e,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0xfe,0x07] +v_cubesc_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x6f,0x00,0x01,0x02] -v_cubeid_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x24] +v_cubesc_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x7b,0x00,0x01,0x02] 
-v_cubeid_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x44] +v_cubesc_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x7c,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x84] +v_cubesc_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x7e,0x00,0x01,0x02] -v_cubeid_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0xe4] +v_cubesc_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x7f,0x00,0x01,0x02] -v_cubeid_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x80,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0xc1,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0xf0,0x00,0x01,0x02] -v_cubeid_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0xf7,0x00,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xc4,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x01,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x0c] +v_cubesc_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0xff,0x01,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x14] +v_cubesc_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x82,0x01,0x02] -v_cubeid_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x1c] +v_cubesc_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0xe0,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, -4.0, 0 +// CHECK: 
[0x05,0x00,0xc5,0xd1,0x01,0xee,0x01,0x02] -v_cubesc_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x02,0x02] -v_cubesc_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x65,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0xfe,0x03,0x02] -v_cubesc_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x66,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x05,0x03] -v_cubesc_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x67,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0xc1,0x03] -v_cubesc_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x6a,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0xdd,0x03] -v_cubesc_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x6b,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x0d,0x04] -v_cubesc_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x6c,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0xfd,0x07] -v_cubesc_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x6d,0x04,0x0e,0x04] +v_cubesc_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x22] -v_cubesc_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x6e,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x42] -v_cubesc_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x6f,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x82] -v_cubesc_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x7b,0x04,0x0e,0x04] +v_cubesc_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0xe2] -v_cubesc_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x7c,0x04,0x0e,0x04] 
+v_cubesc_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x7e,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x7f,0x04,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0xfd,0x04,0x0e,0x04] +v_cubesc_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x05,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xc5,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0xff,0x05,0x0e,0x04] +v_cubesc_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x0a] -v_cubesc_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0xfe,0x0f,0x04] +v_cubesc_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x12] -v_cubesc_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0xfe,0x07] +v_cubesc_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x00,0x01,0x1a] -v_cubesc_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x24] +v_cubetc_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x44] +v_cubetc_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x84] +v_cubetc_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x65,0x00,0x01,0x02] -v_cubesc_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0xe4] +v_cubetc_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x66,0x00,0x01,0x02] -v_cubesc_f32 v5, |s1|, v2, v3 -// CHECK: 
[0x05,0x01,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x67,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x6a,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x6b,0x00,0x01,0x02] -v_cubesc_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x6c,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xc5,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x6d,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x0c] +v_cubetc_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x6e,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x14] +v_cubetc_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x6f,0x00,0x01,0x02] -v_cubesc_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x1c] +v_cubetc_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x7b,0x00,0x01,0x02] -v_cubetc_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x7c,0x00,0x01,0x02] -v_cubetc_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x7e,0x00,0x01,0x02] -v_cubetc_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x65,0x04,0x0e,0x04] +v_cubetc_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x7f,0x00,0x01,0x02] -v_cubetc_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x66,0x04,0x0e,0x04] +v_cubetc_f32 v5, 0, 0, 0 +// CHECK: 
[0x05,0x00,0xc6,0xd1,0x80,0x00,0x01,0x02] -v_cubetc_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x67,0x04,0x0e,0x04] +v_cubetc_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0xc1,0x00,0x01,0x02] -v_cubetc_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x6a,0x04,0x0e,0x04] +v_cubetc_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0xf0,0x00,0x01,0x02] -v_cubetc_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x6b,0x04,0x0e,0x04] +v_cubetc_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0xf7,0x00,0x01,0x02] -v_cubetc_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x6c,0x04,0x0e,0x04] +v_cubetc_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x01,0x01,0x02] -v_cubetc_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x6d,0x04,0x0e,0x04] +v_cubetc_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0xff,0x01,0x01,0x02] -v_cubetc_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x6e,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x82,0x01,0x02] -v_cubetc_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x6f,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0xe0,0x01,0x02] -v_cubetc_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x7b,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0xee,0x01,0x02] -v_cubetc_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x7c,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x02,0x02] -v_cubetc_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x7e,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0xfe,0x03,0x02] -v_cubetc_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x7f,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x05,0x03] -v_cubetc_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0xfd,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 
0.5 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0xc1,0x03] -v_cubetc_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x05,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0xdd,0x03] -v_cubetc_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0xff,0x05,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x0d,0x04] -v_cubetc_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0xfe,0x0f,0x04] +v_cubetc_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0xfd,0x07] -v_cubetc_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0xfe,0x07] +v_cubetc_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x22] -v_cubetc_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x24] +v_cubetc_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x42] -v_cubetc_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x44] +v_cubetc_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x82] -v_cubetc_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x84] +v_cubetc_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0xe2] -v_cubetc_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0xe4] +v_cubetc_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubetc_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubetc_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubetc_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubetc_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 
v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xc6,0xd1,0x01,0x00,0x01,0x02] -v_cubetc_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xc6,0xd1,0x01,0x04,0x0e,0x04] +v_cubetc_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x0a] -v_cubetc_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x0c] +v_cubetc_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x12] -v_cubetc_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x14] +v_cubetc_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x00,0x01,0x1a] -v_cubetc_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x1c] +v_cubema_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x02] -v_cubema_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xc7,0xd1,0x01,0x00,0x01,0x02] -v_cubema_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x65,0x00,0x01,0x02] -v_cubema_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x65,0x04,0x0e,0x04] +v_cubema_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x66,0x00,0x01,0x02] -v_cubema_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x66,0x04,0x0e,0x04] +v_cubema_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x67,0x00,0x01,0x02] -v_cubema_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x67,0x04,0x0e,0x04] +v_cubema_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x6a,0x00,0x01,0x02] -v_cubema_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x6a,0x04,0x0e,0x04] +v_cubema_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x6b,0x00,0x01,0x02] -v_cubema_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x6b,0x04,0x0e,0x04] +v_cubema_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x6c,0x00,0x01,0x02] -v_cubema_f32 v5, 
tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x6c,0x04,0x0e,0x04] +v_cubema_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x6d,0x00,0x01,0x02] -v_cubema_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x6d,0x04,0x0e,0x04] +v_cubema_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x6e,0x00,0x01,0x02] -v_cubema_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x6e,0x04,0x0e,0x04] +v_cubema_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x6f,0x00,0x01,0x02] -v_cubema_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x6f,0x04,0x0e,0x04] +v_cubema_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x7b,0x00,0x01,0x02] -v_cubema_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x7b,0x04,0x0e,0x04] +v_cubema_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x7c,0x00,0x01,0x02] -v_cubema_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x7c,0x04,0x0e,0x04] +v_cubema_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x7e,0x00,0x01,0x02] -v_cubema_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x7e,0x04,0x0e,0x04] +v_cubema_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x7f,0x00,0x01,0x02] -v_cubema_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x7f,0x04,0x0e,0x04] +v_cubema_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x80,0x00,0x01,0x02] -v_cubema_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0xfd,0x04,0x0e,0x04] +v_cubema_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0xc1,0x00,0x01,0x02] -v_cubema_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x05,0x0e,0x04] +v_cubema_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0xf0,0x00,0x01,0x02] -v_cubema_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0xff,0x05,0x0e,0x04] +v_cubema_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0xf7,0x00,0x01,0x02] -v_cubema_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0xfe,0x0f,0x04] +v_cubema_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x01,0x01,0x02] 
-v_cubema_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0xfe,0x07] +v_cubema_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0xff,0x01,0x01,0x02] -v_cubema_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x24] +v_cubema_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x82,0x01,0x02] -v_cubema_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x44] +v_cubema_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0xe0,0x01,0x02] -v_cubema_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x84] +v_cubema_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0xee,0x01,0x02] -v_cubema_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0xe4] +v_cubema_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x02,0x02] -v_cubema_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0xfe,0x03,0x02] -v_cubema_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x05,0x03] -v_cubema_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0xc1,0x03] -v_cubema_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0xdd,0x03] -v_cubema_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xc7,0xd1,0x01,0x04,0x0e,0x04] +v_cubema_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x0d,0x04] -v_cubema_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x0c] +v_cubema_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0xfd,0x07] -v_cubema_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x14] +v_cubema_f32 v5, -s1, 0, 0 +// CHECK: 
[0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x22] -v_cubema_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x1c] +v_cubema_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x42] + +v_cubema_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x82] + +v_cubema_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0xe2] + +v_cubema_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xc7,0xd1,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xc7,0xd1,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xc7,0xd1,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xc7,0xd1,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xc7,0xd1,0x01,0x00,0x01,0x02] + +v_cubema_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x0a] + +v_cubema_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x12] + +v_cubema_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x00,0x01,0x1a] v_bfe_u32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0x01,0x02] @@ -40835,182 +41799,248 @@ v_bfi_b32 v5, s1, 0, v3 v_bfi_b32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0xfd,0x07] -v_fma_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x02] + +v_fma_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xcb,0xd1,0x01,0x00,0x01,0x02] + +v_fma_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x65,0x00,0x01,0x02] + +v_fma_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x66,0x00,0x01,0x02] + +v_fma_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x67,0x00,0x01,0x02] + +v_fma_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x6a,0x00,0x01,0x02] + +v_fma_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x6b,0x00,0x01,0x02] + +v_fma_f32 v5, tba_lo, 0, 0 +// 
CHECK: [0x05,0x00,0xcb,0xd1,0x6c,0x00,0x01,0x02] + +v_fma_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x6d,0x00,0x01,0x02] -v_fma_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x6e,0x00,0x01,0x02] -v_fma_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x65,0x04,0x0e,0x04] +v_fma_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x6f,0x00,0x01,0x02] -v_fma_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x66,0x04,0x0e,0x04] +v_fma_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x7b,0x00,0x01,0x02] -v_fma_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x67,0x04,0x0e,0x04] +v_fma_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x7c,0x00,0x01,0x02] -v_fma_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x6a,0x04,0x0e,0x04] +v_fma_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x7e,0x00,0x01,0x02] -v_fma_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x6b,0x04,0x0e,0x04] +v_fma_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x7f,0x00,0x01,0x02] -v_fma_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x6c,0x04,0x0e,0x04] +v_fma_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x80,0x00,0x01,0x02] -v_fma_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x6d,0x04,0x0e,0x04] +v_fma_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0xc1,0x00,0x01,0x02] -v_fma_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x6e,0x04,0x0e,0x04] +v_fma_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0xf0,0x00,0x01,0x02] -v_fma_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x6f,0x04,0x0e,0x04] +v_fma_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0xf7,0x00,0x01,0x02] -v_fma_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x7b,0x04,0x0e,0x04] +v_fma_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x01,0x01,0x02] -v_fma_f32 v5, m0, v2, v3 -// CHECK: 
[0x05,0x00,0xcb,0xd1,0x7c,0x04,0x0e,0x04] +v_fma_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0xff,0x01,0x01,0x02] -v_fma_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x7e,0x04,0x0e,0x04] +v_fma_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x82,0x01,0x02] -v_fma_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x7f,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0xe0,0x01,0x02] -v_fma_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0xfd,0x04,0x0e,0x04] +v_fma_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0xee,0x01,0x02] -v_fma_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x05,0x0e,0x04] +v_fma_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x02,0x02] -v_fma_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0xff,0x05,0x0e,0x04] +v_fma_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0xfe,0x03,0x02] -v_fma_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0xfe,0x0f,0x04] +v_fma_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x05,0x03] -v_fma_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0xfe,0x07] +v_fma_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0xc1,0x03] -v_fma_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x24] +v_fma_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0xdd,0x03] -v_fma_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x44] +v_fma_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x0d,0x04] -v_fma_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x84] +v_fma_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0xfd,0x07] -v_fma_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0xe4] +v_fma_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x22] -v_fma_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, neg(0), 0 +// 
CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x42] -v_fma_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x82] -v_fma_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0xe2] -v_fma_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xcb,0xd1,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xcb,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xcb,0xd1,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x0c] +v_fma_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xcb,0xd1,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x14] +v_fma_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xcb,0xd1,0x01,0x00,0x01,0x02] -v_fma_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x1c] +v_fma_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xcb,0xd1,0x01,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x0a] -v_fma_f64 v[254:255], s[2:3], v[2:3], v[3:4] -// CHECK: [0xfe,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x12] -v_fma_f64 v[5:6], s[4:5], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x04,0x04,0x0e,0x04] +v_fma_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x00,0x01,0x1a] -v_fma_f64 v[5:6], s[100:101], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x64,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x02] -v_fma_f64 v[5:6], flat_scratch, v[2:3], v[3:4] -// CHECK: 
[0x05,0x00,0xcc,0xd1,0x66,0x04,0x0e,0x04] +v_fma_f64 v[254:255], s[2:3], 0, 0 +// CHECK: [0xfe,0x00,0xcc,0xd1,0x02,0x00,0x01,0x02] -v_fma_f64 v[5:6], vcc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x6a,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[4:5], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x04,0x00,0x01,0x02] -v_fma_f64 v[5:6], tba, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x6c,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[100:101], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x64,0x00,0x01,0x02] -v_fma_f64 v[5:6], tma, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x6e,0x04,0x0e,0x04] +v_fma_f64 v[5:6], flat_scratch, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x66,0x00,0x01,0x02] -v_fma_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x7a,0x04,0x0e,0x04] +v_fma_f64 v[5:6], vcc, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x6a,0x00,0x01,0x02] -v_fma_f64 v[5:6], exec, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x7e,0x04,0x0e,0x04] +v_fma_f64 v[5:6], tba, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x6c,0x00,0x01,0x02] -v_fma_f64 v[5:6], scc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0xfd,0x04,0x0e,0x04] +v_fma_f64 v[5:6], tma, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x6e,0x00,0x01,0x02] -v_fma_f64 v[5:6], v[1:2], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x01,0x05,0x0e,0x04] +v_fma_f64 v[5:6], ttmp[10:11], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x7a,0x00,0x01,0x02] -v_fma_f64 v[5:6], v[254:255], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0xfe,0x05,0x0e,0x04] +v_fma_f64 v[5:6], exec, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x7e,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[254:255], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0xfc,0x0f,0x04] +v_fma_f64 v[5:6], 0, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x80,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[254:255] -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0xfa,0x07] +v_fma_f64 v[5:6], -1, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0xc1,0x00,0x01,0x02] -v_fma_f64 v[5:6], -s[2:3], v[2:3], v[3:4] -// CHECK: 
[0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x24] +v_fma_f64 v[5:6], 0.5, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0xf0,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], -v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x44] +v_fma_f64 v[5:6], -4.0, 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0xf7,0x00,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x84] +v_fma_f64 v[5:6], v[1:2], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x01,0x01,0x01,0x02] -v_fma_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0xe4] +v_fma_f64 v[5:6], v[254:255], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0xfe,0x01,0x01,0x02] -v_fma_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4] -// CHECK: [0x05,0x01,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], -1, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x82,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4] -// CHECK: [0x05,0x02,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], 0.5, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0xe0,0x01,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], |v[3:4]| -// CHECK: [0x05,0x04,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], -4.0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0xee,0x01,0x02] -v_fma_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]| -// CHECK: [0x05,0x07,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], v[2:3], 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x02,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp -// CHECK: [0x05,0x80,0xcc,0xd1,0x02,0x04,0x0e,0x04] +v_fma_f64 v[5:6], s[2:3], v[254:255], 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0xfc,0x03,0x02] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2 -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x0c] +v_fma_f64 v[5:6], s[2:3], 0, -1 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x05,0x03] -v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4 -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x14] +v_fma_f64 v[5:6], s[2:3], 0, 0.5 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0xc1,0x03] -v_fma_f64 
v[5:6], s[2:3], v[2:3], v[3:4] div:2 -// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x1c] +v_fma_f64 v[5:6], s[2:3], 0, -4.0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0xdd,0x03] + +v_fma_f64 v[5:6], s[2:3], 0, v[3:4] +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x0d,0x04] + +v_fma_f64 v[5:6], s[2:3], 0, v[254:255] +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0xf9,0x07] + +v_fma_f64 v[5:6], -s[2:3], 0, 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x22] + +v_fma_f64 v[5:6], s[2:3], neg(0), 0 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x42] + +v_fma_f64 v[5:6], s[2:3], 0, neg(0) +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x82] + +v_fma_f64 v[5:6], -s[2:3], neg(0), neg(0) +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0xe2] + +v_fma_f64 v[5:6], |s[2:3]|, 0, 0 +// CHECK: [0x05,0x01,0xcc,0xd1,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], s[2:3], |0|, 0 +// CHECK: [0x05,0x02,0xcc,0xd1,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], s[2:3], 0, |0| +// CHECK: [0x05,0x04,0xcc,0xd1,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], |s[2:3]|, |0|, |0| +// CHECK: [0x05,0x07,0xcc,0xd1,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], s[2:3], 0, 0 clamp +// CHECK: [0x05,0x80,0xcc,0xd1,0x02,0x00,0x01,0x02] + +v_fma_f64 v[5:6], s[2:3], 0, 0 mul:2 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x0a] + +v_fma_f64 v[5:6], s[2:3], 0, 0 mul:4 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x12] + +v_fma_f64 v[5:6], s[2:3], 0, 0 div:2 +// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x00,0x01,0x1a] v_lerp_u8 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0x01,0x02] @@ -41291,101 +42321,134 @@ v_alignbyte_b32 v5, s1, 0, v3 v_alignbyte_b32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0xfd,0x07] -v_min3_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s101, 0, 0 +// CHECK: 
[0x05,0x00,0xd0,0xd1,0x65,0x00,0x01,0x02] + +v_min3_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x66,0x00,0x01,0x02] -v_min3_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x67,0x00,0x01,0x02] -v_min3_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x65,0x04,0x0e,0x04] +v_min3_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x6a,0x00,0x01,0x02] -v_min3_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x66,0x04,0x0e,0x04] +v_min3_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x6b,0x00,0x01,0x02] -v_min3_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x67,0x04,0x0e,0x04] +v_min3_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x6c,0x00,0x01,0x02] -v_min3_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x6a,0x04,0x0e,0x04] +v_min3_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x6d,0x00,0x01,0x02] -v_min3_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x6b,0x04,0x0e,0x04] +v_min3_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x6e,0x00,0x01,0x02] -v_min3_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x6c,0x04,0x0e,0x04] +v_min3_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x6f,0x00,0x01,0x02] -v_min3_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x6d,0x04,0x0e,0x04] +v_min3_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x7b,0x00,0x01,0x02] -v_min3_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x6e,0x04,0x0e,0x04] +v_min3_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x7c,0x00,0x01,0x02] -v_min3_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x6f,0x04,0x0e,0x04] +v_min3_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x7e,0x00,0x01,0x02] -v_min3_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x7b,0x04,0x0e,0x04] +v_min3_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x7f,0x00,0x01,0x02] -v_min3_f32 v5, 
m0, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x7c,0x04,0x0e,0x04] +v_min3_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x80,0x00,0x01,0x02] -v_min3_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x7e,0x04,0x0e,0x04] +v_min3_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0xc1,0x00,0x01,0x02] -v_min3_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x7f,0x04,0x0e,0x04] +v_min3_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0xf0,0x00,0x01,0x02] -v_min3_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0xfd,0x04,0x0e,0x04] +v_min3_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0xf7,0x00,0x01,0x02] -v_min3_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x05,0x0e,0x04] +v_min3_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x01,0x01,0x02] -v_min3_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0xff,0x05,0x0e,0x04] +v_min3_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0xff,0x01,0x01,0x02] -v_min3_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0xfe,0x0f,0x04] +v_min3_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x82,0x01,0x02] -v_min3_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0xfe,0x07] +v_min3_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0xe0,0x01,0x02] -v_min3_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x24] +v_min3_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0xee,0x01,0x02] -v_min3_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x44] +v_min3_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x02,0x02] -v_min3_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x84] +v_min3_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0xfe,0x03,0x02] -v_min3_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0xe4] +v_min3_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x05,0x03] -v_min3_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xd0,0xd1,0x01,0x04,0x0e,0x04] 
+v_min3_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0xc1,0x03] -v_min3_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xd0,0xd1,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0xdd,0x03] -v_min3_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xd0,0xd1,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x0d,0x04] -v_min3_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xd0,0xd1,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0xfd,0x07] -v_min3_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xd0,0xd1,0x01,0x04,0x0e,0x04] +v_min3_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x22] -v_min3_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x0c] +v_min3_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x42] -v_min3_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x14] +v_min3_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x82] -v_min3_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x1c] +v_min3_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0xe2] + +v_min3_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xd0,0xd1,0x01,0x00,0x01,0x02] + +v_min3_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x0a] + +v_min3_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x12] + +v_min3_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x00,0x01,0x1a] v_min3_i32 v5, s1, 0, 0 // CHECK: 
[0x05,0x00,0xd1,0xd1,0x01,0x00,0x01,0x02] @@ -41573,101 +42636,134 @@ v_min3_u32 v5, s1, 0, v3 v_min3_u32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0xfd,0x07] -v_max3_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x02] + +v_max3_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xd3,0xd1,0x01,0x00,0x01,0x02] + +v_max3_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x65,0x00,0x01,0x02] + +v_max3_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x66,0x00,0x01,0x02] + +v_max3_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x67,0x00,0x01,0x02] + +v_max3_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x6a,0x00,0x01,0x02] + +v_max3_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x6b,0x00,0x01,0x02] + +v_max3_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x6c,0x00,0x01,0x02] + +v_max3_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x6d,0x00,0x01,0x02] + +v_max3_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x6e,0x00,0x01,0x02] -v_max3_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x6f,0x00,0x01,0x02] -v_max3_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x65,0x04,0x0e,0x04] +v_max3_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x7b,0x00,0x01,0x02] -v_max3_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x66,0x04,0x0e,0x04] +v_max3_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x7c,0x00,0x01,0x02] -v_max3_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x67,0x04,0x0e,0x04] +v_max3_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x7e,0x00,0x01,0x02] -v_max3_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x6a,0x04,0x0e,0x04] +v_max3_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x7f,0x00,0x01,0x02] -v_max3_f32 v5, vcc_hi, v2, v3 -// CHECK: 
[0x05,0x00,0xd3,0xd1,0x6b,0x04,0x0e,0x04] +v_max3_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x80,0x00,0x01,0x02] -v_max3_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x6c,0x04,0x0e,0x04] +v_max3_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0xc1,0x00,0x01,0x02] -v_max3_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x6d,0x04,0x0e,0x04] +v_max3_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0xf0,0x00,0x01,0x02] -v_max3_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x6e,0x04,0x0e,0x04] +v_max3_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0xf7,0x00,0x01,0x02] -v_max3_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x6f,0x04,0x0e,0x04] +v_max3_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x01,0x01,0x02] -v_max3_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x7b,0x04,0x0e,0x04] +v_max3_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0xff,0x01,0x01,0x02] -v_max3_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x7c,0x04,0x0e,0x04] +v_max3_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x82,0x01,0x02] -v_max3_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x7e,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0xe0,0x01,0x02] -v_max3_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x7f,0x04,0x0e,0x04] +v_max3_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0xee,0x01,0x02] -v_max3_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0xfd,0x04,0x0e,0x04] +v_max3_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x02,0x02] -v_max3_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x05,0x0e,0x04] +v_max3_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0xfe,0x03,0x02] -v_max3_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0xff,0x05,0x0e,0x04] +v_max3_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x05,0x03] -v_max3_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0xfe,0x0f,0x04] +v_max3_f32 
v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0xc1,0x03] -v_max3_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0xfe,0x07] +v_max3_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0xdd,0x03] -v_max3_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x24] +v_max3_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x0d,0x04] -v_max3_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x44] +v_max3_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0xfd,0x07] -v_max3_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x84] +v_max3_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x22] -v_max3_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0xe4] +v_max3_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x42] -v_max3_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x82] -v_max3_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0xe2] -v_max3_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xd3,0xd1,0x01,0x00,0x01,0x02] -v_max3_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xd3,0xd1,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xd3,0xd1,0x01,0x04,0x0e,0x04] +v_max3_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xd3,0xd1,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x0c] +v_max3_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xd3,0xd1,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x14] +v_max3_f32 v5, s1, 0, 0 clamp +// 
CHECK: [0x05,0x80,0xd3,0xd1,0x01,0x00,0x01,0x02] -v_max3_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x1c] +v_max3_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x0a] + +v_max3_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x12] + +v_max3_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x00,0x01,0x1a] v_max3_i32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0x01,0x02] @@ -41855,101 +42951,134 @@ v_max3_u32 v5, s1, 0, v3 v_max3_u32 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0xfd,0x07] -v_med3_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x02] + +v_med3_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xd6,0xd1,0x01,0x00,0x01,0x02] + +v_med3_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x65,0x00,0x01,0x02] + +v_med3_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x66,0x00,0x01,0x02] + +v_med3_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x67,0x00,0x01,0x02] + +v_med3_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x6a,0x00,0x01,0x02] + +v_med3_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x6b,0x00,0x01,0x02] + +v_med3_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x6c,0x00,0x01,0x02] -v_med3_f32 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x6d,0x00,0x01,0x02] -v_med3_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x65,0x04,0x0e,0x04] +v_med3_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x6e,0x00,0x01,0x02] -v_med3_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x66,0x04,0x0e,0x04] +v_med3_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x6f,0x00,0x01,0x02] -v_med3_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x67,0x04,0x0e,0x04] +v_med3_f32 v5, ttmp11, 0, 0 +// CHECK: 
[0x05,0x00,0xd6,0xd1,0x7b,0x00,0x01,0x02] -v_med3_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x6a,0x04,0x0e,0x04] +v_med3_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x7c,0x00,0x01,0x02] -v_med3_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x6b,0x04,0x0e,0x04] +v_med3_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x7e,0x00,0x01,0x02] -v_med3_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x6c,0x04,0x0e,0x04] +v_med3_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x7f,0x00,0x01,0x02] -v_med3_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x6d,0x04,0x0e,0x04] +v_med3_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x80,0x00,0x01,0x02] -v_med3_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x6e,0x04,0x0e,0x04] +v_med3_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0xc1,0x00,0x01,0x02] -v_med3_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x6f,0x04,0x0e,0x04] +v_med3_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0xf0,0x00,0x01,0x02] -v_med3_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x7b,0x04,0x0e,0x04] +v_med3_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0xf7,0x00,0x01,0x02] -v_med3_f32 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x7c,0x04,0x0e,0x04] +v_med3_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x01,0x01,0x02] -v_med3_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x7e,0x04,0x0e,0x04] +v_med3_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0xff,0x01,0x01,0x02] -v_med3_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x7f,0x04,0x0e,0x04] +v_med3_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x82,0x01,0x02] -v_med3_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0xfd,0x04,0x0e,0x04] +v_med3_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0xe0,0x01,0x02] -v_med3_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x05,0x0e,0x04] +v_med3_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0xee,0x01,0x02] 
-v_med3_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0xff,0x05,0x0e,0x04] +v_med3_f32 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x02,0x02] -v_med3_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0xfe,0x0f,0x04] +v_med3_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0xfe,0x03,0x02] -v_med3_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0xfe,0x07] +v_med3_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x05,0x03] -v_med3_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x24] +v_med3_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0xc1,0x03] -v_med3_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x44] +v_med3_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0xdd,0x03] -v_med3_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x84] +v_med3_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x0d,0x04] -v_med3_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0xe4] +v_med3_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0xfd,0x07] -v_med3_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x22] -v_med3_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x42] -v_med3_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x82] -v_med3_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0xe2] -v_med3_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xd6,0xd1,0x01,0x04,0x0e,0x04] +v_med3_f32 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xd6,0xd1,0x01,0x00,0x01,0x02] -v_med3_f32 v5, s1, v2, v3 mul:2 
-// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x0c] +v_med3_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xd6,0xd1,0x01,0x00,0x01,0x02] -v_med3_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x14] +v_med3_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xd6,0xd1,0x01,0x00,0x01,0x02] -v_med3_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x1c] +v_med3_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xd6,0xd1,0x01,0x00,0x01,0x02] + +v_med3_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xd6,0xd1,0x01,0x00,0x01,0x02] + +v_med3_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x0a] + +v_med3_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x12] + +v_med3_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x00,0x01,0x1a] v_med3_i32 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0x01,0x02] @@ -42554,8 +43683,17 @@ v_cvt_pk_u8_f32 v5, exec_lo, 0, 0 v_cvt_pk_u8_f32 v5, exec_hi, 0, 0 // CHECK: [0x05,0x00,0xdd,0xd1,0x7f,0x00,0x01,0x02] -v_cvt_pk_u8_f32 v5, scc, 0, 0 -// CHECK: [0x05,0x00,0xdd,0xd1,0xfd,0x00,0x01,0x02] +v_cvt_pk_u8_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xdd,0xd1,0x80,0x00,0x01,0x02] + +v_cvt_pk_u8_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xdd,0xd1,0xc1,0x00,0x01,0x02] + +v_cvt_pk_u8_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xdd,0xd1,0xf0,0x00,0x01,0x02] + +v_cvt_pk_u8_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xdd,0xd1,0xf7,0x00,0x01,0x02] v_cvt_pk_u8_f32 v5, v1, 0, 0 // CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x01,0x01,0x02] @@ -42602,182 +43740,248 @@ v_cvt_pk_u8_f32 v5, |s1|, 0, 0 v_cvt_pk_u8_f32 v5, s1, 0, 0 clamp // CHECK: [0x05,0x80,0xdd,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x02] + +v_div_fixup_f32 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xde,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f32 v255, s1, v2, v3 -// CHECK: 
[0xff,0x00,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x65,0x00,0x01,0x02] -v_div_fixup_f32 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x65,0x04,0x0e,0x04] +v_div_fixup_f32 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x66,0x00,0x01,0x02] -v_div_fixup_f32 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x66,0x04,0x0e,0x04] +v_div_fixup_f32 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x67,0x00,0x01,0x02] -v_div_fixup_f32 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x67,0x04,0x0e,0x04] +v_div_fixup_f32 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x6a,0x00,0x01,0x02] -v_div_fixup_f32 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x6a,0x04,0x0e,0x04] +v_div_fixup_f32 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x6b,0x00,0x01,0x02] -v_div_fixup_f32 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x6b,0x04,0x0e,0x04] +v_div_fixup_f32 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x6c,0x00,0x01,0x02] -v_div_fixup_f32 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x6c,0x04,0x0e,0x04] +v_div_fixup_f32 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x6d,0x00,0x01,0x02] -v_div_fixup_f32 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x6d,0x04,0x0e,0x04] +v_div_fixup_f32 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x6e,0x00,0x01,0x02] -v_div_fixup_f32 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x6e,0x04,0x0e,0x04] +v_div_fixup_f32 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x6f,0x00,0x01,0x02] -v_div_fixup_f32 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x6f,0x04,0x0e,0x04] +v_div_fixup_f32 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x7b,0x00,0x01,0x02] -v_div_fixup_f32 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x7b,0x04,0x0e,0x04] +v_div_fixup_f32 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x7c,0x00,0x01,0x02] -v_div_fixup_f32 v5, m0, v2, v3 -// CHECK: 
[0x05,0x00,0xde,0xd1,0x7c,0x04,0x0e,0x04] +v_div_fixup_f32 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x7e,0x00,0x01,0x02] -v_div_fixup_f32 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x7e,0x04,0x0e,0x04] +v_div_fixup_f32 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x7f,0x00,0x01,0x02] -v_div_fixup_f32 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x7f,0x04,0x0e,0x04] +v_div_fixup_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x80,0x00,0x01,0x02] -v_div_fixup_f32 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0xfd,0x04,0x0e,0x04] +v_div_fixup_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0xc1,0x00,0x01,0x02] -v_div_fixup_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x05,0x0e,0x04] +v_div_fixup_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0xf0,0x00,0x01,0x02] -v_div_fixup_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0xff,0x05,0x0e,0x04] +v_div_fixup_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0xf7,0x00,0x01,0x02] -v_div_fixup_f32 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0xfe,0x0f,0x04] +v_div_fixup_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x01,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0xfe,0x07] +v_div_fixup_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0xff,0x01,0x01,0x02] -v_div_fixup_f32 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x24] +v_div_fixup_f32 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x82,0x01,0x02] -v_div_fixup_f32 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x44] +v_div_fixup_f32 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0xe0,0x01,0x02] -v_div_fixup_f32 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x84] +v_div_fixup_f32 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0xee,0x01,0x02] -v_div_fixup_f32 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0xe4] +v_div_fixup_f32 v5, s1, v2, 0 +// CHECK: 
[0x05,0x00,0xde,0xd1,0x01,0x04,0x02,0x02] -v_div_fixup_f32 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0xfe,0x03,0x02] -v_div_fixup_f32 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x05,0x03] -v_div_fixup_f32 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0xc1,0x03] -v_div_fixup_f32 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0xdd,0x03] -v_div_fixup_f32 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xde,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x0d,0x04] -v_div_fixup_f32 v5, s1, v2, v3 mul:2 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x0c] +v_div_fixup_f32 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0xfd,0x07] -v_div_fixup_f32 v5, s1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x14] +v_div_fixup_f32 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x22] -v_div_fixup_f32 v5, s1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x1c] +v_div_fixup_f32 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x42] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x82] -v_div_fixup_f64 v[254:255], s[2:3], v[2:3], v[3:4] -// CHECK: [0xfe,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x04] +v_div_fixup_f32 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0xe2] -v_div_fixup_f64 v[5:6], s[4:5], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x04,0x04,0x0e,0x04] +v_div_fixup_f32 v5, |s1|, 0, 0 +// CHECK: 
[0x05,0x01,0xde,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[100:101], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x64,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xde,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], flat_scratch, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x66,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xde,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], vcc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x6a,0x04,0x0e,0x04] +v_div_fixup_f32 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xde,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], tba, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x6c,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xde,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], tma, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x6e,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x0a] -v_div_fixup_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x7a,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x12] -v_div_fixup_f64 v[5:6], exec, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x7e,0x04,0x0e,0x04] +v_div_fixup_f32 v5, s1, 0, 0 div:2 +// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x00,0x01,0x1a] -v_div_fixup_f64 v[5:6], scc, v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0xfd,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], v[1:2], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x01,0x05,0x0e,0x04] +v_div_fixup_f64 v[254:255], s[2:3], 0, 0 +// CHECK: [0xfe,0x00,0xdf,0xd1,0x02,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], v[254:255], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0xfe,0x05,0x0e,0x04] +v_div_fixup_f64 v[5:6], s[4:5], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x04,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[254:255], 
v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0xfc,0x0f,0x04] +v_div_fixup_f64 v[5:6], s[100:101], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x64,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[254:255] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0xfa,0x07] +v_div_fixup_f64 v[5:6], flat_scratch, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x66,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], -s[2:3], v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x24] +v_div_fixup_f64 v[5:6], vcc, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x6a,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], -v[2:3], v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x44] +v_div_fixup_f64 v[5:6], tba, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x6c,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x84] +v_div_fixup_f64 v[5:6], tma, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x6e,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4] -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0xe4] +v_div_fixup_f64 v[5:6], ttmp[10:11], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x7a,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4] -// CHECK: [0x05,0x01,0xdf,0xd1,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], exec, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x7e,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4] -// CHECK: [0x05,0x02,0xdf,0xd1,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], 0, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x80,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], |v[3:4]| -// CHECK: [0x05,0x04,0xdf,0xd1,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], -1, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0xc1,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]| -// CHECK: [0x05,0x07,0xdf,0xd1,0x02,0x04,0x0e,0x04] +v_div_fixup_f64 v[5:6], 0.5, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0xf0,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp -// CHECK: [0x05,0x80,0xdf,0xd1,0x02,0x04,0x0e,0x04] 
+v_div_fixup_f64 v[5:6], -4.0, 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0xf7,0x00,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2 -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x0c] +v_div_fixup_f64 v[5:6], v[1:2], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x01,0x01,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4 -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x14] +v_div_fixup_f64 v[5:6], v[254:255], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0xfe,0x01,0x01,0x02] -v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2 -// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x1c] +v_div_fixup_f64 v[5:6], s[2:3], -1, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x82,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], 0.5, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0xe0,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], -4.0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0xee,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], v[2:3], 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x02,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], v[254:255], 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0xfc,0x03,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], 0, -1 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x05,0x03] + +v_div_fixup_f64 v[5:6], s[2:3], 0, 0.5 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0xc1,0x03] + +v_div_fixup_f64 v[5:6], s[2:3], 0, -4.0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0xdd,0x03] + +v_div_fixup_f64 v[5:6], s[2:3], 0, v[3:4] +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x0d,0x04] + +v_div_fixup_f64 v[5:6], s[2:3], 0, v[254:255] +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0xf9,0x07] + +v_div_fixup_f64 v[5:6], -s[2:3], 0, 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x22] + +v_div_fixup_f64 v[5:6], s[2:3], neg(0), 0 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x42] + +v_div_fixup_f64 v[5:6], s[2:3], 0, neg(0) +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x82] + +v_div_fixup_f64 v[5:6], -s[2:3], neg(0), neg(0) +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0xe2] + +v_div_fixup_f64 v[5:6], |s[2:3]|, 0, 0 
+// CHECK: [0x05,0x01,0xdf,0xd1,0x02,0x00,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], |0|, 0 +// CHECK: [0x05,0x02,0xdf,0xd1,0x02,0x00,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], 0, |0| +// CHECK: [0x05,0x04,0xdf,0xd1,0x02,0x00,0x01,0x02] + +v_div_fixup_f64 v[5:6], |s[2:3]|, |0|, |0| +// CHECK: [0x05,0x07,0xdf,0xd1,0x02,0x00,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 clamp +// CHECK: [0x05,0x80,0xdf,0xd1,0x02,0x00,0x01,0x02] + +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 mul:2 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x0a] + +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 mul:4 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x12] + +v_div_fixup_f64 v[5:6], s[2:3], 0, 0 div:2 +// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x00,0x01,0x1a] v_div_scale_f32 v5, vcc, s1, 0, 0 // CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0x01,0x02] @@ -42950,56 +44154,92 @@ v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[3:4] v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[254:255] // CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0xf9,0x07] -v_div_fmas_f32 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v255, 0, 0, 0 +// CHECK: [0xff,0x00,0xe2,0xd1,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0xc1,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0xf0,0x00,0x01,0x02] + +v_div_fmas_f32 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0xf7,0x00,0x01,0x02] + +v_div_fmas_f32 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x01,0x01,0x02] + +v_div_fmas_f32 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0xff,0x01,0x01,0x02] -v_div_fmas_f32 v255, v1, v2, v3 -// CHECK: [0xff,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, -1, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x82,0x01,0x02] -v_div_fmas_f32 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0xff,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0.5, 0 +// CHECK: 
[0x05,0x00,0xe2,0xd1,0x80,0xe0,0x01,0x02] -v_div_fmas_f32 v5, v1, v255, v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0xff,0x0f,0x04] +v_div_fmas_f32 v5, 0, -4.0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0xee,0x01,0x02] -v_div_fmas_f32 v5, v1, v2, v255 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0xfe,0x07] +v_div_fmas_f32 v5, 0, v2, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x04,0x02,0x02] -v_div_fmas_f32 v5, -v1, v2, v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x24] +v_div_fmas_f32 v5, 0, v255, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0xfe,0x03,0x02] -v_div_fmas_f32 v5, v1, -v2, v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x44] +v_div_fmas_f32 v5, 0, 0, -1 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x05,0x03] -v_div_fmas_f32 v5, v1, v2, -v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x84] +v_div_fmas_f32 v5, 0, 0, 0.5 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0xc1,0x03] -v_div_fmas_f32 v5, -v1, -v2, -v3 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0xe4] +v_div_fmas_f32 v5, 0, 0, -4.0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0xdd,0x03] -v_div_fmas_f32 v5, |v1|, v2, v3 -// CHECK: [0x05,0x01,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0, v3 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x0d,0x04] -v_div_fmas_f32 v5, v1, |v2|, v3 -// CHECK: [0x05,0x02,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0, v255 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0xfd,0x07] -v_div_fmas_f32 v5, v1, v2, |v3| -// CHECK: [0x05,0x04,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, neg(0), 0, 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x22] -v_div_fmas_f32 v5, |v1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, neg(0), 0 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x42] -v_div_fmas_f32 v5, v1, v2, v3 clamp -// CHECK: [0x05,0x80,0xe2,0xd1,0x01,0x05,0x0e,0x04] +v_div_fmas_f32 v5, 0, 0, neg(0) +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x82] -v_div_fmas_f32 v5, v1, v2, v3 mul:2 -// CHECK: 
[0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x0c] +v_div_fmas_f32 v5, neg(0), neg(0), neg(0) +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0xe2] -v_div_fmas_f32 v5, v1, v2, v3 mul:4 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x14] +v_div_fmas_f32 v5, |0|, 0, 0 +// CHECK: [0x05,0x01,0xe2,0xd1,0x80,0x00,0x01,0x02] -v_div_fmas_f32 v5, v1, v2, v3 div:2 -// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x1c] +v_div_fmas_f32 v5, 0, |0|, 0 +// CHECK: [0x05,0x02,0xe2,0xd1,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0, 0, |0| +// CHECK: [0x05,0x04,0xe2,0xd1,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, |0|, |0|, |0| +// CHECK: [0x05,0x07,0xe2,0xd1,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0, 0, 0 clamp +// CHECK: [0x05,0x80,0xe2,0xd1,0x80,0x00,0x01,0x02] + +v_div_fmas_f32 v5, 0, 0, 0 mul:2 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x0a] + +v_div_fmas_f32 v5, 0, 0, 0 mul:4 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x12] + +v_div_fmas_f32 v5, 0, 0, 0 div:2 +// CHECK: [0x05,0x00,0xe2,0xd1,0x80,0x00,0x01,0x1a] v_div_fmas_f64 v[5:6], vcc, vcc, vcc // CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x01] @@ -43007,18 +44247,54 @@ v_div_fmas_f64 v[5:6], vcc, vcc, vcc v_div_fmas_f64 v[254:255], vcc, vcc, vcc // CHECK: [0xfe,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x01] +v_div_fmas_f64 v[5:6], 0, vcc, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0x80,0xd4,0xa8,0x01] + +v_div_fmas_f64 v[5:6], -1, vcc, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0xc1,0xd4,0xa8,0x01] + +v_div_fmas_f64 v[5:6], 0.5, vcc, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0xf0,0xd4,0xa8,0x01] + +v_div_fmas_f64 v[5:6], -4.0, vcc, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0xf7,0xd4,0xa8,0x01] + v_div_fmas_f64 v[5:6], v[1:2], vcc, vcc // CHECK: [0x05,0x00,0xe3,0xd1,0x01,0xd5,0xa8,0x01] v_div_fmas_f64 v[5:6], v[254:255], vcc, vcc // CHECK: [0x05,0x00,0xe3,0xd1,0xfe,0xd5,0xa8,0x01] +v_div_fmas_f64 v[5:6], vcc, 0, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0x00,0xa9,0x01] + +v_div_fmas_f64 v[5:6], vcc, -1, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0x82,0xa9,0x01] + 
+v_div_fmas_f64 v[5:6], vcc, 0.5, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xe0,0xa9,0x01] + +v_div_fmas_f64 v[5:6], vcc, -4.0, vcc +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xee,0xa9,0x01] + v_div_fmas_f64 v[5:6], vcc, v[2:3], vcc // CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0x04,0xaa,0x01] v_div_fmas_f64 v[5:6], vcc, v[254:255], vcc // CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xfc,0xab,0x01] +v_div_fmas_f64 v[5:6], vcc, vcc, 0 +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0x00,0x02] + +v_div_fmas_f64 v[5:6], vcc, vcc, -1 +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0x04,0x03] + +v_div_fmas_f64 v[5:6], vcc, vcc, 0.5 +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xc0,0x03] + +v_div_fmas_f64 v[5:6], vcc, vcc, -4.0 +// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xdc,0x03] + v_div_fmas_f64 v[5:6], vcc, vcc, v[3:4] // CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0x0c,0x04] @@ -43391,92 +44667,353 @@ v_mqsad_u32_u8 v[5:8], 0, v255, v[3:6] v_mqsad_u32_u8 v[5:8], 0, s2, v[252:255] // CHECK: [0x05,0x00,0xe7,0xd1,0x80,0x04,0xf0,0x07] -v_mad_f16 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[254:255], s[12:13], s1, 0, 0 +// CHECK: [0xfe,0x0c,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[14:15], s1, 0, 0 +// CHECK: [0x05,0x0e,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[100:101], s1, 0, 0 +// CHECK: [0x05,0x64,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], flat_scratch, s1, 0, 0 +// CHECK: [0x05,0x66,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], vcc, s1, 0, 0 +// CHECK: [0x05,0x6a,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], tba, s1, 0, 0 +// CHECK: [0x05,0x6c,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], tma, s1, 0, 0 +// CHECK: [0x05,0x6e,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], ttmp[10:11], s1, 0, 0 +// CHECK: [0x05,0x7a,0xe8,0xd1,0x01,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], s101, 0, 0 +// 
CHECK: [0x05,0x0c,0xe8,0xd1,0x65,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x66,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x67,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], vcc_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x6a,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], vcc_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x6b,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tba_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x6c,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tba_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x6d,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tma_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x6e,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], tma_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x6f,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], ttmp11, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x7b,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], m0, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x7c,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], exec_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x7e,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], exec_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x7f,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], 0, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x80,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], -1, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0xc1,0x00,0x01,0x02] + +v_mad_u64_u32 v[5:6], s[12:13], 0.5, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0xf0,0x00,0x01,0x02] -v_mad_f16 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xea,0xd1,0x01,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], -4.0, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0xf7,0x00,0x01,0x02] -v_mad_f16 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x65,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], v1, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x01,0x01,0x02] -v_mad_f16 v5, flat_scratch_lo, v2, v3 -// CHECK: 
[0x05,0x00,0xea,0xd1,0x66,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], v255, 0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0xff,0x01,0x01,0x02] -v_mad_f16 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x67,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, -1, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x82,0x01,0x02] -v_mad_f16 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x6a,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0.5, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0xe0,0x01,0x02] -v_mad_f16 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x6b,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, -4.0, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0xee,0x01,0x02] -v_mad_f16 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x6c,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, v2, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x04,0x02,0x02] -v_mad_f16 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x6d,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, v255, 0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0xfe,0x03,0x02] -v_mad_f16 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x6e,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, -1 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0x05,0x03] -v_mad_f16 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x6f,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0.5 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0xc1,0x03] -v_mad_f16 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x7b,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, -4.0 +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0xdd,0x03] -v_mad_f16 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x7c,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, v[3:4] +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0x0d,0x04] -v_mad_f16 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x7e,0x04,0x0e,0x04] +v_mad_u64_u32 v[5:6], s[12:13], s1, 0, v[254:255] +// CHECK: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0xf9,0x07] -v_mad_f16 v5, exec_hi, v2, v3 -// CHECK: 
[0x05,0x00,0xea,0xd1,0x7f,0x04,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0xfd,0x04,0x0e,0x04] +v_mad_i64_i32 v[254:255], s[12:13], s1, 0, 0 +// CHECK: [0xfe,0x0c,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x05,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[14:15], s1, 0, 0 +// CHECK: [0x05,0x0e,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0xff,0x05,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[100:101], s1, 0, 0 +// CHECK: [0x05,0x64,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0xfe,0x0f,0x04] +v_mad_i64_i32 v[5:6], flat_scratch, s1, 0, 0 +// CHECK: [0x05,0x66,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0xfe,0x07] +v_mad_i64_i32 v[5:6], vcc, s1, 0, 0 +// CHECK: [0x05,0x6a,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x24] +v_mad_i64_i32 v[5:6], tba, s1, 0, 0 +// CHECK: [0x05,0x6c,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x44] +v_mad_i64_i32 v[5:6], tma, s1, 0, 0 +// CHECK: [0x05,0x6e,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x84] +v_mad_i64_i32 v[5:6], ttmp[10:11], s1, 0, 0 +// CHECK: [0x05,0x7a,0xe9,0xd1,0x01,0x00,0x01,0x02] -v_mad_f16 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0xe4] +v_mad_i64_i32 v[5:6], s[12:13], s101, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x65,0x00,0x01,0x02] -v_mad_f16 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xea,0xd1,0x01,0x04,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[12:13], flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x66,0x00,0x01,0x02] -v_mad_f16 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xea,0xd1,0x01,0x04,0x0e,0x04] 
+v_mad_i64_i32 v[5:6], s[12:13], flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x67,0x00,0x01,0x02] -v_mad_f16 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xea,0xd1,0x01,0x04,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[12:13], vcc_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x6a,0x00,0x01,0x02] -v_mad_f16 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xea,0xd1,0x01,0x04,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[12:13], vcc_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x6b,0x00,0x01,0x02] -v_mad_f16 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xea,0xd1,0x01,0x04,0x0e,0x04] +v_mad_i64_i32 v[5:6], s[12:13], tba_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x6c,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tba_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x6d,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tma_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x6e,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], tma_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x6f,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], ttmp11, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x7b,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], m0, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x7c,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], exec_lo, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x7e,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], exec_hi, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x7f,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], 0, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x80,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], -1, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0xc1,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], 0.5, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0xf0,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], -4.0, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0xf7,0x00,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], v1, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x01,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], v255, 0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0xff,0x01,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, -1, 0 +// 
CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x82,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0.5, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0xe0,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, -4.0, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0xee,0x01,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, v2, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x04,0x02,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, v255, 0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0xfe,0x03,0x02] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, -1 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0x05,0x03] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, 0.5 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0xc1,0x03] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, -4.0 +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0xdd,0x03] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[3:4] +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0x0d,0x04] + +v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[254:255] +// CHECK: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0xf9,0x07] + +v_mad_f16 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x01,0x02] + +v_mad_f16 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xea,0xd1,0x01,0x00,0x01,0x02] + +v_mad_f16 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x65,0x00,0x01,0x02] + +v_mad_f16 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x66,0x00,0x01,0x02] + +v_mad_f16 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x67,0x00,0x01,0x02] + +v_mad_f16 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x6a,0x00,0x01,0x02] + +v_mad_f16 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x6b,0x00,0x01,0x02] + +v_mad_f16 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x6c,0x00,0x01,0x02] + +v_mad_f16 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x6d,0x00,0x01,0x02] + +v_mad_f16 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x6e,0x00,0x01,0x02] + +v_mad_f16 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x6f,0x00,0x01,0x02] + +v_mad_f16 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x7b,0x00,0x01,0x02] + +v_mad_f16 v5, m0, 0, 0 +// CHECK: 
[0x05,0x00,0xea,0xd1,0x7c,0x00,0x01,0x02] + +v_mad_f16 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x7e,0x00,0x01,0x02] + +v_mad_f16 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x7f,0x00,0x01,0x02] + +v_mad_f16 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x80,0x00,0x01,0x02] + +v_mad_f16 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0xc1,0x00,0x01,0x02] + +v_mad_f16 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0xf0,0x00,0x01,0x02] + +v_mad_f16 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0xf7,0x00,0x01,0x02] + +v_mad_f16 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x01,0x01,0x02] + +v_mad_f16 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0xff,0x01,0x01,0x02] + +v_mad_f16 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x82,0x01,0x02] + +v_mad_f16 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0xe0,0x01,0x02] + +v_mad_f16 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0xee,0x01,0x02] + +v_mad_f16 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x02,0x02] + +v_mad_f16 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0xfe,0x03,0x02] + +v_mad_f16 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x05,0x03] + +v_mad_f16 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0xc1,0x03] + +v_mad_f16 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0xdd,0x03] + +v_mad_f16 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x0d,0x04] + +v_mad_f16 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0xfd,0x07] + +v_mad_f16 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x01,0x22] + +v_mad_f16 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x01,0x42] + +v_mad_f16 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x01,0x82] + +v_mad_f16 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x00,0x01,0xe2] + +v_mad_f16 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xea,0xd1,0x01,0x00,0x01,0x02] + +v_mad_f16 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xea,0xd1,0x01,0x00,0x01,0x02] + 
+v_mad_f16 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xea,0xd1,0x01,0x00,0x01,0x02] + +v_mad_f16 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xea,0xd1,0x01,0x00,0x01,0x02] + +v_mad_f16 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xea,0xd1,0x01,0x00,0x01,0x02] v_mad_u16 v5, s1, 0, 0 // CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0x01,0x02] @@ -43664,257 +45201,332 @@ v_mad_i16 v5, s1, 0, v3 v_mad_i16 v5, s1, 0, v255 // CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0xfd,0x07] -v_fma_f16 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x01,0x02] + +v_fma_f16 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xee,0xd1,0x01,0x00,0x01,0x02] + +v_fma_f16 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x65,0x00,0x01,0x02] + +v_fma_f16 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x66,0x00,0x01,0x02] + +v_fma_f16 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x67,0x00,0x01,0x02] + +v_fma_f16 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x6a,0x00,0x01,0x02] + +v_fma_f16 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x6b,0x00,0x01,0x02] -v_fma_f16 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x6c,0x00,0x01,0x02] -v_fma_f16 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x65,0x04,0x0e,0x04] +v_fma_f16 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x6d,0x00,0x01,0x02] -v_fma_f16 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x66,0x04,0x0e,0x04] +v_fma_f16 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x6e,0x00,0x01,0x02] -v_fma_f16 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x67,0x04,0x0e,0x04] +v_fma_f16 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x6f,0x00,0x01,0x02] -v_fma_f16 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x6a,0x04,0x0e,0x04] +v_fma_f16 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x7b,0x00,0x01,0x02] -v_fma_f16 v5, vcc_hi, v2, v3 -// CHECK: 
[0x05,0x00,0xee,0xd1,0x6b,0x04,0x0e,0x04] +v_fma_f16 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x7c,0x00,0x01,0x02] -v_fma_f16 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x6c,0x04,0x0e,0x04] +v_fma_f16 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x7e,0x00,0x01,0x02] -v_fma_f16 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x6d,0x04,0x0e,0x04] +v_fma_f16 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x7f,0x00,0x01,0x02] -v_fma_f16 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x6e,0x04,0x0e,0x04] +v_fma_f16 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x80,0x00,0x01,0x02] -v_fma_f16 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x6f,0x04,0x0e,0x04] +v_fma_f16 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0xc1,0x00,0x01,0x02] -v_fma_f16 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x7b,0x04,0x0e,0x04] +v_fma_f16 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0xf0,0x00,0x01,0x02] -v_fma_f16 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x7c,0x04,0x0e,0x04] +v_fma_f16 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0xf7,0x00,0x01,0x02] -v_fma_f16 v5, exec_lo, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x7e,0x04,0x0e,0x04] +v_fma_f16 v5, v1, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x01,0x01,0x02] -v_fma_f16 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x7f,0x04,0x0e,0x04] +v_fma_f16 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0xff,0x01,0x01,0x02] -v_fma_f16 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0xfd,0x04,0x0e,0x04] +v_fma_f16 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x82,0x01,0x02] -v_fma_f16 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x05,0x0e,0x04] +v_fma_f16 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0xe0,0x01,0x02] -v_fma_f16 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0xff,0x05,0x0e,0x04] +v_fma_f16 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0xee,0x01,0x02] -v_fma_f16 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0xfe,0x0f,0x04] +v_fma_f16 v5, s1, v2, 0 +// 
CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x02,0x02] -v_fma_f16 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0xfe,0x07] +v_fma_f16 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0xfe,0x03,0x02] -v_fma_f16 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x24] +v_fma_f16 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x05,0x03] -v_fma_f16 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x44] +v_fma_f16 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0xc1,0x03] -v_fma_f16 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x84] +v_fma_f16 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0xdd,0x03] -v_fma_f16 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0xe4] +v_fma_f16 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x0d,0x04] -v_fma_f16 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0xfd,0x07] -v_fma_f16 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x01,0x22] -v_fma_f16 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x01,0x42] -v_fma_f16 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x01,0x82] -v_fma_f16 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xee,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x00,0x01,0xe2] -v_div_fixup_f16 v5, s1, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xee,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f16 v255, s1, v2, v3 -// CHECK: [0xff,0x00,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_fma_f16 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xee,0xd1,0x01,0x00,0x01,0x02] 
-v_div_fixup_f16 v5, s101, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x65,0x04,0x0e,0x04] +v_fma_f16 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xee,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f16 v5, flat_scratch_lo, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x66,0x04,0x0e,0x04] +v_fma_f16 v5, |s1|, |0|, |0| +// CHECK: [0x05,0x07,0xee,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f16 v5, flat_scratch_hi, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x67,0x04,0x0e,0x04] +v_fma_f16 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xee,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f16 v5, vcc_lo, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x6a,0x04,0x0e,0x04] +v_div_fixup_f16 v5, s1, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f16 v5, vcc_hi, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x6b,0x04,0x0e,0x04] +v_div_fixup_f16 v255, s1, 0, 0 +// CHECK: [0xff,0x00,0xef,0xd1,0x01,0x00,0x01,0x02] -v_div_fixup_f16 v5, tba_lo, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x6c,0x04,0x0e,0x04] +v_div_fixup_f16 v5, s101, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x65,0x00,0x01,0x02] -v_div_fixup_f16 v5, tba_hi, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x6d,0x04,0x0e,0x04] +v_div_fixup_f16 v5, flat_scratch_lo, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x66,0x00,0x01,0x02] -v_div_fixup_f16 v5, tma_lo, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x6e,0x04,0x0e,0x04] +v_div_fixup_f16 v5, flat_scratch_hi, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x67,0x00,0x01,0x02] -v_div_fixup_f16 v5, tma_hi, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x6f,0x04,0x0e,0x04] +v_div_fixup_f16 v5, vcc_lo, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x6a,0x00,0x01,0x02] -v_div_fixup_f16 v5, ttmp11, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x7b,0x04,0x0e,0x04] +v_div_fixup_f16 v5, vcc_hi, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x6b,0x00,0x01,0x02] -v_div_fixup_f16 v5, m0, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x7c,0x04,0x0e,0x04] +v_div_fixup_f16 v5, tba_lo, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x6c,0x00,0x01,0x02] -v_div_fixup_f16 v5, exec_lo, v2, v3 -// CHECK: 
[0x05,0x00,0xef,0xd1,0x7e,0x04,0x0e,0x04] +v_div_fixup_f16 v5, tba_hi, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x6d,0x00,0x01,0x02] -v_div_fixup_f16 v5, exec_hi, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x7f,0x04,0x0e,0x04] +v_div_fixup_f16 v5, tma_lo, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x6e,0x00,0x01,0x02] -v_div_fixup_f16 v5, scc, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0xfd,0x04,0x0e,0x04] +v_div_fixup_f16 v5, tma_hi, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x6f,0x00,0x01,0x02] -v_div_fixup_f16 v5, v1, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x05,0x0e,0x04] +v_div_fixup_f16 v5, ttmp11, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x7b,0x00,0x01,0x02] -v_div_fixup_f16 v5, v255, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0xff,0x05,0x0e,0x04] +v_div_fixup_f16 v5, m0, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x7c,0x00,0x01,0x02] -v_div_fixup_f16 v5, s1, v255, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0xfe,0x0f,0x04] +v_div_fixup_f16 v5, exec_lo, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x7e,0x00,0x01,0x02] -v_div_fixup_f16 v5, s1, v2, v255 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0xfe,0x07] +v_div_fixup_f16 v5, exec_hi, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x7f,0x00,0x01,0x02] -v_div_fixup_f16 v5, -s1, v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x24] +v_div_fixup_f16 v5, 0, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x80,0x00,0x01,0x02] -v_div_fixup_f16 v5, s1, -v2, v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x44] +v_div_fixup_f16 v5, -1, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0xc1,0x00,0x01,0x02] -v_div_fixup_f16 v5, s1, v2, -v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x84] +v_div_fixup_f16 v5, 0.5, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0xf0,0x00,0x01,0x02] -v_div_fixup_f16 v5, -s1, -v2, -v3 -// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0xe4] +v_div_fixup_f16 v5, -4.0, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0xf7,0x00,0x01,0x02] -v_div_fixup_f16 v5, |s1|, v2, v3 -// CHECK: [0x05,0x01,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f16 v5, v1, 0, 0 +// CHECK: 
[0x05,0x00,0xef,0xd1,0x01,0x01,0x01,0x02] -v_div_fixup_f16 v5, s1, |v2|, v3 -// CHECK: [0x05,0x02,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f16 v5, v255, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0xff,0x01,0x01,0x02] -v_div_fixup_f16 v5, s1, v2, |v3| -// CHECK: [0x05,0x04,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f16 v5, s1, -1, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x82,0x01,0x02] -v_div_fixup_f16 v5, |s1|, |v2|, |v3| -// CHECK: [0x05,0x07,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f16 v5, s1, 0.5, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0xe0,0x01,0x02] -v_div_fixup_f16 v5, s1, v2, v3 clamp -// CHECK: [0x05,0x80,0xef,0xd1,0x01,0x04,0x0e,0x04] +v_div_fixup_f16 v5, s1, -4.0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0xee,0x01,0x02] + +v_div_fixup_f16 v5, s1, v2, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x02,0x02] + +v_div_fixup_f16 v5, s1, v255, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0xfe,0x03,0x02] + +v_div_fixup_f16 v5, s1, 0, -1 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x05,0x03] + +v_div_fixup_f16 v5, s1, 0, 0.5 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0xc1,0x03] + +v_div_fixup_f16 v5, s1, 0, -4.0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0xdd,0x03] + +v_div_fixup_f16 v5, s1, 0, v3 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x0d,0x04] + +v_div_fixup_f16 v5, s1, 0, v255 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0xfd,0x07] + +v_div_fixup_f16 v5, -s1, 0, 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x01,0x22] + +v_div_fixup_f16 v5, s1, neg(0), 0 +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x01,0x42] + +v_div_fixup_f16 v5, s1, 0, neg(0) +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x01,0x82] + +v_div_fixup_f16 v5, -s1, neg(0), neg(0) +// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x00,0x01,0xe2] + +v_div_fixup_f16 v5, |s1|, 0, 0 +// CHECK: [0x05,0x01,0xef,0xd1,0x01,0x00,0x01,0x02] + +v_div_fixup_f16 v5, s1, |0|, 0 +// CHECK: [0x05,0x02,0xef,0xd1,0x01,0x00,0x01,0x02] + +v_div_fixup_f16 v5, s1, 0, |0| +// CHECK: [0x05,0x04,0xef,0xd1,0x01,0x00,0x01,0x02] + +v_div_fixup_f16 v5, |s1|, 
|0|, |0| +// CHECK: [0x05,0x07,0xef,0xd1,0x01,0x00,0x01,0x02] + +v_div_fixup_f16 v5, s1, 0, 0 clamp +// CHECK: [0x05,0x80,0xef,0xd1,0x01,0x00,0x01,0x02] + +v_cvt_pkaccum_u8_f32 v5, 0, s2 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32 v255, 0, s2 +// CHECK: [0xff,0x00,0xf0,0xd1,0x80,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32 v5, -1, s2 +// CHECK: [0x05,0x00,0xf0,0xd1,0xc1,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32 v5, 0.5, s2 +// CHECK: [0x05,0x00,0xf0,0xd1,0xf0,0x04,0x00,0x00] + +v_cvt_pkaccum_u8_f32 v5, -4.0, s2 +// CHECK: [0x05,0x00,0xf0,0xd1,0xf7,0x04,0x00,0x00] v_cvt_pkaccum_u8_f32 v5, v1, s2 // CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x05,0x00,0x00] -v_cvt_pkaccum_u8_f32 v255, v1, s2 -// CHECK: [0xff,0x00,0xf0,0xd1,0x01,0x05,0x00,0x00] - v_cvt_pkaccum_u8_f32 v5, v255, s2 // CHECK: [0x05,0x00,0xf0,0xd1,0xff,0x05,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, s101 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xcb,0x00,0x00] - -v_cvt_pkaccum_u8_f32 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xcd,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, s101 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xca,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xcf,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xcc,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xd5,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xce,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xd7,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xd4,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, tba_lo -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xd9,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xd6,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, tba_hi -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xdb,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, 
tba_lo +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xd8,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, tma_lo -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xdd,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, tba_hi +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xda,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, tma_hi -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xdf,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, tma_lo +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xdc,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xf7,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, tma_hi +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xde,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, m0 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xf9,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xf6,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, exec_lo -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xfd,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, m0 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xf8,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, exec_hi -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xff,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, exec_lo +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xfc,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, 0 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x01,0x01,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, exec_hi +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xfe,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, -1 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x83,0x01,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, 0 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0x00,0x01,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, 0.5 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xe1,0x01,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, -1 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0x82,0x01,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, -4.0 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xef,0x01,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, 0.5 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xe0,0x01,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, scc -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xfb,0x01,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, -4.0 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xee,0x01,0x00] 
-v_cvt_pkaccum_u8_f32 v5, v1, v2 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x05,0x02,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, v2 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0x04,0x02,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, v255 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xff,0x03,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, v255 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0xfe,0x03,0x00] -v_cvt_pkaccum_u8_f32 v5, -v1, s2 -// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x05,0x00,0x20] +v_cvt_pkaccum_u8_f32 v5, neg(0), s2 +// CHECK: [0x05,0x00,0xf0,0xd1,0x80,0x04,0x00,0x20] -v_cvt_pkaccum_u8_f32 v5, |v1|, s2 -// CHECK: [0x05,0x01,0xf0,0xd1,0x01,0x05,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, |0|, s2 +// CHECK: [0x05,0x01,0xf0,0xd1,0x80,0x04,0x00,0x00] -v_cvt_pkaccum_u8_f32 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0xf0,0xd1,0x01,0x05,0x00,0x00] +v_cvt_pkaccum_u8_f32 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0xf0,0xd1,0x80,0x04,0x00,0x00] v_add_f64 v[5:6], s[4:5], s[4:5] // CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x00] @@ -43922,12 +45534,36 @@ v_add_f64 v[5:6], s[4:5], s[4:5] v_add_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0x80,0xd2,0x04,0x08,0x00,0x00] +v_add_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0x80,0xd2,0x80,0x08,0x00,0x00] + +v_add_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0x80,0xd2,0xc1,0x08,0x00,0x00] + +v_add_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0x80,0xd2,0xf0,0x08,0x00,0x00] + +v_add_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0x80,0xd2,0xf7,0x08,0x00,0x00] + v_add_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0x80,0xd2,0x01,0x09,0x00,0x00] v_add_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0x80,0xd2,0xfe,0x09,0x00,0x00] +v_add_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x00,0x01,0x00] + +v_add_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x82,0x01,0x00] + +v_add_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0x80,0xd2,0x04,0xe0,0x01,0x00] + +v_add_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0x80,0xd2,0x04,0xee,0x01,0x00] + v_add_f64 v[5:6], s[4:5], 
v[2:3] // CHECK: [0x05,0x00,0x80,0xd2,0x04,0x04,0x02,0x00] @@ -43970,12 +45606,36 @@ v_mul_f64 v[5:6], s[4:5], s[4:5] v_mul_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0x81,0xd2,0x04,0x08,0x00,0x00] +v_mul_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0x81,0xd2,0x80,0x08,0x00,0x00] + +v_mul_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0x81,0xd2,0xc1,0x08,0x00,0x00] + +v_mul_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0x81,0xd2,0xf0,0x08,0x00,0x00] + +v_mul_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0x81,0xd2,0xf7,0x08,0x00,0x00] + v_mul_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0x81,0xd2,0x01,0x09,0x00,0x00] v_mul_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0x81,0xd2,0xfe,0x09,0x00,0x00] +v_mul_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x00,0x01,0x00] + +v_mul_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x82,0x01,0x00] + +v_mul_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0x81,0xd2,0x04,0xe0,0x01,0x00] + +v_mul_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0x81,0xd2,0x04,0xee,0x01,0x00] + v_mul_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0x81,0xd2,0x04,0x04,0x02,0x00] @@ -44018,12 +45678,36 @@ v_min_f64 v[5:6], s[4:5], s[4:5] v_min_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0x82,0xd2,0x04,0x08,0x00,0x00] +v_min_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0x82,0xd2,0x80,0x08,0x00,0x00] + +v_min_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0x82,0xd2,0xc1,0x08,0x00,0x00] + +v_min_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0x82,0xd2,0xf0,0x08,0x00,0x00] + +v_min_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0x82,0xd2,0xf7,0x08,0x00,0x00] + v_min_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0x82,0xd2,0x01,0x09,0x00,0x00] v_min_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0x82,0xd2,0xfe,0x09,0x00,0x00] +v_min_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x00,0x01,0x00] + +v_min_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x82,0x01,0x00] + +v_min_f64 
v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0x82,0xd2,0x04,0xe0,0x01,0x00] + +v_min_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0x82,0xd2,0x04,0xee,0x01,0x00] + v_min_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0x82,0xd2,0x04,0x04,0x02,0x00] @@ -44066,12 +45750,36 @@ v_max_f64 v[5:6], s[4:5], s[4:5] v_max_f64 v[254:255], s[4:5], s[4:5] // CHECK: [0xfe,0x00,0x83,0xd2,0x04,0x08,0x00,0x00] +v_max_f64 v[5:6], 0, s[4:5] +// CHECK: [0x05,0x00,0x83,0xd2,0x80,0x08,0x00,0x00] + +v_max_f64 v[5:6], -1, s[4:5] +// CHECK: [0x05,0x00,0x83,0xd2,0xc1,0x08,0x00,0x00] + +v_max_f64 v[5:6], 0.5, s[4:5] +// CHECK: [0x05,0x00,0x83,0xd2,0xf0,0x08,0x00,0x00] + +v_max_f64 v[5:6], -4.0, s[4:5] +// CHECK: [0x05,0x00,0x83,0xd2,0xf7,0x08,0x00,0x00] + v_max_f64 v[5:6], v[1:2], s[4:5] // CHECK: [0x05,0x00,0x83,0xd2,0x01,0x09,0x00,0x00] v_max_f64 v[5:6], v[254:255], s[4:5] // CHECK: [0x05,0x00,0x83,0xd2,0xfe,0x09,0x00,0x00] +v_max_f64 v[5:6], s[4:5], 0 +// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x00,0x01,0x00] + +v_max_f64 v[5:6], s[4:5], -1 +// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x82,0x01,0x00] + +v_max_f64 v[5:6], s[4:5], 0.5 +// CHECK: [0x05,0x00,0x83,0xd2,0x04,0xe0,0x01,0x00] + +v_max_f64 v[5:6], s[4:5], -4.0 +// CHECK: [0x05,0x00,0x83,0xd2,0x04,0xee,0x01,0x00] + v_max_f64 v[5:6], s[4:5], v[2:3] // CHECK: [0x05,0x00,0x83,0xd2,0x04,0x04,0x02,0x00] @@ -44114,9 +45822,15 @@ v_ldexp_f64 v[5:6], 0, s2 v_ldexp_f64 v[254:255], 0, s2 // CHECK: [0xfe,0x00,0x84,0xd2,0x80,0x04,0x00,0x00] +v_ldexp_f64 v[5:6], -1, s2 +// CHECK: [0x05,0x00,0x84,0xd2,0xc1,0x04,0x00,0x00] + v_ldexp_f64 v[5:6], 0.5, s2 // CHECK: [0x05,0x00,0x84,0xd2,0xf0,0x04,0x00,0x00] +v_ldexp_f64 v[5:6], -4.0, s2 +// CHECK: [0x05,0x00,0x84,0xd2,0xf7,0x04,0x00,0x00] + v_ldexp_f64 v[5:6], v[1:2], s2 // CHECK: [0x05,0x00,0x84,0xd2,0x01,0x05,0x00,0x00] @@ -44174,15 +45888,18 @@ v_ldexp_f64 v[5:6], 0, 0.5 v_ldexp_f64 v[5:6], 0, -4.0 // CHECK: [0x05,0x00,0x84,0xd2,0x80,0xee,0x01,0x00] -v_ldexp_f64 v[5:6], 0, scc -// CHECK: 
[0x05,0x00,0x84,0xd2,0x80,0xfa,0x01,0x00] - v_ldexp_f64 v[5:6], 0, v2 // CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x02,0x00] v_ldexp_f64 v[5:6], 0, v255 // CHECK: [0x05,0x00,0x84,0xd2,0x80,0xfe,0x03,0x00] +v_ldexp_f64 v[5:6], neg(0), s2 +// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x00,0x20] + +v_ldexp_f64 v[5:6], |0|, s2 +// CHECK: [0x05,0x01,0x84,0xd2,0x80,0x04,0x00,0x00] + v_ldexp_f64 v[5:6], 0, s2 clamp // CHECK: [0x05,0x80,0x84,0xd2,0x80,0x04,0x00,0x00] @@ -44429,92 +46146,101 @@ v_mul_hi_i32 v5, 0, v2 v_mul_hi_i32 v5, 0, v255 // CHECK: [0x05,0x00,0x87,0xd2,0x80,0xfe,0x03,0x00] +v_ldexp_f32 v5, 0, s2 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x04,0x00,0x00] + +v_ldexp_f32 v255, 0, s2 +// CHECK: [0xff,0x00,0x88,0xd2,0x80,0x04,0x00,0x00] + +v_ldexp_f32 v5, -1, s2 +// CHECK: [0x05,0x00,0x88,0xd2,0xc1,0x04,0x00,0x00] + +v_ldexp_f32 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x88,0xd2,0xf0,0x04,0x00,0x00] + +v_ldexp_f32 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x88,0xd2,0xf7,0x04,0x00,0x00] + v_ldexp_f32 v5, v1, s2 // CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x00] -v_ldexp_f32 v255, v1, s2 -// CHECK: [0xff,0x00,0x88,0xd2,0x01,0x05,0x00,0x00] - v_ldexp_f32 v5, v255, s2 // CHECK: [0x05,0x00,0x88,0xd2,0xff,0x05,0x00,0x00] -v_ldexp_f32 v5, v1, s101 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xcb,0x00,0x00] - -v_ldexp_f32 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xcd,0x00,0x00] +v_ldexp_f32 v5, 0, s101 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xca,0x00,0x00] -v_ldexp_f32 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xcf,0x00,0x00] +v_ldexp_f32 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xcc,0x00,0x00] -v_ldexp_f32 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xd5,0x00,0x00] +v_ldexp_f32 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xce,0x00,0x00] -v_ldexp_f32 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xd7,0x00,0x00] +v_ldexp_f32 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xd4,0x00,0x00] -v_ldexp_f32 v5, v1, 
tba_lo -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xd9,0x00,0x00] +v_ldexp_f32 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xd6,0x00,0x00] -v_ldexp_f32 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xdb,0x00,0x00] +v_ldexp_f32 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xd8,0x00,0x00] -v_ldexp_f32 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xdd,0x00,0x00] +v_ldexp_f32 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xda,0x00,0x00] -v_ldexp_f32 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xdf,0x00,0x00] +v_ldexp_f32 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xdc,0x00,0x00] -v_ldexp_f32 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xf7,0x00,0x00] +v_ldexp_f32 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xde,0x00,0x00] -v_ldexp_f32 v5, v1, m0 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xf9,0x00,0x00] +v_ldexp_f32 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xf6,0x00,0x00] -v_ldexp_f32 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfd,0x00,0x00] +v_ldexp_f32 v5, 0, m0 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xf8,0x00,0x00] -v_ldexp_f32 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xff,0x00,0x00] +v_ldexp_f32 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xfc,0x00,0x00] -v_ldexp_f32 v5, v1, 0 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x01,0x01,0x00] +v_ldexp_f32 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xfe,0x00,0x00] -v_ldexp_f32 v5, v1, -1 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x83,0x01,0x00] +v_ldexp_f32 v5, 0, 0 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x00,0x01,0x00] -v_ldexp_f32 v5, v1, 0.5 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xe1,0x01,0x00] +v_ldexp_f32 v5, 0, -1 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x82,0x01,0x00] -v_ldexp_f32 v5, v1, -4.0 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xef,0x01,0x00] +v_ldexp_f32 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xe0,0x01,0x00] -v_ldexp_f32 v5, v1, scc -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfb,0x01,0x00] +v_ldexp_f32 v5, 0, -4.0 +// CHECK: 
[0x05,0x00,0x88,0xd2,0x80,0xee,0x01,0x00] -v_ldexp_f32 v5, v1, v2 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x02,0x00] +v_ldexp_f32 v5, 0, v2 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x04,0x02,0x00] -v_ldexp_f32 v5, v1, v255 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xff,0x03,0x00] +v_ldexp_f32 v5, 0, v255 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0xfe,0x03,0x00] -v_ldexp_f32 v5, -v1, s2 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x20] +v_ldexp_f32 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x04,0x00,0x20] -v_ldexp_f32 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x88,0xd2,0x01,0x05,0x00,0x00] +v_ldexp_f32 v5, |0|, s2 +// CHECK: [0x05,0x01,0x88,0xd2,0x80,0x04,0x00,0x00] -v_ldexp_f32 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x88,0xd2,0x01,0x05,0x00,0x00] +v_ldexp_f32 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x88,0xd2,0x80,0x04,0x00,0x00] -v_ldexp_f32 v5, v1, s2 mul:2 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x08] +v_ldexp_f32 v5, 0, s2 mul:2 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x04,0x00,0x08] -v_ldexp_f32 v5, v1, s2 mul:4 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x10] +v_ldexp_f32 v5, 0, s2 mul:4 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x04,0x00,0x10] -v_ldexp_f32 v5, v1, s2 div:2 -// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x18] +v_ldexp_f32 v5, 0, s2 div:2 +// CHECK: [0x05,0x00,0x88,0xd2,0x80,0x04,0x00,0x18] v_readlane_b32 s5, v1, s2 // CHECK: [0x05,0x00,0x89,0xd2,0x01,0x05,0x00,0x00] @@ -45056,9 +46782,15 @@ v_trig_preop_f64 v[5:6], 0, s2 v_trig_preop_f64 v[254:255], 0, s2 // CHECK: [0xfe,0x00,0x92,0xd2,0x80,0x04,0x00,0x00] +v_trig_preop_f64 v[5:6], -1, s2 +// CHECK: [0x05,0x00,0x92,0xd2,0xc1,0x04,0x00,0x00] + v_trig_preop_f64 v[5:6], 0.5, s2 // CHECK: [0x05,0x00,0x92,0xd2,0xf0,0x04,0x00,0x00] +v_trig_preop_f64 v[5:6], -4.0, s2 +// CHECK: [0x05,0x00,0x92,0xd2,0xf7,0x04,0x00,0x00] + v_trig_preop_f64 v[5:6], v[1:2], s2 // CHECK: [0x05,0x00,0x92,0xd2,0x01,0x05,0x00,0x00] @@ -45116,15 +46848,18 @@ v_trig_preop_f64 v[5:6], 0, 0.5 v_trig_preop_f64 v[5:6], 0, -4.0 // 
CHECK: [0x05,0x00,0x92,0xd2,0x80,0xee,0x01,0x00] -v_trig_preop_f64 v[5:6], 0, scc -// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xfa,0x01,0x00] - v_trig_preop_f64 v[5:6], 0, v2 // CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x02,0x00] v_trig_preop_f64 v[5:6], 0, v255 // CHECK: [0x05,0x00,0x92,0xd2,0x80,0xfe,0x03,0x00] +v_trig_preop_f64 v[5:6], neg(0), s2 +// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x00,0x20] + +v_trig_preop_f64 v[5:6], |0|, s2 +// CHECK: [0x05,0x01,0x92,0xd2,0x80,0x04,0x00,0x00] + v_trig_preop_f64 v[5:6], 0, s2 clamp // CHECK: [0x05,0x80,0x92,0xd2,0x80,0x04,0x00,0x00] @@ -45215,239 +46950,302 @@ v_bfm_b32 v5, 0, v2 v_bfm_b32 v5, 0, v255 // CHECK: [0x05,0x00,0x93,0xd2,0x80,0xfe,0x03,0x00] +v_cvt_pknorm_i16_f32 v5, 0, s2 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v255, 0, s2 +// CHECK: [0xff,0x00,0x94,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v5, -1, s2 +// CHECK: [0x05,0x00,0x94,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x94,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x94,0xd2,0xf7,0x04,0x00,0x00] + v_cvt_pknorm_i16_f32 v5, v1, s2 // CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pknorm_i16_f32 v255, v1, s2 -// CHECK: [0xff,0x00,0x94,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pknorm_i16_f32 v5, v255, s2 // CHECK: [0x05,0x00,0x94,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, s101 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xcb,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, s101 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xca,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xcd,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xcc,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xcf,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xce,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, 
vcc_lo -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xda,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xdc,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, m0 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, m0 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pknorm_i16_f32 v5, v1, scc -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pknorm_i16_f32 v5, 0, 0 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pknorm_i16_f32 v5, v1, v2 -// CHECK: 
[0x05,0x00,0x94,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pknorm_i16_f32 v5, 0, -1 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pknorm_i16_f32 v5, v1, v255 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pknorm_i16_f32 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pknorm_i16_f32 v5, -v1, s2 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pknorm_i16_f32 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xee,0x01,0x00] -v_cvt_pknorm_i16_f32 v5, v1, -s2 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x40] +v_cvt_pknorm_i16_f32 v5, 0, v2 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pknorm_i16_f32 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x60] +v_cvt_pknorm_i16_f32 v5, 0, v255 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pknorm_i16_f32 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x94,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pknorm_i16_f32 v5, v1, |s2| -// CHECK: [0x05,0x02,0x94,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, 0, -s2 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x04,0x00,0x40] -v_cvt_pknorm_i16_f32 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x94,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x04,0x00,0x60] -v_cvt_pknorm_i16_f32 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x94,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_i16_f32 v5, |0|, s2 +// CHECK: [0x05,0x01,0x94,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v5, 0, |s2| +// CHECK: [0x05,0x02,0x94,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x94,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_i16_f32 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x94,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v5, 0, s2 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v255, 0, s2 +// CHECK: [0xff,0x00,0x95,0xd2,0x80,0x04,0x00,0x00] + 
+v_cvt_pknorm_u16_f32 v5, -1, s2 +// CHECK: [0x05,0x00,0x95,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x95,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x95,0xd2,0xf7,0x04,0x00,0x00] v_cvt_pknorm_u16_f32 v5, v1, s2 // CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pknorm_u16_f32 v255, v1, s2 -// CHECK: [0xff,0x00,0x95,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pknorm_u16_f32 v5, v255, s2 // CHECK: [0x05,0x00,0x95,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, s101 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xcb,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, s101 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xca,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xcd,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, flat_scratch_lo +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xcc,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xcf,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xce,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xda,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xdc,0x00,0x00] 
-v_cvt_pknorm_u16_f32 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, m0 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, m0 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, exec_hi +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pknorm_u16_f32 v5, v1, scc -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pknorm_u16_f32 v5, 0, 0 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pknorm_u16_f32 v5, v1, v2 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pknorm_u16_f32 v5, 0, -1 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pknorm_u16_f32 v5, v1, v255 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pknorm_u16_f32 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pknorm_u16_f32 v5, -v1, s2 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pknorm_u16_f32 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xee,0x01,0x00] -v_cvt_pknorm_u16_f32 v5, v1, -s2 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x40] +v_cvt_pknorm_u16_f32 v5, 0, v2 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pknorm_u16_f32 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x60] +v_cvt_pknorm_u16_f32 v5, 0, v255 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pknorm_u16_f32 v5, |v1|, s2 -// CHECK: 
[0x05,0x01,0x95,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pknorm_u16_f32 v5, v1, |s2| -// CHECK: [0x05,0x02,0x95,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, 0, -s2 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x04,0x00,0x40] -v_cvt_pknorm_u16_f32 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x95,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x95,0xd2,0x80,0x04,0x00,0x60] -v_cvt_pknorm_u16_f32 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x95,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pknorm_u16_f32 v5, |0|, s2 +// CHECK: [0x05,0x01,0x95,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v5, 0, |s2| +// CHECK: [0x05,0x02,0x95,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x95,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pknorm_u16_f32 v5, 0, s2 clamp +// CHECK: [0x05,0x80,0x95,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, 0, s2 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v255, 0, s2 +// CHECK: [0xff,0x00,0x96,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, -1, s2 +// CHECK: [0x05,0x00,0x96,0xd2,0xc1,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, 0.5, s2 +// CHECK: [0x05,0x00,0x96,0xd2,0xf0,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, -4.0, s2 +// CHECK: [0x05,0x00,0x96,0xd2,0xf7,0x04,0x00,0x00] v_cvt_pkrtz_f16_f32 v5, v1, s2 // CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x00] -v_cvt_pkrtz_f16_f32 v255, v1, s2 -// CHECK: [0xff,0x00,0x96,0xd2,0x01,0x05,0x00,0x00] - v_cvt_pkrtz_f16_f32 v5, v255, s2 // CHECK: [0x05,0x00,0x96,0xd2,0xff,0x05,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, s101 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xcb,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, s101 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xca,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, flat_scratch_lo -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xcd,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, flat_scratch_lo +// CHECK: 
[0x05,0x00,0x96,0xd2,0x80,0xcc,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, flat_scratch_hi -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xcf,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, flat_scratch_hi +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xce,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, vcc_lo -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xd5,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, vcc_lo +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xd4,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, vcc_hi -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xd7,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, vcc_hi +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xd6,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, tba_lo -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xd9,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, tba_lo +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xd8,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, tba_hi -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xdb,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, tba_hi +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xda,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, tma_lo -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xdd,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, tma_lo +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xdc,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, tma_hi -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xdf,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, tma_hi +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xde,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, ttmp11 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xf7,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, ttmp11 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xf6,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, m0 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xf9,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, m0 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xf8,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, exec_lo -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfd,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, exec_lo +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xfc,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, exec_hi -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xff,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, exec_hi +// CHECK: 
[0x05,0x00,0x96,0xd2,0x80,0xfe,0x00,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, scc -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfb,0x01,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, 0 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x00,0x01,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, v2 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x02,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, -1 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x82,0x01,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, v255 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xff,0x03,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, 0.5 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xe0,0x01,0x00] -v_cvt_pkrtz_f16_f32 v5, -v1, s2 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x20] +v_cvt_pkrtz_f16_f32 v5, 0, -4.0 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xee,0x01,0x00] -v_cvt_pkrtz_f16_f32 v5, v1, -s2 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x40] +v_cvt_pkrtz_f16_f32 v5, 0, v2 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x04,0x02,0x00] -v_cvt_pkrtz_f16_f32 v5, -v1, -s2 -// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x60] +v_cvt_pkrtz_f16_f32 v5, 0, v255 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0xfe,0x03,0x00] -v_cvt_pkrtz_f16_f32 v5, |v1|, s2 -// CHECK: [0x05,0x01,0x96,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, neg(0), s2 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x04,0x00,0x20] -v_cvt_pkrtz_f16_f32 v5, v1, |s2| -// CHECK: [0x05,0x02,0x96,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, 0, -s2 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x04,0x00,0x40] -v_cvt_pkrtz_f16_f32 v5, |v1|, |s2| -// CHECK: [0x05,0x03,0x96,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, neg(0), -s2 +// CHECK: [0x05,0x00,0x96,0xd2,0x80,0x04,0x00,0x60] -v_cvt_pkrtz_f16_f32 v5, v1, s2 clamp -// CHECK: [0x05,0x80,0x96,0xd2,0x01,0x05,0x00,0x00] +v_cvt_pkrtz_f16_f32 v5, |0|, s2 +// CHECK: [0x05,0x01,0x96,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, 0, |s2| +// CHECK: [0x05,0x02,0x96,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, |0|, |s2| +// CHECK: [0x05,0x03,0x96,0xd2,0x80,0x04,0x00,0x00] + +v_cvt_pkrtz_f16_f32 v5, 0, s2 clamp +// CHECK: 
[0x05,0x80,0x96,0xd2,0x80,0x04,0x00,0x00] v_cvt_pk_u16_u32 v5, 0, s2 // CHECK: [0x05,0x00,0x97,0xd2,0x80,0x04,0x00,0x00] @@ -46082,9 +47880,15 @@ v_cmp_f_f16_e64 tma, 0, s2 v_cmp_f_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00] +v_cmp_f_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_f_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x20,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_f_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_f_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x20,0xd0,0x01,0x05,0x00,0x00] @@ -46133,11 +47937,14 @@ v_cmp_f_f16_e64 s[10:11], 0, exec_hi v_cmp_f_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x00,0x01,0x00] +v_cmp_f_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x82,0x01,0x00] + v_cmp_f_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_f_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_f_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xee,0x01,0x00] v_cmp_f_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x02,0x00] @@ -46145,9 +47952,15 @@ v_cmp_f_f16_e64 s[10:11], 0, v2 v_cmp_f_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_f_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x20] + v_cmp_f_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x40] +v_cmp_f_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x60] + v_cmp_f_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x20,0xd0,0x80,0x04,0x00,0x00] @@ -46244,9 +48057,15 @@ v_cmp_lt_f16_e64 tma, 0, s2 v_cmp_lt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x21,0xd0,0x80,0x04,0x00,0x00] +v_cmp_lt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x21,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_lt_f16_e64 s[10:11], 0.5, s2 // CHECK: 
[0x0a,0x00,0x21,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_lt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x21,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_lt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x21,0xd0,0x01,0x05,0x00,0x00] @@ -46295,11 +48114,14 @@ v_cmp_lt_f16_e64 s[10:11], 0, exec_hi v_cmp_lt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x00,0x01,0x00] +v_cmp_lt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x82,0x01,0x00] + v_cmp_lt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_lt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_lt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xee,0x01,0x00] v_cmp_lt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x02,0x00] @@ -46307,9 +48129,15 @@ v_cmp_lt_f16_e64 s[10:11], 0, v2 v_cmp_lt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_lt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x00,0x20] + v_cmp_lt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x00,0x40] +v_cmp_lt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x00,0x60] + v_cmp_lt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x21,0xd0,0x80,0x04,0x00,0x00] @@ -46406,9 +48234,15 @@ v_cmp_eq_f16_e64 tma, 0, s2 v_cmp_eq_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00] +v_cmp_eq_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_eq_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x22,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_eq_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_eq_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x22,0xd0,0x01,0x05,0x00,0x00] @@ -46457,11 +48291,14 @@ v_cmp_eq_f16_e64 s[10:11], 0, exec_hi v_cmp_eq_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x00,0x01,0x00] +v_cmp_eq_f16_e64 s[10:11], 0, -1 +// 
CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x82,0x01,0x00] + v_cmp_eq_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_eq_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_eq_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xee,0x01,0x00] v_cmp_eq_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x02,0x00] @@ -46469,9 +48306,15 @@ v_cmp_eq_f16_e64 s[10:11], 0, v2 v_cmp_eq_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_eq_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x20] + v_cmp_eq_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x40] +v_cmp_eq_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x60] + v_cmp_eq_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x22,0xd0,0x80,0x04,0x00,0x00] @@ -46568,9 +48411,15 @@ v_cmp_le_f16_e64 tma, 0, s2 v_cmp_le_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x23,0xd0,0x80,0x04,0x00,0x00] +v_cmp_le_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x23,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_le_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x23,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_le_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x23,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_le_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x23,0xd0,0x01,0x05,0x00,0x00] @@ -46619,11 +48468,14 @@ v_cmp_le_f16_e64 s[10:11], 0, exec_hi v_cmp_le_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x00,0x01,0x00] +v_cmp_le_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x82,0x01,0x00] + v_cmp_le_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_le_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_le_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xee,0x01,0x00] v_cmp_le_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x02,0x00] @@ 
-46631,9 +48483,15 @@ v_cmp_le_f16_e64 s[10:11], 0, v2 v_cmp_le_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_le_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x00,0x20] + v_cmp_le_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x00,0x40] +v_cmp_le_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x00,0x60] + v_cmp_le_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x23,0xd0,0x80,0x04,0x00,0x00] @@ -46730,9 +48588,15 @@ v_cmp_gt_f16_e64 tma, 0, s2 v_cmp_gt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00] +v_cmp_gt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_gt_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x24,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_gt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_gt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x24,0xd0,0x01,0x05,0x00,0x00] @@ -46781,11 +48645,14 @@ v_cmp_gt_f16_e64 s[10:11], 0, exec_hi v_cmp_gt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x00,0x01,0x00] +v_cmp_gt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x82,0x01,0x00] + v_cmp_gt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_gt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_gt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xee,0x01,0x00] v_cmp_gt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x02,0x00] @@ -46793,9 +48660,15 @@ v_cmp_gt_f16_e64 s[10:11], 0, v2 v_cmp_gt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_gt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x20] + v_cmp_gt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x40] +v_cmp_gt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: 
[0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x60] + v_cmp_gt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x24,0xd0,0x80,0x04,0x00,0x00] @@ -46892,9 +48765,15 @@ v_cmp_lg_f16_e64 tma, 0, s2 v_cmp_lg_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x25,0xd0,0x80,0x04,0x00,0x00] +v_cmp_lg_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x25,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_lg_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x25,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_lg_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x25,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_lg_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x25,0xd0,0x01,0x05,0x00,0x00] @@ -46943,11 +48822,14 @@ v_cmp_lg_f16_e64 s[10:11], 0, exec_hi v_cmp_lg_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x00,0x01,0x00] +v_cmp_lg_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x82,0x01,0x00] + v_cmp_lg_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_lg_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_lg_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xee,0x01,0x00] v_cmp_lg_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x02,0x00] @@ -46955,9 +48837,15 @@ v_cmp_lg_f16_e64 s[10:11], 0, v2 v_cmp_lg_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_lg_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x00,0x20] + v_cmp_lg_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x00,0x40] +v_cmp_lg_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x00,0x60] + v_cmp_lg_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x25,0xd0,0x80,0x04,0x00,0x00] @@ -47054,9 +48942,15 @@ v_cmp_ge_f16_e64 tma, 0, s2 v_cmp_ge_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00] +v_cmp_ge_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_ge_f16_e64 s[10:11], 0.5, s2 // 
CHECK: [0x0a,0x00,0x26,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_ge_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_ge_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x26,0xd0,0x01,0x05,0x00,0x00] @@ -47105,11 +48999,14 @@ v_cmp_ge_f16_e64 s[10:11], 0, exec_hi v_cmp_ge_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x00,0x01,0x00] +v_cmp_ge_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x82,0x01,0x00] + v_cmp_ge_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_ge_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_ge_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xee,0x01,0x00] v_cmp_ge_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x02,0x00] @@ -47117,9 +49014,15 @@ v_cmp_ge_f16_e64 s[10:11], 0, v2 v_cmp_ge_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_ge_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x20] + v_cmp_ge_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x40] +v_cmp_ge_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x60] + v_cmp_ge_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x26,0xd0,0x80,0x04,0x00,0x00] @@ -47216,9 +49119,15 @@ v_cmp_o_f16_e64 tma, 0, s2 v_cmp_o_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x27,0xd0,0x80,0x04,0x00,0x00] +v_cmp_o_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x27,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_o_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x27,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_o_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x27,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_o_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x27,0xd0,0x01,0x05,0x00,0x00] @@ -47267,11 +49176,14 @@ v_cmp_o_f16_e64 s[10:11], 0, exec_hi v_cmp_o_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x00,0x01,0x00] +v_cmp_o_f16_e64 s[10:11], 0, -1 +// 
CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x82,0x01,0x00] + v_cmp_o_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_o_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_o_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xee,0x01,0x00] v_cmp_o_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x02,0x00] @@ -47279,9 +49191,15 @@ v_cmp_o_f16_e64 s[10:11], 0, v2 v_cmp_o_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_o_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x00,0x20] + v_cmp_o_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x00,0x40] +v_cmp_o_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x00,0x60] + v_cmp_o_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x27,0xd0,0x80,0x04,0x00,0x00] @@ -47378,9 +49296,15 @@ v_cmp_u_f16_e64 tma, 0, s2 v_cmp_u_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00] +v_cmp_u_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_u_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x28,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_u_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_u_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x28,0xd0,0x01,0x05,0x00,0x00] @@ -47429,11 +49353,14 @@ v_cmp_u_f16_e64 s[10:11], 0, exec_hi v_cmp_u_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x00,0x01,0x00] +v_cmp_u_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x82,0x01,0x00] + v_cmp_u_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_u_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_u_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xee,0x01,0x00] v_cmp_u_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x02,0x00] @@ -47441,9 +49368,15 @@ 
v_cmp_u_f16_e64 s[10:11], 0, v2 v_cmp_u_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_u_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x20] + v_cmp_u_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x40] +v_cmp_u_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x60] + v_cmp_u_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x28,0xd0,0x80,0x04,0x00,0x00] @@ -47540,9 +49473,15 @@ v_cmp_nge_f16_e64 tma, 0, s2 v_cmp_nge_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x29,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nge_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x29,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nge_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x29,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nge_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x29,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nge_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x29,0xd0,0x01,0x05,0x00,0x00] @@ -47591,11 +49530,14 @@ v_cmp_nge_f16_e64 s[10:11], 0, exec_hi v_cmp_nge_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nge_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nge_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nge_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nge_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xee,0x01,0x00] v_cmp_nge_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x02,0x00] @@ -47603,9 +49545,15 @@ v_cmp_nge_f16_e64 s[10:11], 0, v2 v_cmp_nge_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nge_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nge_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nge_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x00,0x60] + 
v_cmp_nge_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x29,0xd0,0x80,0x04,0x00,0x00] @@ -47702,9 +49650,15 @@ v_cmp_nlg_f16_e64 tma, 0, s2 v_cmp_nlg_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nlg_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nlg_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2a,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nlg_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nlg_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2a,0xd0,0x01,0x05,0x00,0x00] @@ -47753,11 +49707,14 @@ v_cmp_nlg_f16_e64 s[10:11], 0, exec_hi v_cmp_nlg_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nlg_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nlg_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nlg_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nlg_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xee,0x01,0x00] v_cmp_nlg_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x02,0x00] @@ -47765,9 +49722,15 @@ v_cmp_nlg_f16_e64 s[10:11], 0, v2 v_cmp_nlg_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nlg_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nlg_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nlg_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nlg_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x2a,0xd0,0x80,0x04,0x00,0x00] @@ -47864,9 +49827,15 @@ v_cmp_ngt_f16_e64 tma, 0, s2 v_cmp_ngt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00] +v_cmp_ngt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2b,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_ngt_f16_e64 s[10:11], 0.5, s2 // CHECK: 
[0x0a,0x00,0x2b,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_ngt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2b,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_ngt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2b,0xd0,0x01,0x05,0x00,0x00] @@ -47915,11 +49884,14 @@ v_cmp_ngt_f16_e64 s[10:11], 0, exec_hi v_cmp_ngt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x00,0x01,0x00] +v_cmp_ngt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x82,0x01,0x00] + v_cmp_ngt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_ngt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_ngt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xee,0x01,0x00] v_cmp_ngt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x02,0x00] @@ -47927,9 +49899,15 @@ v_cmp_ngt_f16_e64 s[10:11], 0, v2 v_cmp_ngt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_ngt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x20] + v_cmp_ngt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x40] +v_cmp_ngt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x60] + v_cmp_ngt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x2b,0xd0,0x80,0x04,0x00,0x00] @@ -48026,9 +50004,15 @@ v_cmp_nle_f16_e64 tma, 0, s2 v_cmp_nle_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nle_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nle_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2c,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nle_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nle_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2c,0xd0,0x01,0x05,0x00,0x00] @@ -48077,11 +50061,14 @@ v_cmp_nle_f16_e64 s[10:11], 0, exec_hi v_cmp_nle_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x00,0x01,0x00] 
+v_cmp_nle_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nle_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nle_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nle_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xee,0x01,0x00] v_cmp_nle_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x02,0x00] @@ -48089,9 +50076,15 @@ v_cmp_nle_f16_e64 s[10:11], 0, v2 v_cmp_nle_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nle_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nle_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nle_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nle_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x2c,0xd0,0x80,0x04,0x00,0x00] @@ -48188,9 +50181,15 @@ v_cmp_neq_f16_e64 tma, 0, s2 v_cmp_neq_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00] +v_cmp_neq_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2d,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_neq_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2d,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_neq_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2d,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_neq_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2d,0xd0,0x01,0x05,0x00,0x00] @@ -48239,11 +50238,14 @@ v_cmp_neq_f16_e64 s[10:11], 0, exec_hi v_cmp_neq_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x00,0x01,0x00] +v_cmp_neq_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x82,0x01,0x00] + v_cmp_neq_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_neq_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_neq_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xee,0x01,0x00] v_cmp_neq_f16_e64 s[10:11], 0, 
v2 // CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x02,0x00] @@ -48251,9 +50253,15 @@ v_cmp_neq_f16_e64 s[10:11], 0, v2 v_cmp_neq_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_neq_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x20] + v_cmp_neq_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x40] +v_cmp_neq_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x60] + v_cmp_neq_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x2d,0xd0,0x80,0x04,0x00,0x00] @@ -48350,9 +50358,15 @@ v_cmp_nlt_f16_e64 tma, 0, s2 v_cmp_nlt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nlt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nlt_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2e,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nlt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nlt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2e,0xd0,0x01,0x05,0x00,0x00] @@ -48401,11 +50415,14 @@ v_cmp_nlt_f16_e64 s[10:11], 0, exec_hi v_cmp_nlt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nlt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nlt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nlt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nlt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xee,0x01,0x00] v_cmp_nlt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x02,0x00] @@ -48413,9 +50430,15 @@ v_cmp_nlt_f16_e64 s[10:11], 0, v2 v_cmp_nlt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nlt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nlt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x40] 
+v_cmp_nlt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nlt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x2e,0xd0,0x80,0x04,0x00,0x00] @@ -48512,9 +50535,15 @@ v_cmp_tru_f16_e64 tma, 0, s2 v_cmp_tru_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00] +v_cmp_tru_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x2f,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_tru_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x2f,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_tru_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x2f,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_tru_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x2f,0xd0,0x01,0x05,0x00,0x00] @@ -48563,11 +50592,14 @@ v_cmp_tru_f16_e64 s[10:11], 0, exec_hi v_cmp_tru_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x00,0x01,0x00] +v_cmp_tru_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x82,0x01,0x00] + v_cmp_tru_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_tru_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_tru_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xee,0x01,0x00] v_cmp_tru_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x02,0x00] @@ -48575,9 +50607,15 @@ v_cmp_tru_f16_e64 s[10:11], 0, v2 v_cmp_tru_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_tru_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x20] + v_cmp_tru_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x40] +v_cmp_tru_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x60] + v_cmp_tru_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x2f,0xd0,0x80,0x04,0x00,0x00] @@ -48674,9 +50712,15 @@ v_cmpx_f_f16_e64 tma, 0, s2 v_cmpx_f_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_f_f16_e64 s[10:11], -1, s2 +// CHECK: 
[0x0a,0x00,0x30,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_f_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x30,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_f_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_f_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x30,0xd0,0x01,0x05,0x00,0x00] @@ -48725,11 +50769,14 @@ v_cmpx_f_f16_e64 s[10:11], 0, exec_hi v_cmpx_f_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_f_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_f_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_f_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_f_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xee,0x01,0x00] v_cmpx_f_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x02,0x00] @@ -48737,9 +50784,15 @@ v_cmpx_f_f16_e64 s[10:11], 0, v2 v_cmpx_f_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_f_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_f_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_f_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_f_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x30,0xd0,0x80,0x04,0x00,0x00] @@ -48836,9 +50889,15 @@ v_cmpx_lt_f16_e64 tma, 0, s2 v_cmpx_lt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x31,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_lt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x31,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_lt_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x31,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_lt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x31,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_lt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x31,0xd0,0x01,0x05,0x00,0x00] @@ -48887,11 +50946,14 @@ v_cmpx_lt_f16_e64 s[10:11], 0, exec_hi v_cmpx_lt_f16_e64 s[10:11], 
0, 0 // CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_lt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_lt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_lt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_lt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xee,0x01,0x00] v_cmpx_lt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x02,0x00] @@ -48899,9 +50961,15 @@ v_cmpx_lt_f16_e64 s[10:11], 0, v2 v_cmpx_lt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_lt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_lt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_lt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_lt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x31,0xd0,0x80,0x04,0x00,0x00] @@ -48998,9 +51066,15 @@ v_cmpx_eq_f16_e64 tma, 0, s2 v_cmpx_eq_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_eq_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_eq_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x32,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_eq_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_eq_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x32,0xd0,0x01,0x05,0x00,0x00] @@ -49049,11 +51123,14 @@ v_cmpx_eq_f16_e64 s[10:11], 0, exec_hi v_cmpx_eq_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_eq_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_eq_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_eq_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_eq_f16_e64 s[10:11], 0, -4.0 +// CHECK: 
[0x0a,0x00,0x32,0xd0,0x80,0xee,0x01,0x00] v_cmpx_eq_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x02,0x00] @@ -49061,9 +51138,15 @@ v_cmpx_eq_f16_e64 s[10:11], 0, v2 v_cmpx_eq_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_eq_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_eq_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_eq_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_eq_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x32,0xd0,0x80,0x04,0x00,0x00] @@ -49160,9 +51243,15 @@ v_cmpx_le_f16_e64 tma, 0, s2 v_cmpx_le_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x33,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_le_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x33,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_le_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x33,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_le_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x33,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_le_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x33,0xd0,0x01,0x05,0x00,0x00] @@ -49211,11 +51300,14 @@ v_cmpx_le_f16_e64 s[10:11], 0, exec_hi v_cmpx_le_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_le_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_le_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_le_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_le_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xee,0x01,0x00] v_cmpx_le_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x02,0x00] @@ -49223,9 +51315,15 @@ v_cmpx_le_f16_e64 s[10:11], 0, v2 v_cmpx_le_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_le_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_le_f16_e64 
s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_le_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_le_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x33,0xd0,0x80,0x04,0x00,0x00] @@ -49322,9 +51420,15 @@ v_cmpx_gt_f16_e64 tma, 0, s2 v_cmpx_gt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_gt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_gt_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x34,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_gt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_gt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x34,0xd0,0x01,0x05,0x00,0x00] @@ -49373,11 +51477,14 @@ v_cmpx_gt_f16_e64 s[10:11], 0, exec_hi v_cmpx_gt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_gt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_gt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_gt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_gt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xee,0x01,0x00] v_cmpx_gt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x02,0x00] @@ -49385,9 +51492,15 @@ v_cmpx_gt_f16_e64 s[10:11], 0, v2 v_cmpx_gt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_gt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_gt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_gt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_gt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x34,0xd0,0x80,0x04,0x00,0x00] @@ -49484,9 +51597,15 @@ v_cmpx_lg_f16_e64 tma, 0, s2 v_cmpx_lg_f16_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0x35,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_lg_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x35,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_lg_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x35,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_lg_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x35,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_lg_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x35,0xd0,0x01,0x05,0x00,0x00] @@ -49535,11 +51654,14 @@ v_cmpx_lg_f16_e64 s[10:11], 0, exec_hi v_cmpx_lg_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_lg_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_lg_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_lg_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_lg_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xee,0x01,0x00] v_cmpx_lg_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x02,0x00] @@ -49547,9 +51669,15 @@ v_cmpx_lg_f16_e64 s[10:11], 0, v2 v_cmpx_lg_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_lg_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_lg_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_lg_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_lg_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x35,0xd0,0x80,0x04,0x00,0x00] @@ -49646,9 +51774,15 @@ v_cmpx_ge_f16_e64 tma, 0, s2 v_cmpx_ge_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_ge_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_ge_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x36,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_ge_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_ge_f16_e64 s[10:11], v1, s2 // CHECK: 
[0x0a,0x00,0x36,0xd0,0x01,0x05,0x00,0x00] @@ -49697,11 +51831,14 @@ v_cmpx_ge_f16_e64 s[10:11], 0, exec_hi v_cmpx_ge_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_ge_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_ge_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_ge_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_ge_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xee,0x01,0x00] v_cmpx_ge_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x02,0x00] @@ -49709,9 +51846,15 @@ v_cmpx_ge_f16_e64 s[10:11], 0, v2 v_cmpx_ge_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_ge_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_ge_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_ge_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_ge_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x36,0xd0,0x80,0x04,0x00,0x00] @@ -49808,9 +51951,15 @@ v_cmpx_o_f16_e64 tma, 0, s2 v_cmpx_o_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x37,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_o_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x37,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_o_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x37,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_o_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x37,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_o_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x37,0xd0,0x01,0x05,0x00,0x00] @@ -49859,11 +52008,14 @@ v_cmpx_o_f16_e64 s[10:11], 0, exec_hi v_cmpx_o_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_o_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_o_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_o_f16_e64 s[10:11], 
0, scc -// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_o_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xee,0x01,0x00] v_cmpx_o_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x02,0x00] @@ -49871,9 +52023,15 @@ v_cmpx_o_f16_e64 s[10:11], 0, v2 v_cmpx_o_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_o_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_o_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_o_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_o_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x37,0xd0,0x80,0x04,0x00,0x00] @@ -49970,9 +52128,15 @@ v_cmpx_u_f16_e64 tma, 0, s2 v_cmpx_u_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_u_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_u_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x38,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_u_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_u_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x38,0xd0,0x01,0x05,0x00,0x00] @@ -50021,11 +52185,14 @@ v_cmpx_u_f16_e64 s[10:11], 0, exec_hi v_cmpx_u_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_u_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_u_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_u_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_u_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xee,0x01,0x00] v_cmpx_u_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x02,0x00] @@ -50033,9 +52200,15 @@ v_cmpx_u_f16_e64 s[10:11], 0, v2 v_cmpx_u_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_u_f16_e64 s[10:11], 
neg(0), s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_u_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_u_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_u_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x38,0xd0,0x80,0x04,0x00,0x00] @@ -50132,9 +52305,15 @@ v_cmpx_nge_f16_e64 tma, 0, s2 v_cmpx_nge_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x39,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nge_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x39,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nge_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x39,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nge_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x39,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nge_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x39,0xd0,0x01,0x05,0x00,0x00] @@ -50183,11 +52362,14 @@ v_cmpx_nge_f16_e64 s[10:11], 0, exec_hi v_cmpx_nge_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nge_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nge_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nge_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nge_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nge_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x02,0x00] @@ -50195,9 +52377,15 @@ v_cmpx_nge_f16_e64 s[10:11], 0, v2 v_cmpx_nge_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nge_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nge_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nge_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nge_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x39,0xd0,0x80,0x04,0x00,0x00] @@ -50294,9 +52482,15 @@ 
v_cmpx_nlg_f16_e64 tma, 0, s2 v_cmpx_nlg_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nlg_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nlg_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3a,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nlg_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nlg_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3a,0xd0,0x01,0x05,0x00,0x00] @@ -50345,11 +52539,14 @@ v_cmpx_nlg_f16_e64 s[10:11], 0, exec_hi v_cmpx_nlg_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nlg_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nlg_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nlg_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nlg_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nlg_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x02,0x00] @@ -50357,9 +52554,15 @@ v_cmpx_nlg_f16_e64 s[10:11], 0, v2 v_cmpx_nlg_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nlg_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nlg_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nlg_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nlg_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x3a,0xd0,0x80,0x04,0x00,0x00] @@ -50456,9 +52659,15 @@ v_cmpx_ngt_f16_e64 tma, 0, s2 v_cmpx_ngt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_ngt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3b,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_ngt_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3b,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_ngt_f16_e64 s[10:11], -4.0, s2 +// CHECK: 
[0x0a,0x00,0x3b,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_ngt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3b,0xd0,0x01,0x05,0x00,0x00] @@ -50507,11 +52716,14 @@ v_cmpx_ngt_f16_e64 s[10:11], 0, exec_hi v_cmpx_ngt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_ngt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_ngt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_ngt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_ngt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xee,0x01,0x00] v_cmpx_ngt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x02,0x00] @@ -50519,9 +52731,15 @@ v_cmpx_ngt_f16_e64 s[10:11], 0, v2 v_cmpx_ngt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_ngt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_ngt_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_ngt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_ngt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x3b,0xd0,0x80,0x04,0x00,0x00] @@ -50618,9 +52836,15 @@ v_cmpx_nle_f16_e64 tma, 0, s2 v_cmpx_nle_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nle_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nle_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3c,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nle_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nle_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3c,0xd0,0x01,0x05,0x00,0x00] @@ -50669,11 +52893,14 @@ v_cmpx_nle_f16_e64 s[10:11], 0, exec_hi v_cmpx_nle_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nle_f16_e64 s[10:11], 0, -1 +// CHECK: 
[0x0a,0x00,0x3c,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nle_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nle_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nle_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nle_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x02,0x00] @@ -50681,9 +52908,15 @@ v_cmpx_nle_f16_e64 s[10:11], 0, v2 v_cmpx_nle_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nle_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nle_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nle_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nle_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x3c,0xd0,0x80,0x04,0x00,0x00] @@ -50780,9 +53013,15 @@ v_cmpx_neq_f16_e64 tma, 0, s2 v_cmpx_neq_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_neq_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3d,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_neq_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3d,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_neq_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3d,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_neq_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3d,0xd0,0x01,0x05,0x00,0x00] @@ -50831,11 +53070,14 @@ v_cmpx_neq_f16_e64 s[10:11], 0, exec_hi v_cmpx_neq_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_neq_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_neq_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_neq_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_neq_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xee,0x01,0x00] v_cmpx_neq_f16_e64 s[10:11], 0, v2 // CHECK: 
[0x0a,0x00,0x3d,0xd0,0x80,0x04,0x02,0x00] @@ -50843,9 +53085,15 @@ v_cmpx_neq_f16_e64 s[10:11], 0, v2 v_cmpx_neq_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_neq_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_neq_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_neq_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_neq_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x3d,0xd0,0x80,0x04,0x00,0x00] @@ -50942,9 +53190,15 @@ v_cmpx_nlt_f16_e64 tma, 0, s2 v_cmpx_nlt_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nlt_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nlt_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3e,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nlt_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nlt_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3e,0xd0,0x01,0x05,0x00,0x00] @@ -50993,11 +53247,14 @@ v_cmpx_nlt_f16_e64 s[10:11], 0, exec_hi v_cmpx_nlt_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nlt_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nlt_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nlt_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nlt_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nlt_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x02,0x00] @@ -51005,9 +53262,15 @@ v_cmpx_nlt_f16_e64 s[10:11], 0, v2 v_cmpx_nlt_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nlt_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nlt_f16_e64 s[10:11], 0, -s2 // CHECK: 
[0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nlt_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nlt_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x3e,0xd0,0x80,0x04,0x00,0x00] @@ -51104,9 +53367,15 @@ v_cmpx_tru_f16_e64 tma, 0, s2 v_cmpx_tru_f16_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_tru_f16_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x3f,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_tru_f16_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x3f,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_tru_f16_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x3f,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_tru_f16_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x3f,0xd0,0x01,0x05,0x00,0x00] @@ -51155,11 +53424,14 @@ v_cmpx_tru_f16_e64 s[10:11], 0, exec_hi v_cmpx_tru_f16_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_tru_f16_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_tru_f16_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_tru_f16_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_tru_f16_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xee,0x01,0x00] v_cmpx_tru_f16_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x02,0x00] @@ -51167,9 +53439,15 @@ v_cmpx_tru_f16_e64 s[10:11], 0, v2 v_cmpx_tru_f16_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_tru_f16_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_tru_f16_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_tru_f16_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_tru_f16_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x3f,0xd0,0x80,0x04,0x00,0x00] @@ -51266,9 +53544,15 @@ v_cmp_f_f32_e64 tma, 0, s2 v_cmp_f_f32_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0x40,0xd0,0x80,0x04,0x00,0x00] +v_cmp_f_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x40,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_f_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x40,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_f_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x40,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_f_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x40,0xd0,0x01,0x05,0x00,0x00] @@ -51317,11 +53601,14 @@ v_cmp_f_f32_e64 s[10:11], 0, exec_hi v_cmp_f_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x00,0x01,0x00] +v_cmp_f_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x82,0x01,0x00] + v_cmp_f_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_f_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_f_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xee,0x01,0x00] v_cmp_f_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x02,0x00] @@ -51329,9 +53616,15 @@ v_cmp_f_f32_e64 s[10:11], 0, v2 v_cmp_f_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_f_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x00,0x20] + v_cmp_f_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x00,0x40] +v_cmp_f_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x00,0x60] + v_cmp_f_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x40,0xd0,0x80,0x04,0x00,0x00] @@ -51428,9 +53721,15 @@ v_cmp_lt_f32_e64 tma, 0, s2 v_cmp_lt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x41,0xd0,0x80,0x04,0x00,0x00] +v_cmp_lt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x41,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_lt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x41,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_lt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x41,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_lt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x41,0xd0,0x01,0x05,0x00,0x00] @@ -51479,11 +53778,14 @@ 
v_cmp_lt_f32_e64 s[10:11], 0, exec_hi v_cmp_lt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x00,0x01,0x00] +v_cmp_lt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x82,0x01,0x00] + v_cmp_lt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_lt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_lt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xee,0x01,0x00] v_cmp_lt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x02,0x00] @@ -51491,9 +53793,15 @@ v_cmp_lt_f32_e64 s[10:11], 0, v2 v_cmp_lt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_lt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x00,0x20] + v_cmp_lt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x00,0x40] +v_cmp_lt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x00,0x60] + v_cmp_lt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x41,0xd0,0x80,0x04,0x00,0x00] @@ -51590,9 +53898,15 @@ v_cmp_eq_f32_e64 tma, 0, s2 v_cmp_eq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x42,0xd0,0x80,0x04,0x00,0x00] +v_cmp_eq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x42,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_eq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x42,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_eq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x42,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_eq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x42,0xd0,0x01,0x05,0x00,0x00] @@ -51641,11 +53955,14 @@ v_cmp_eq_f32_e64 s[10:11], 0, exec_hi v_cmp_eq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x00,0x01,0x00] +v_cmp_eq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x82,0x01,0x00] + v_cmp_eq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_eq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_eq_f32_e64 
s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xee,0x01,0x00] v_cmp_eq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x02,0x00] @@ -51653,9 +53970,15 @@ v_cmp_eq_f32_e64 s[10:11], 0, v2 v_cmp_eq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_eq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x00,0x20] + v_cmp_eq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x00,0x40] +v_cmp_eq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x00,0x60] + v_cmp_eq_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x42,0xd0,0x80,0x04,0x00,0x00] @@ -51752,9 +54075,15 @@ v_cmp_le_f32_e64 tma, 0, s2 v_cmp_le_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x43,0xd0,0x80,0x04,0x00,0x00] +v_cmp_le_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x43,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_le_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x43,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_le_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x43,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_le_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x43,0xd0,0x01,0x05,0x00,0x00] @@ -51803,11 +54132,14 @@ v_cmp_le_f32_e64 s[10:11], 0, exec_hi v_cmp_le_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x00,0x01,0x00] +v_cmp_le_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x82,0x01,0x00] + v_cmp_le_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_le_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_le_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xee,0x01,0x00] v_cmp_le_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x02,0x00] @@ -51815,9 +54147,15 @@ v_cmp_le_f32_e64 s[10:11], 0, v2 v_cmp_le_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_le_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x00,0x20] + 
v_cmp_le_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x00,0x40] +v_cmp_le_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x00,0x60] + v_cmp_le_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x43,0xd0,0x80,0x04,0x00,0x00] @@ -51914,9 +54252,15 @@ v_cmp_gt_f32_e64 tma, 0, s2 v_cmp_gt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x44,0xd0,0x80,0x04,0x00,0x00] +v_cmp_gt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x44,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_gt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x44,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_gt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x44,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_gt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x44,0xd0,0x01,0x05,0x00,0x00] @@ -51965,11 +54309,14 @@ v_cmp_gt_f32_e64 s[10:11], 0, exec_hi v_cmp_gt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x00,0x01,0x00] +v_cmp_gt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x82,0x01,0x00] + v_cmp_gt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_gt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_gt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xee,0x01,0x00] v_cmp_gt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x02,0x00] @@ -51977,9 +54324,15 @@ v_cmp_gt_f32_e64 s[10:11], 0, v2 v_cmp_gt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_gt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x00,0x20] + v_cmp_gt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x00,0x40] +v_cmp_gt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x00,0x60] + v_cmp_gt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x44,0xd0,0x80,0x04,0x00,0x00] @@ -52076,9 +54429,15 @@ v_cmp_lg_f32_e64 tma, 0, s2 v_cmp_lg_f32_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0x45,0xd0,0x80,0x04,0x00,0x00] +v_cmp_lg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x45,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_lg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x45,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_lg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x45,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_lg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x45,0xd0,0x01,0x05,0x00,0x00] @@ -52127,11 +54486,14 @@ v_cmp_lg_f32_e64 s[10:11], 0, exec_hi v_cmp_lg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x00,0x01,0x00] +v_cmp_lg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x82,0x01,0x00] + v_cmp_lg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_lg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_lg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xee,0x01,0x00] v_cmp_lg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x02,0x00] @@ -52139,9 +54501,15 @@ v_cmp_lg_f32_e64 s[10:11], 0, v2 v_cmp_lg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_lg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x00,0x20] + v_cmp_lg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x00,0x40] +v_cmp_lg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x00,0x60] + v_cmp_lg_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x45,0xd0,0x80,0x04,0x00,0x00] @@ -52238,9 +54606,15 @@ v_cmp_ge_f32_e64 tma, 0, s2 v_cmp_ge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x46,0xd0,0x80,0x04,0x00,0x00] +v_cmp_ge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x46,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_ge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x46,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_ge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x46,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_ge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x46,0xd0,0x01,0x05,0x00,0x00] @@ 
-52289,11 +54663,14 @@ v_cmp_ge_f32_e64 s[10:11], 0, exec_hi v_cmp_ge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x00,0x01,0x00] +v_cmp_ge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x82,0x01,0x00] + v_cmp_ge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_ge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_ge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xee,0x01,0x00] v_cmp_ge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x02,0x00] @@ -52301,9 +54678,15 @@ v_cmp_ge_f32_e64 s[10:11], 0, v2 v_cmp_ge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_ge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x00,0x20] + v_cmp_ge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x00,0x40] +v_cmp_ge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x00,0x60] + v_cmp_ge_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x46,0xd0,0x80,0x04,0x00,0x00] @@ -52400,9 +54783,15 @@ v_cmp_o_f32_e64 tma, 0, s2 v_cmp_o_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x47,0xd0,0x80,0x04,0x00,0x00] +v_cmp_o_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x47,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_o_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x47,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_o_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x47,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_o_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x47,0xd0,0x01,0x05,0x00,0x00] @@ -52451,11 +54840,14 @@ v_cmp_o_f32_e64 s[10:11], 0, exec_hi v_cmp_o_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x00,0x01,0x00] +v_cmp_o_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x82,0x01,0x00] + v_cmp_o_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_o_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xfa,0x01,0x00] 
+v_cmp_o_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xee,0x01,0x00] v_cmp_o_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x02,0x00] @@ -52463,9 +54855,15 @@ v_cmp_o_f32_e64 s[10:11], 0, v2 v_cmp_o_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_o_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x00,0x20] + v_cmp_o_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x00,0x40] +v_cmp_o_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x00,0x60] + v_cmp_o_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x47,0xd0,0x80,0x04,0x00,0x00] @@ -52562,9 +54960,15 @@ v_cmp_u_f32_e64 tma, 0, s2 v_cmp_u_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x48,0xd0,0x80,0x04,0x00,0x00] +v_cmp_u_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x48,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_u_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x48,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_u_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x48,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_u_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x48,0xd0,0x01,0x05,0x00,0x00] @@ -52613,11 +55017,14 @@ v_cmp_u_f32_e64 s[10:11], 0, exec_hi v_cmp_u_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x00,0x01,0x00] +v_cmp_u_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x82,0x01,0x00] + v_cmp_u_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_u_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_u_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xee,0x01,0x00] v_cmp_u_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x02,0x00] @@ -52625,9 +55032,15 @@ v_cmp_u_f32_e64 s[10:11], 0, v2 v_cmp_u_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_u_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x00,0x20] + v_cmp_u_f32_e64 
s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x00,0x40] +v_cmp_u_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x00,0x60] + v_cmp_u_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x48,0xd0,0x80,0x04,0x00,0x00] @@ -52724,9 +55137,15 @@ v_cmp_nge_f32_e64 tma, 0, s2 v_cmp_nge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x49,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x49,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x49,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x49,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x49,0xd0,0x01,0x05,0x00,0x00] @@ -52775,11 +55194,14 @@ v_cmp_nge_f32_e64 s[10:11], 0, exec_hi v_cmp_nge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xee,0x01,0x00] v_cmp_nge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x02,0x00] @@ -52787,9 +55209,15 @@ v_cmp_nge_f32_e64 s[10:11], 0, v2 v_cmp_nge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nge_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x49,0xd0,0x80,0x04,0x00,0x00] @@ -52886,9 +55314,15 @@ v_cmp_nlg_f32_e64 tma, 0, s2 v_cmp_nlg_f32_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nlg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x4a,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nlg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x4a,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nlg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x4a,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nlg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x4a,0xd0,0x01,0x05,0x00,0x00] @@ -52937,11 +55371,14 @@ v_cmp_nlg_f32_e64 s[10:11], 0, exec_hi v_cmp_nlg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nlg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nlg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nlg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nlg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xee,0x01,0x00] v_cmp_nlg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x02,0x00] @@ -52949,9 +55386,15 @@ v_cmp_nlg_f32_e64 s[10:11], 0, v2 v_cmp_nlg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nlg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nlg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nlg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nlg_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x4a,0xd0,0x80,0x04,0x00,0x00] @@ -53048,9 +55491,15 @@ v_cmp_ngt_f32_e64 tma, 0, s2 v_cmp_ngt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00] +v_cmp_ngt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x4b,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_ngt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x4b,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_ngt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x4b,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_ngt_f32_e64 s[10:11], v1, s2 // CHECK: 
[0x0a,0x00,0x4b,0xd0,0x01,0x05,0x00,0x00] @@ -53099,11 +55548,14 @@ v_cmp_ngt_f32_e64 s[10:11], 0, exec_hi v_cmp_ngt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x00,0x01,0x00] +v_cmp_ngt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x82,0x01,0x00] + v_cmp_ngt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_ngt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_ngt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xee,0x01,0x00] v_cmp_ngt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x02,0x00] @@ -53111,9 +55563,15 @@ v_cmp_ngt_f32_e64 s[10:11], 0, v2 v_cmp_ngt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_ngt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x20] + v_cmp_ngt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x40] +v_cmp_ngt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x60] + v_cmp_ngt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x4b,0xd0,0x80,0x04,0x00,0x00] @@ -53210,9 +55668,15 @@ v_cmp_nle_f32_e64 tma, 0, s2 v_cmp_nle_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nle_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x4c,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nle_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x4c,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nle_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x4c,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nle_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x4c,0xd0,0x01,0x05,0x00,0x00] @@ -53261,11 +55725,14 @@ v_cmp_nle_f32_e64 s[10:11], 0, exec_hi v_cmp_nle_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nle_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nle_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nle_f32_e64 
s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nle_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xee,0x01,0x00] v_cmp_nle_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x02,0x00] @@ -53273,9 +55740,15 @@ v_cmp_nle_f32_e64 s[10:11], 0, v2 v_cmp_nle_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nle_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nle_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nle_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nle_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x4c,0xd0,0x80,0x04,0x00,0x00] @@ -53372,9 +55845,15 @@ v_cmp_neq_f32_e64 tma, 0, s2 v_cmp_neq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00] +v_cmp_neq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x4d,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_neq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x4d,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_neq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x4d,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_neq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x4d,0xd0,0x01,0x05,0x00,0x00] @@ -53423,11 +55902,14 @@ v_cmp_neq_f32_e64 s[10:11], 0, exec_hi v_cmp_neq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x00,0x01,0x00] +v_cmp_neq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x82,0x01,0x00] + v_cmp_neq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_neq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_neq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xee,0x01,0x00] v_cmp_neq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x02,0x00] @@ -53435,9 +55917,15 @@ v_cmp_neq_f32_e64 s[10:11], 0, v2 v_cmp_neq_f32_e64 s[10:11], 0, v255 // CHECK: 
[0x0a,0x00,0x4d,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_neq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x20] + v_cmp_neq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x40] +v_cmp_neq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x60] + v_cmp_neq_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x4d,0xd0,0x80,0x04,0x00,0x00] @@ -53534,9 +56022,15 @@ v_cmp_nlt_f32_e64 tma, 0, s2 v_cmp_nlt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00] +v_cmp_nlt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x4e,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_nlt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x4e,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_nlt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x4e,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_nlt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x4e,0xd0,0x01,0x05,0x00,0x00] @@ -53585,11 +56079,14 @@ v_cmp_nlt_f32_e64 s[10:11], 0, exec_hi v_cmp_nlt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x00,0x01,0x00] +v_cmp_nlt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x82,0x01,0x00] + v_cmp_nlt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_nlt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_nlt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xee,0x01,0x00] v_cmp_nlt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x02,0x00] @@ -53597,9 +56094,15 @@ v_cmp_nlt_f32_e64 s[10:11], 0, v2 v_cmp_nlt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_nlt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x20] + v_cmp_nlt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x40] +v_cmp_nlt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x60] + v_cmp_nlt_f32_e64 s[10:11], 0, s2 clamp // CHECK: 
[0x0a,0x80,0x4e,0xd0,0x80,0x04,0x00,0x00] @@ -53696,9 +56199,15 @@ v_cmp_tru_f32_e64 tma, 0, s2 v_cmp_tru_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00] +v_cmp_tru_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x4f,0xd0,0xc1,0x04,0x00,0x00] + v_cmp_tru_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x4f,0xd0,0xf0,0x04,0x00,0x00] +v_cmp_tru_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x4f,0xd0,0xf7,0x04,0x00,0x00] + v_cmp_tru_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x4f,0xd0,0x01,0x05,0x00,0x00] @@ -53747,11 +56256,14 @@ v_cmp_tru_f32_e64 s[10:11], 0, exec_hi v_cmp_tru_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x00,0x01,0x00] +v_cmp_tru_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x82,0x01,0x00] + v_cmp_tru_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xe0,0x01,0x00] -v_cmp_tru_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xfa,0x01,0x00] +v_cmp_tru_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xee,0x01,0x00] v_cmp_tru_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x02,0x00] @@ -53759,9 +56271,15 @@ v_cmp_tru_f32_e64 s[10:11], 0, v2 v_cmp_tru_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xfe,0x03,0x00] +v_cmp_tru_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x20] + v_cmp_tru_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x40] +v_cmp_tru_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x60] + v_cmp_tru_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x4f,0xd0,0x80,0x04,0x00,0x00] @@ -53858,9 +56376,15 @@ v_cmpx_f_f32_e64 tma, 0, s2 v_cmpx_f_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x50,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_f_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x50,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_f_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x50,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_f_f32_e64 s[10:11], 
-4.0, s2 +// CHECK: [0x0a,0x00,0x50,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_f_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x50,0xd0,0x01,0x05,0x00,0x00] @@ -53909,11 +56433,14 @@ v_cmpx_f_f32_e64 s[10:11], 0, exec_hi v_cmpx_f_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_f_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_f_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_f_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_f_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xee,0x01,0x00] v_cmpx_f_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x02,0x00] @@ -53921,9 +56448,15 @@ v_cmpx_f_f32_e64 s[10:11], 0, v2 v_cmpx_f_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_f_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_f_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_f_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_f_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x50,0xd0,0x80,0x04,0x00,0x00] @@ -54020,9 +56553,15 @@ v_cmpx_lt_f32_e64 tma, 0, s2 v_cmpx_lt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x51,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_lt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x51,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_lt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x51,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_lt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x51,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_lt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x51,0xd0,0x01,0x05,0x00,0x00] @@ -54071,11 +56610,14 @@ v_cmpx_lt_f32_e64 s[10:11], 0, exec_hi v_cmpx_lt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_lt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x82,0x01,0x00] + 
v_cmpx_lt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_lt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_lt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xee,0x01,0x00] v_cmpx_lt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x02,0x00] @@ -54083,9 +56625,15 @@ v_cmpx_lt_f32_e64 s[10:11], 0, v2 v_cmpx_lt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_lt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_lt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_lt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_lt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x51,0xd0,0x80,0x04,0x00,0x00] @@ -54182,9 +56730,15 @@ v_cmpx_eq_f32_e64 tma, 0, s2 v_cmpx_eq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x52,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_eq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x52,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_eq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x52,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_eq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x52,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_eq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x52,0xd0,0x01,0x05,0x00,0x00] @@ -54233,11 +56787,14 @@ v_cmpx_eq_f32_e64 s[10:11], 0, exec_hi v_cmpx_eq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_eq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_eq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_eq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_eq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xee,0x01,0x00] v_cmpx_eq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x02,0x00] @@ -54245,9 +56802,15 @@ 
v_cmpx_eq_f32_e64 s[10:11], 0, v2 v_cmpx_eq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_eq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_eq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_eq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_eq_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x52,0xd0,0x80,0x04,0x00,0x00] @@ -54344,9 +56907,15 @@ v_cmpx_le_f32_e64 tma, 0, s2 v_cmpx_le_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x53,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_le_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x53,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_le_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x53,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_le_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x53,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_le_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x53,0xd0,0x01,0x05,0x00,0x00] @@ -54395,11 +56964,14 @@ v_cmpx_le_f32_e64 s[10:11], 0, exec_hi v_cmpx_le_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_le_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_le_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_le_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_le_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xee,0x01,0x00] v_cmpx_le_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x04,0x02,0x00] @@ -54407,9 +56979,15 @@ v_cmpx_le_f32_e64 s[10:11], 0, v2 v_cmpx_le_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_le_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_le_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_le_f32_e64 s[10:11], neg(0), -s2 +// CHECK: 
[0x0a,0x00,0x53,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_le_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x53,0xd0,0x80,0x04,0x00,0x00] @@ -54506,9 +57084,15 @@ v_cmpx_gt_f32_e64 tma, 0, s2 v_cmpx_gt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x54,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_gt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x54,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_gt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x54,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_gt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x54,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_gt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x54,0xd0,0x01,0x05,0x00,0x00] @@ -54557,11 +57141,14 @@ v_cmpx_gt_f32_e64 s[10:11], 0, exec_hi v_cmpx_gt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_gt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_gt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_gt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_gt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xee,0x01,0x00] v_cmpx_gt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x02,0x00] @@ -54569,9 +57156,15 @@ v_cmpx_gt_f32_e64 s[10:11], 0, v2 v_cmpx_gt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_gt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_gt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_gt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_gt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x54,0xd0,0x80,0x04,0x00,0x00] @@ -54668,9 +57261,15 @@ v_cmpx_lg_f32_e64 tma, 0, s2 v_cmpx_lg_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x55,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_lg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x55,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_lg_f32_e64 
s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x55,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_lg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x55,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_lg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x55,0xd0,0x01,0x05,0x00,0x00] @@ -54719,11 +57318,14 @@ v_cmpx_lg_f32_e64 s[10:11], 0, exec_hi v_cmpx_lg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_lg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_lg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_lg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_lg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xee,0x01,0x00] v_cmpx_lg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x02,0x00] @@ -54731,9 +57333,15 @@ v_cmpx_lg_f32_e64 s[10:11], 0, v2 v_cmpx_lg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_lg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_lg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_lg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_lg_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x55,0xd0,0x80,0x04,0x00,0x00] @@ -54830,9 +57438,15 @@ v_cmpx_ge_f32_e64 tma, 0, s2 v_cmpx_ge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x56,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_ge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x56,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_ge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x56,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_ge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x56,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_ge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x56,0xd0,0x01,0x05,0x00,0x00] @@ -54881,11 +57495,14 @@ v_cmpx_ge_f32_e64 s[10:11], 0, exec_hi v_cmpx_ge_f32_e64 s[10:11], 0, 0 // CHECK: 
[0x0a,0x00,0x56,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_ge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_ge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_ge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_ge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xee,0x01,0x00] v_cmpx_ge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x02,0x00] @@ -54893,9 +57510,15 @@ v_cmpx_ge_f32_e64 s[10:11], 0, v2 v_cmpx_ge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_ge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_ge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_ge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_ge_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x56,0xd0,0x80,0x04,0x00,0x00] @@ -54992,9 +57615,15 @@ v_cmpx_o_f32_e64 tma, 0, s2 v_cmpx_o_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x57,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_o_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x57,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_o_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x57,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_o_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x57,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_o_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x57,0xd0,0x01,0x05,0x00,0x00] @@ -55043,11 +57672,14 @@ v_cmpx_o_f32_e64 s[10:11], 0, exec_hi v_cmpx_o_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_o_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_o_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_o_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_o_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xee,0x01,0x00] 
v_cmpx_o_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x02,0x00] @@ -55055,9 +57687,15 @@ v_cmpx_o_f32_e64 s[10:11], 0, v2 v_cmpx_o_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_o_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_o_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_o_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_o_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x57,0xd0,0x80,0x04,0x00,0x00] @@ -55154,9 +57792,15 @@ v_cmpx_u_f32_e64 tma, 0, s2 v_cmpx_u_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x58,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_u_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x58,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_u_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x58,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_u_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x58,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_u_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x58,0xd0,0x01,0x05,0x00,0x00] @@ -55205,11 +57849,14 @@ v_cmpx_u_f32_e64 s[10:11], 0, exec_hi v_cmpx_u_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_u_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_u_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_u_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_u_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xee,0x01,0x00] v_cmpx_u_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x04,0x02,0x00] @@ -55217,9 +57864,15 @@ v_cmpx_u_f32_e64 s[10:11], 0, v2 v_cmpx_u_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_u_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_u_f32_e64 s[10:11], 0, -s2 // CHECK: 
[0x0a,0x00,0x58,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_u_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_u_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x58,0xd0,0x80,0x04,0x00,0x00] @@ -55316,9 +57969,15 @@ v_cmpx_nge_f32_e64 tma, 0, s2 v_cmpx_nge_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x59,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nge_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x59,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nge_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x59,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nge_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x59,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nge_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x59,0xd0,0x01,0x05,0x00,0x00] @@ -55367,11 +58026,14 @@ v_cmpx_nge_f32_e64 s[10:11], 0, exec_hi v_cmpx_nge_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nge_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nge_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nge_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nge_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nge_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x02,0x00] @@ -55379,9 +58041,15 @@ v_cmpx_nge_f32_e64 s[10:11], 0, v2 v_cmpx_nge_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nge_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nge_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nge_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nge_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x59,0xd0,0x80,0x04,0x00,0x00] @@ -55478,9 +58146,15 @@ v_cmpx_nlg_f32_e64 tma, 0, s2 v_cmpx_nlg_f32_e64 ttmp[10:11], 0, s2 // CHECK: 
[0x7a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nlg_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x5a,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nlg_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x5a,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nlg_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x5a,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nlg_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x5a,0xd0,0x01,0x05,0x00,0x00] @@ -55529,11 +58203,14 @@ v_cmpx_nlg_f32_e64 s[10:11], 0, exec_hi v_cmpx_nlg_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nlg_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nlg_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nlg_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nlg_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nlg_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x02,0x00] @@ -55541,9 +58218,15 @@ v_cmpx_nlg_f32_e64 s[10:11], 0, v2 v_cmpx_nlg_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nlg_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nlg_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nlg_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nlg_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x5a,0xd0,0x80,0x04,0x00,0x00] @@ -55640,9 +58323,15 @@ v_cmpx_ngt_f32_e64 tma, 0, s2 v_cmpx_ngt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_ngt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x5b,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_ngt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x5b,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_ngt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x5b,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_ngt_f32_e64 s[10:11], v1, s2 // CHECK: 
[0x0a,0x00,0x5b,0xd0,0x01,0x05,0x00,0x00] @@ -55691,11 +58380,14 @@ v_cmpx_ngt_f32_e64 s[10:11], 0, exec_hi v_cmpx_ngt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_ngt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_ngt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_ngt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_ngt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xee,0x01,0x00] v_cmpx_ngt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x02,0x00] @@ -55703,9 +58395,15 @@ v_cmpx_ngt_f32_e64 s[10:11], 0, v2 v_cmpx_ngt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_ngt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_ngt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_ngt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_ngt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x5b,0xd0,0x80,0x04,0x00,0x00] @@ -55802,9 +58500,15 @@ v_cmpx_nle_f32_e64 tma, 0, s2 v_cmpx_nle_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nle_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x5c,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nle_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x5c,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nle_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x5c,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nle_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x5c,0xd0,0x01,0x05,0x00,0x00] @@ -55853,11 +58557,14 @@ v_cmpx_nle_f32_e64 s[10:11], 0, exec_hi v_cmpx_nle_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nle_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nle_f32_e64 s[10:11], 0, 0.5 // CHECK: 
[0x0a,0x00,0x5c,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nle_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nle_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nle_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x02,0x00] @@ -55865,9 +58572,15 @@ v_cmpx_nle_f32_e64 s[10:11], 0, v2 v_cmpx_nle_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nle_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nle_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nle_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_nle_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x5c,0xd0,0x80,0x04,0x00,0x00] @@ -55964,9 +58677,15 @@ v_cmpx_neq_f32_e64 tma, 0, s2 v_cmpx_neq_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_neq_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x5d,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_neq_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x5d,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_neq_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x5d,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_neq_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x5d,0xd0,0x01,0x05,0x00,0x00] @@ -56015,11 +58734,14 @@ v_cmpx_neq_f32_e64 s[10:11], 0, exec_hi v_cmpx_neq_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_neq_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_neq_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_neq_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_neq_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xee,0x01,0x00] v_cmpx_neq_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x02,0x00] @@ -56027,9 +58749,15 @@ v_cmpx_neq_f32_e64 s[10:11], 0, v2 
v_cmpx_neq_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_neq_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_neq_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_neq_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_neq_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x5d,0xd0,0x80,0x04,0x00,0x00] @@ -56126,9 +58854,15 @@ v_cmpx_nlt_f32_e64 tma, 0, s2 v_cmpx_nlt_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_nlt_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x5e,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_nlt_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x5e,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_nlt_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x5e,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_nlt_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x5e,0xd0,0x01,0x05,0x00,0x00] @@ -56177,11 +58911,14 @@ v_cmpx_nlt_f32_e64 s[10:11], 0, exec_hi v_cmpx_nlt_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_nlt_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_nlt_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_nlt_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_nlt_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xee,0x01,0x00] v_cmpx_nlt_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x02,0x00] @@ -56189,9 +58926,15 @@ v_cmpx_nlt_f32_e64 s[10:11], 0, v2 v_cmpx_nlt_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_nlt_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_nlt_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_nlt_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x60] + 
v_cmpx_nlt_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x5e,0xd0,0x80,0x04,0x00,0x00] @@ -56288,9 +59031,15 @@ v_cmpx_tru_f32_e64 tma, 0, s2 v_cmpx_tru_f32_e64 ttmp[10:11], 0, s2 // CHECK: [0x7a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00] +v_cmpx_tru_f32_e64 s[10:11], -1, s2 +// CHECK: [0x0a,0x00,0x5f,0xd0,0xc1,0x04,0x00,0x00] + v_cmpx_tru_f32_e64 s[10:11], 0.5, s2 // CHECK: [0x0a,0x00,0x5f,0xd0,0xf0,0x04,0x00,0x00] +v_cmpx_tru_f32_e64 s[10:11], -4.0, s2 +// CHECK: [0x0a,0x00,0x5f,0xd0,0xf7,0x04,0x00,0x00] + v_cmpx_tru_f32_e64 s[10:11], v1, s2 // CHECK: [0x0a,0x00,0x5f,0xd0,0x01,0x05,0x00,0x00] @@ -56339,11 +59088,14 @@ v_cmpx_tru_f32_e64 s[10:11], 0, exec_hi v_cmpx_tru_f32_e64 s[10:11], 0, 0 // CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x00,0x01,0x00] +v_cmpx_tru_f32_e64 s[10:11], 0, -1 +// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x82,0x01,0x00] + v_cmpx_tru_f32_e64 s[10:11], 0, 0.5 // CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xe0,0x01,0x00] -v_cmpx_tru_f32_e64 s[10:11], 0, scc -// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xfa,0x01,0x00] +v_cmpx_tru_f32_e64 s[10:11], 0, -4.0 +// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xee,0x01,0x00] v_cmpx_tru_f32_e64 s[10:11], 0, v2 // CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x02,0x00] @@ -56351,9 +59103,15 @@ v_cmpx_tru_f32_e64 s[10:11], 0, v2 v_cmpx_tru_f32_e64 s[10:11], 0, v255 // CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xfe,0x03,0x00] +v_cmpx_tru_f32_e64 s[10:11], neg(0), s2 +// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x20] + v_cmpx_tru_f32_e64 s[10:11], 0, -s2 // CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x40] +v_cmpx_tru_f32_e64 s[10:11], neg(0), -s2 +// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x60] + v_cmpx_tru_f32_e64 s[10:11], 0, s2 clamp // CHECK: [0x0a,0x80,0x5f,0xd0,0x80,0x04,0x00,0x00] @@ -56438,9 +59196,15 @@ v_cmp_f_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_f_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x60,0xd0,0x80,0x08,0x00,0x00] +v_cmp_f_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x60,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_f_f64_e64 s[10:11], 
0.5, s[4:5] // CHECK: [0x0a,0x00,0x60,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_f_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x60,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_f_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x60,0xd0,0x01,0x09,0x00,0x00] @@ -56450,9 +59214,15 @@ v_cmp_f_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_f_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x00,0x01,0x00] +v_cmp_f_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x82,0x01,0x00] + v_cmp_f_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_f_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xee,0x01,0x00] + v_cmp_f_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x04,0x02,0x00] @@ -56552,9 +59322,15 @@ v_cmp_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_lt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x61,0xd0,0x80,0x08,0x00,0x00] +v_cmp_lt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x61,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_lt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x61,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_lt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x61,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_lt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x61,0xd0,0x01,0x09,0x00,0x00] @@ -56564,9 +59340,15 @@ v_cmp_lt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_lt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x00,0x01,0x00] +v_cmp_lt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x82,0x01,0x00] + v_cmp_lt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x61,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_lt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0xee,0x01,0x00] + v_cmp_lt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x04,0x02,0x00] @@ -56666,9 +59448,15 @@ v_cmp_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_eq_f64_e64 s[10:11], 0, s[4:5] // CHECK: 
[0x0a,0x00,0x62,0xd0,0x80,0x08,0x00,0x00] +v_cmp_eq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x62,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_eq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x62,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_eq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x62,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_eq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x62,0xd0,0x01,0x09,0x00,0x00] @@ -56678,9 +59466,15 @@ v_cmp_eq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_eq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x00,0x01,0x00] +v_cmp_eq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x82,0x01,0x00] + v_cmp_eq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_eq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xee,0x01,0x00] + v_cmp_eq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x04,0x02,0x00] @@ -56780,9 +59574,15 @@ v_cmp_le_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_le_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x63,0xd0,0x80,0x08,0x00,0x00] +v_cmp_le_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x63,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_le_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x63,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_le_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x63,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_le_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x63,0xd0,0x01,0x09,0x00,0x00] @@ -56792,9 +59592,15 @@ v_cmp_le_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_le_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x00,0x01,0x00] +v_cmp_le_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x82,0x01,0x00] + v_cmp_le_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x63,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_le_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0xee,0x01,0x00] + v_cmp_le_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: 
[0x0a,0x00,0x63,0xd0,0x04,0x04,0x02,0x00] @@ -56894,9 +59700,15 @@ v_cmp_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_gt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x64,0xd0,0x80,0x08,0x00,0x00] +v_cmp_gt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x64,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_gt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x64,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_gt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x64,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_gt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x64,0xd0,0x01,0x09,0x00,0x00] @@ -56906,9 +59718,15 @@ v_cmp_gt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_gt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x00,0x01,0x00] +v_cmp_gt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x82,0x01,0x00] + v_cmp_gt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_gt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xee,0x01,0x00] + v_cmp_gt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x04,0x02,0x00] @@ -57008,9 +59826,15 @@ v_cmp_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_lg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x65,0xd0,0x80,0x08,0x00,0x00] +v_cmp_lg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x65,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_lg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x65,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_lg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x65,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_lg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x65,0xd0,0x01,0x09,0x00,0x00] @@ -57020,9 +59844,15 @@ v_cmp_lg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_lg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x00,0x01,0x00] +v_cmp_lg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x82,0x01,0x00] + v_cmp_lg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x65,0xd0,0x04,0xe0,0x01,0x00] 
+v_cmp_lg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0xee,0x01,0x00] + v_cmp_lg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x04,0x02,0x00] @@ -57122,9 +59952,15 @@ v_cmp_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_ge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x66,0xd0,0x80,0x08,0x00,0x00] +v_cmp_ge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x66,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_ge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x66,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_ge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x66,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_ge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x66,0xd0,0x01,0x09,0x00,0x00] @@ -57134,9 +59970,15 @@ v_cmp_ge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_ge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x00,0x01,0x00] +v_cmp_ge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x82,0x01,0x00] + v_cmp_ge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_ge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xee,0x01,0x00] + v_cmp_ge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x04,0x02,0x00] @@ -57236,9 +60078,15 @@ v_cmp_o_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_o_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x67,0xd0,0x80,0x08,0x00,0x00] +v_cmp_o_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x67,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_o_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x67,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_o_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x67,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_o_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x67,0xd0,0x01,0x09,0x00,0x00] @@ -57248,9 +60096,15 @@ v_cmp_o_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_o_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x00,0x01,0x00] +v_cmp_o_f64_e64 s[10:11], s[4:5], -1 +// CHECK: 
[0x0a,0x00,0x67,0xd0,0x04,0x82,0x01,0x00] + v_cmp_o_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x67,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_o_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0xee,0x01,0x00] + v_cmp_o_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x04,0x02,0x00] @@ -57350,9 +60204,15 @@ v_cmp_u_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_u_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x68,0xd0,0x80,0x08,0x00,0x00] +v_cmp_u_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x68,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_u_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x68,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_u_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x68,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_u_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x68,0xd0,0x01,0x09,0x00,0x00] @@ -57362,9 +60222,15 @@ v_cmp_u_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_u_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x00,0x01,0x00] +v_cmp_u_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x82,0x01,0x00] + v_cmp_u_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_u_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xee,0x01,0x00] + v_cmp_u_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x04,0x02,0x00] @@ -57464,9 +60330,15 @@ v_cmp_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x69,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x69,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x69,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x69,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x69,0xd0,0x01,0x09,0x00,0x00] @@ -57476,9 +60348,15 @@ v_cmp_nge_f64_e64 s[10:11], v[254:255], s[4:5] 
v_cmp_nge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x69,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x04,0x02,0x00] @@ -57578,9 +60456,15 @@ v_cmp_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nlg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6a,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nlg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6a,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nlg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6a,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nlg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6a,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nlg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6a,0xd0,0x01,0x09,0x00,0x00] @@ -57590,9 +60474,15 @@ v_cmp_nlg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nlg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nlg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nlg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x04,0x02,0x00] @@ -57692,9 +60582,15 @@ v_cmp_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_ngt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6b,0xd0,0x80,0x08,0x00,0x00] +v_cmp_ngt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6b,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_ngt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6b,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_ngt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6b,0xd0,0xf7,0x08,0x00,0x00] + 
v_cmp_ngt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6b,0xd0,0x01,0x09,0x00,0x00] @@ -57704,9 +60600,15 @@ v_cmp_ngt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x00,0x01,0x00] +v_cmp_ngt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x82,0x01,0x00] + v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_ngt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0xee,0x01,0x00] + v_cmp_ngt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x04,0x02,0x00] @@ -57806,9 +60708,15 @@ v_cmp_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nle_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6c,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nle_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6c,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nle_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6c,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nle_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6c,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nle_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6c,0xd0,0x01,0x09,0x00,0x00] @@ -57818,9 +60726,15 @@ v_cmp_nle_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_nle_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nle_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nle_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nle_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nle_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x04,0x02,0x00] @@ -57920,9 +60834,15 @@ v_cmp_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_neq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6d,0xd0,0x80,0x08,0x00,0x00] +v_cmp_neq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6d,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_neq_f64_e64 
s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6d,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_neq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6d,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_neq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6d,0xd0,0x01,0x09,0x00,0x00] @@ -57932,9 +60852,15 @@ v_cmp_neq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_neq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x00,0x01,0x00] +v_cmp_neq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x82,0x01,0x00] + v_cmp_neq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_neq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0xee,0x01,0x00] + v_cmp_neq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x04,0x02,0x00] @@ -58034,9 +60960,15 @@ v_cmp_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_nlt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x6e,0xd0,0x80,0x08,0x00,0x00] +v_cmp_nlt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6e,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_nlt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6e,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_nlt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6e,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_nlt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6e,0xd0,0x01,0x09,0x00,0x00] @@ -58046,9 +60978,15 @@ v_cmp_nlt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x00,0x01,0x00] +v_cmp_nlt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x82,0x01,0x00] + v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_nlt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xee,0x01,0x00] + v_cmp_nlt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x04,0x02,0x00] @@ -58148,9 +61086,15 @@ v_cmp_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmp_tru_f64_e64 s[10:11], 0, s[4:5] // 
CHECK: [0x0a,0x00,0x6f,0xd0,0x80,0x08,0x00,0x00] +v_cmp_tru_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x6f,0xd0,0xc1,0x08,0x00,0x00] + v_cmp_tru_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x6f,0xd0,0xf0,0x08,0x00,0x00] +v_cmp_tru_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x6f,0xd0,0xf7,0x08,0x00,0x00] + v_cmp_tru_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x6f,0xd0,0x01,0x09,0x00,0x00] @@ -58160,9 +61104,15 @@ v_cmp_tru_f64_e64 s[10:11], v[254:255], s[4:5] v_cmp_tru_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x00,0x01,0x00] +v_cmp_tru_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x82,0x01,0x00] + v_cmp_tru_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0xe0,0x01,0x00] +v_cmp_tru_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0xee,0x01,0x00] + v_cmp_tru_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x04,0x02,0x00] @@ -58262,9 +61212,15 @@ v_cmpx_f_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_f_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x70,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_f_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x70,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_f_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x70,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_f_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x70,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_f_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x70,0xd0,0x01,0x09,0x00,0x00] @@ -58274,9 +61230,15 @@ v_cmpx_f_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_f_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_f_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_f_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_f_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_f_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: 
[0x0a,0x00,0x70,0xd0,0x04,0x04,0x02,0x00] @@ -58376,9 +61338,15 @@ v_cmpx_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_lt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x71,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_lt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x71,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_lt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x71,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_lt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x71,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_lt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x71,0xd0,0x01,0x09,0x00,0x00] @@ -58388,9 +61356,15 @@ v_cmpx_lt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_lt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x71,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_lt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_lt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x04,0x02,0x00] @@ -58490,9 +61464,15 @@ v_cmpx_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_eq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x72,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_eq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x72,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_eq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x72,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_eq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x72,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_eq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x72,0xd0,0x01,0x09,0x00,0x00] @@ -58502,9 +61482,15 @@ v_cmpx_eq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_eq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: 
[0x0a,0x00,0x72,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_eq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_eq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x04,0x02,0x00] @@ -58604,9 +61590,15 @@ v_cmpx_le_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_le_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x73,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_le_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x73,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_le_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x73,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_le_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x73,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_le_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x73,0xd0,0x01,0x09,0x00,0x00] @@ -58616,9 +61608,15 @@ v_cmpx_le_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_le_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_le_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_le_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x73,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_le_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_le_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x04,0x02,0x00] @@ -58718,9 +61716,15 @@ v_cmpx_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_gt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x74,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_gt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x74,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_gt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x74,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_gt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x74,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_gt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x74,0xd0,0x01,0x09,0x00,0x00] @@ -58730,9 +61734,15 @@ v_cmpx_gt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0 // CHECK: 
[0x0a,0x00,0x74,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_gt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_gt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_gt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x04,0x02,0x00] @@ -58832,9 +61842,15 @@ v_cmpx_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_lg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x75,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_lg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x75,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_lg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x75,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_lg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x75,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_lg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x75,0xd0,0x01,0x09,0x00,0x00] @@ -58844,9 +61860,15 @@ v_cmpx_lg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_lg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x75,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_lg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_lg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x04,0x02,0x00] @@ -58946,9 +61968,15 @@ v_cmpx_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_ge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x76,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_ge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x76,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_ge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x76,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_ge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x76,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_ge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: 
[0x0a,0x00,0x76,0xd0,0x01,0x09,0x00,0x00] @@ -58958,9 +61986,15 @@ v_cmpx_ge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_ge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_ge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_ge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x04,0x02,0x00] @@ -59060,9 +62094,15 @@ v_cmpx_o_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_o_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x77,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_o_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x77,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_o_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x77,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_o_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x77,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_o_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x77,0xd0,0x01,0x09,0x00,0x00] @@ -59072,9 +62112,15 @@ v_cmpx_o_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_o_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_o_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_o_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x77,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_o_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_o_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x04,0x02,0x00] @@ -59174,9 +62220,15 @@ v_cmpx_u_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_u_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x78,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_u_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x78,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_u_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x78,0xd0,0xf0,0x08,0x00,0x00] 
+v_cmpx_u_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x78,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_u_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x78,0xd0,0x01,0x09,0x00,0x00] @@ -59186,9 +62238,15 @@ v_cmpx_u_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_u_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_u_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_u_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_u_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_u_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x04,0x02,0x00] @@ -59288,9 +62346,15 @@ v_cmpx_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nge_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x79,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nge_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x79,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nge_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x79,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nge_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x79,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nge_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x79,0xd0,0x01,0x09,0x00,0x00] @@ -59300,9 +62364,15 @@ v_cmpx_nge_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nge_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x79,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nge_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nge_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x04,0x02,0x00] @@ -59402,9 +62472,15 @@ v_cmpx_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nlg_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7a,0xd0,0x80,0x08,0x00,0x00] 
+v_cmpx_nlg_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7a,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nlg_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7a,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nlg_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7a,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nlg_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7a,0xd0,0x01,0x09,0x00,0x00] @@ -59414,9 +62490,15 @@ v_cmpx_nlg_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nlg_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nlg_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nlg_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x04,0x02,0x00] @@ -59516,9 +62598,15 @@ v_cmpx_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_ngt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7b,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_ngt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7b,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_ngt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7b,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_ngt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7b,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_ngt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7b,0xd0,0x01,0x09,0x00,0x00] @@ -59528,9 +62616,15 @@ v_cmpx_ngt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_ngt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_ngt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_ngt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: 
[0x0a,0x00,0x7b,0xd0,0x04,0x04,0x02,0x00] @@ -59630,9 +62724,15 @@ v_cmpx_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nle_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7c,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nle_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7c,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nle_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7c,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nle_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7c,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nle_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7c,0xd0,0x01,0x09,0x00,0x00] @@ -59642,9 +62742,15 @@ v_cmpx_nle_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nle_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nle_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nle_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x04,0x02,0x00] @@ -59744,9 +62850,15 @@ v_cmpx_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_neq_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7d,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_neq_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7d,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_neq_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7d,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_neq_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7d,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_neq_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7d,0xd0,0x01,0x09,0x00,0x00] @@ -59756,9 +62868,15 @@ v_cmpx_neq_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_neq_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: 
[0x0a,0x00,0x7d,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_neq_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_neq_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x04,0x02,0x00] @@ -59858,9 +62976,15 @@ v_cmpx_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_nlt_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7e,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_nlt_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7e,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_nlt_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7e,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_nlt_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7e,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_nlt_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7e,0xd0,0x01,0x09,0x00,0x00] @@ -59870,9 +62994,15 @@ v_cmpx_nlt_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0 // CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_nlt_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_nlt_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_nlt_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x04,0x02,0x00] @@ -59972,9 +63102,15 @@ v_cmpx_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5] v_cmpx_tru_f64_e64 s[10:11], 0, s[4:5] // CHECK: [0x0a,0x00,0x7f,0xd0,0x80,0x08,0x00,0x00] +v_cmpx_tru_f64_e64 s[10:11], -1, s[4:5] +// CHECK: [0x0a,0x00,0x7f,0xd0,0xc1,0x08,0x00,0x00] + v_cmpx_tru_f64_e64 s[10:11], 0.5, s[4:5] // CHECK: [0x0a,0x00,0x7f,0xd0,0xf0,0x08,0x00,0x00] +v_cmpx_tru_f64_e64 s[10:11], -4.0, s[4:5] +// CHECK: [0x0a,0x00,0x7f,0xd0,0xf7,0x08,0x00,0x00] + v_cmpx_tru_f64_e64 s[10:11], v[1:2], s[4:5] // CHECK: [0x0a,0x00,0x7f,0xd0,0x01,0x09,0x00,0x00] @@ -59984,9 +63120,15 @@ v_cmpx_tru_f64_e64 s[10:11], v[254:255], s[4:5] v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0 // CHECK: 
[0x0a,0x00,0x7f,0xd0,0x04,0x00,0x01,0x00] +v_cmpx_tru_f64_e64 s[10:11], s[4:5], -1 +// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x82,0x01,0x00] + v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0.5 // CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0xe0,0x01,0x00] +v_cmpx_tru_f64_e64 s[10:11], s[4:5], -4.0 +// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0xee,0x01,0x00] + v_cmpx_tru_f64_e64 s[10:11], s[4:5], v[2:3] // CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x04,0x02,0x00] @@ -98831,17 +101973,3 @@ v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1 v_cmpx_t_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD // CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x0e] -s_rfe_restore_b64 s[4:5], s2 -// CHECK: [0x04,0x02,0x80,0x95] - -v_mov_fed_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 -// CHECK: [0xfa,0x12,0x0a,0x7e,0x01,0xe4,0x00,0x00] - -v_mov_fed_b32_e64 v5, s1 -// CHECK: [0x05,0x00,0x49,0xd1,0x01,0x00,0x00,0x00] - -v_mov_fed_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD -// CHECK: [0xf9,0x12,0x0a,0x7e,0x01,0x06,0x06,0x06] - -v_perm_b32 v5, s1, 0, v255 -// CHECK: [0x05,0x00,0xed,0xd1,0x01,0x00,0xfd,0x07] diff --git a/test/MC/ARM/assembly-default-build-attributes.s b/test/MC/ARM/assembly-default-build-attributes.s new file mode 100644 index 0000000000000..e136361af0e54 --- /dev/null +++ b/test/MC/ARM/assembly-default-build-attributes.s @@ -0,0 +1,43 @@ +// RUN: llvm-mc -triple armv7a < %s -arm-add-build-attributes | FileCheck %s --check-prefix=v7A +// RUN: llvm-mc -triple armv6m < %s -arm-add-build-attributes | FileCheck %s --check-prefix=v6M +// RUN: llvm-mc -triple armv7m < %s -arm-add-build-attributes | FileCheck %s --check-prefix=v7M +// RUN: llvm-mc -triple armv7a -mcpu=cortex-a15 < %s -arm-add-build-attributes | FileCheck %s --check-prefix=Cortex-A15 + +// This isn't intended to be a through check of the build attributes emitted +// for each target (that's tested elsewhere), but just to check that the +// hardware attributes are emitted by the assembler based on 
the selected +// target when requested. + +// v7A-NOT: .cpu +// v7A: .eabi_attribute 6, 10 @ Tag_CPU_arch +// v7A: .eabi_attribute 7, 65 @ Tag_CPU_arch_profile +// v7A: .eabi_attribute 8, 1 @ Tag_ARM_ISA_use +// v7A: .eabi_attribute 9, 2 @ Tag_THUMB_ISA_use +// v7A: .fpu neon +// v7A: .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access + +// v6M-NOT: .cpu +// v6M: .eabi_attribute 6, 12 @ Tag_CPU_arch +// v6M: .eabi_attribute 7, 77 @ Tag_CPU_arch_profile +// v6M: .eabi_attribute 8, 0 @ Tag_ARM_ISA_use +// v6M: .eabi_attribute 9, 1 @ Tag_THUMB_ISA_use +// v6M: .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access + +// v7M-NOT: .cpu +// v7M: .eabi_attribute 6, 10 @ Tag_CPU_arch +// v7M: .eabi_attribute 7, 77 @ Tag_CPU_arch_profile +// v7M: .eabi_attribute 8, 0 @ Tag_ARM_ISA_use +// v7M: .eabi_attribute 9, 2 @ Tag_THUMB_ISA_use +// v7M: .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access + +// Cortex-A15: .cpu cortex-a15 +// Cortex-A15: .eabi_attribute 6, 10 @ Tag_CPU_arch +// Cortex-A15: .eabi_attribute 7, 65 @ Tag_CPU_arch_profile +// Cortex-A15: .eabi_attribute 8, 1 @ Tag_ARM_ISA_use +// Cortex-A15: .eabi_attribute 9, 2 @ Tag_THUMB_ISA_use +// Cortex-A15: .fpu neon-vfpv4 +// Cortex-A15: .eabi_attribute 36, 1 @ Tag_FP_HP_extension +// Cortex-A15: .eabi_attribute 42, 1 @ Tag_MPextension_use +// Cortex-A15: .eabi_attribute 44, 2 @ Tag_DIV_use +// Cortex-A15: .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access +// Cortex-A15: .eabi_attribute 68, 3 @ Tag_Virtualization_use diff --git a/test/MC/ARM/multi-section-mapping.s b/test/MC/ARM/multi-section-mapping.s index e4b7146e4b0f7..7e62b10f5b096 100644 --- a/test/MC/ARM/multi-section-mapping.s +++ b/test/MC/ARM/multi-section-mapping.s @@ -21,14 +21,31 @@ .arm add r0, r0, r0 +@ Similarly no $t if we change back .starts_thumb using .pushsection + .pushsection .starts_thumb + .thumb + adds r0, r0, r0 + +@ When we change back to .text using .popsection .thumb is still active, so we +@ should emit a $t + .popsection + add r0, r0, r0 + +@ 
.ident does a push then pop of the .comment section, so the .word should +@ cause $d to appear in the .text section + .ident "ident" + .word 0 + @ With all those constraints, we want: -@ + .text to have $a at 0 and no others +@ + .text to have $a at 0, $t at 8, $d at 12 @ + .wibble to have $a at 0 @ + .starts_thumb to have $t at 0 @ + .starts_data to have $d at 0 @ CHECK: 00000000 .text 00000000 $a @ CHECK-NEXT: 00000000 .wibble 00000000 $a +@ CHECK-NEXT: 0000000a .text 00000000 $d @ CHECK-NEXT: 00000000 .starts_thumb 00000000 $t +@ CHECK-NEXT: 00000008 .text 00000000 $t @ CHECK-NOT: ${{[adt]}} diff --git a/test/TableGen/intrinsic-long-name.td b/test/TableGen/intrinsic-long-name.td index d7c9d31762668..24ed89ac4acf0 100644 --- a/test/TableGen/intrinsic-long-name.td +++ b/test/TableGen/intrinsic-long-name.td @@ -22,7 +22,7 @@ class Intrinsic<string name, list<LLVMType> param_types = []> { list<IntrinsicProperty> IntrProperties = []; } -def iAny : ValueType<0, 125>; +def iAny : ValueType<0, 253>; def llvm_anyint_ty : LLVMType<iAny>; // Make sure we generate the long name without crashing diff --git a/test/TableGen/intrinsic-varargs.td b/test/TableGen/intrinsic-varargs.td index 0aafad8093cc2..1e2378550855d 100644 --- a/test/TableGen/intrinsic-varargs.td +++ b/test/TableGen/intrinsic-varargs.td @@ -23,7 +23,7 @@ class Intrinsic<string name, list<LLVMType> param_types = []> { } // isVoid needs to match the definition in ValueTypes.td -def isVoid : ValueType<0, 66>; // Produces no value +def isVoid : ValueType<0, 108>; // Produces no value def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here // CHECK: /* 0 */ 0, 29, 0, diff --git a/test/ThinLTO/X86/autoupgrade.ll b/test/ThinLTO/X86/autoupgrade.ll index 15c74f540b8c8..cbbe833d262ab 100644 --- a/test/ThinLTO/X86/autoupgrade.ll +++ b/test/ThinLTO/X86/autoupgrade.ll @@ -9,10 +9,8 @@ ; RUN: -import=globalfunc1:%p/Inputs/autoupgrade.bc %t.bc \ ; RUN: | llvm-bcanalyzer -dump | FileCheck %s - -; CHECK-NOT: 
'llvm.invariant.start' -; CHECK: record string = 'llvm.invariant.start.p0i8' -; CHECK-NOT: 'llvm.invariant.start' +; CHECK: <STRTAB_BLOCK +; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0i8' target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.11.0" diff --git a/test/ThinLTO/X86/distributed_indexes.ll b/test/ThinLTO/X86/distributed_indexes.ll index 0700488b5e92a..b81c94c2df8f2 100644 --- a/test/ThinLTO/X86/distributed_indexes.ll +++ b/test/ThinLTO/X86/distributed_indexes.ll @@ -13,15 +13,11 @@ ; BACKEND1-NEXT: </MODULE_STRTAB_BLOCK ; BACKEND1-NEXT: <GLOBALVAL_SUMMARY_BLOCK ; BACKEND1-NEXT: <VERSION +; BACKEND1-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} +; BACKEND1-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} ; BACKEND1-NEXT: <COMBINED ; BACKEND1-NEXT: <COMBINED ; BACKEND1-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; BACKEND1-NEXT: <VALUE_SYMTAB -; Check that the format is: op0=valueid, op1=offset, op2=funcguid, -; where funcguid is the lower 64 bits of the function name MD5. -; BACKEND1-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; BACKEND1-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; BACKEND1-NEXT: </VALUE_SYMTAB ; The backend index for Input/distributed_indexes.ll contains summaries from ; itself only, as it does not import anything. @@ -30,13 +26,9 @@ ; BACKEND2-NEXT: </MODULE_STRTAB_BLOCK ; BACKEND2-NEXT: <GLOBALVAL_SUMMARY_BLOCK ; BACKEND2-NEXT: <VERSION +; BACKEND2-NEXT: <VALUE_GUID op0=1 op1=-5300342847281564238 ; BACKEND2-NEXT: <COMBINED ; BACKEND2-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; BACKEND2-NEXT: <VALUE_SYMTAB -; Check that the format is: op0=valueid, op1=offset, op2=funcguid, -; where funcguid is the lower 64 bits of the function name MD5. 
-; BACKEND2-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0=1 op1=-5300342847281564238 -; BACKEND2-NEXT: </VALUE_SYMTAB declare void @g(...) diff --git a/test/Transforms/CodeGenPrepare/split-indirect-loop.ll b/test/Transforms/CodeGenPrepare/split-indirect-loop.ll new file mode 100644 index 0000000000000..cb834bb5dd8f9 --- /dev/null +++ b/test/Transforms/CodeGenPrepare/split-indirect-loop.ll @@ -0,0 +1,37 @@ +; RUN: opt -codegenprepare -S < %s | FileCheck %s + +; Test that an invalid CFG is not created by splitIndirectCriticalEdges +; transformation when the 'target' block is a loop to itself. + +; CHECK: .split: +; CHECK: br label %while.body.clone +; CHECK: if.else1: +; CHECK: indirectbr +; CHECK: while.body.clone: +; CHECK: br label %.split + +define void @test() { +entry: + br label %if.else + +if.else: + br i1 undef, label %while.body, label %preheader + +preheader: + br label %if.else1 + +if.then: + unreachable + +while.body: + %dest.sroa = phi i32 [ %1, %while.body ], [ undef, %if.else1 ], [ undef, %if.else ] + %0 = inttoptr i32 %dest.sroa to i8* + %incdec.ptr = getelementptr inbounds i8, i8* %0, i32 -1 + %1 = ptrtoint i8* %incdec.ptr to i32 + store i8 undef, i8* %incdec.ptr, align 1 + br label %while.body + +if.else1: + indirectbr i8* undef, [label %if.then, label %while.body, label %if.else, label %if.else1] +} + diff --git a/test/Transforms/GVN/non-integral-pointers.ll b/test/Transforms/GVN/non-integral-pointers.ll new file mode 100644 index 0000000000000..9ae4132231d83 --- /dev/null +++ b/test/Transforms/GVN/non-integral-pointers.ll @@ -0,0 +1,39 @@ +; RUN: opt -gvn -S < %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4" +target triple = "x86_64-unknown-linux-gnu" + +define void @f0(i1 %alwaysFalse, i64 %val, i64* %loc) { +; CHECK-LABEL: @f0( +; CHECK-NOT: inttoptr +; CHECK-NOT: ptrtoint + entry: + store i64 %val, i64* %loc + br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken + + neverTaken: + %loc.bc = bitcast i64* 
%loc to i8 addrspace(4)** + %ptr = load i8 addrspace(4)*, i8 addrspace(4)** %loc.bc + store i8 5, i8 addrspace(4)* %ptr + ret void + + alwaysTaken: + ret void +} + +define i64 @f1(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) { +; CHECK-LABEL: @f1( +; CHECK-NOT: inttoptr +; CHECK-NOT: ptrtoint + entry: + store i8 addrspace(4)* %val, i8 addrspace(4)** %loc + br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken + + neverTaken: + %loc.bc = bitcast i8 addrspace(4)** %loc to i64* + %int = load i64, i64* %loc.bc + ret i64 %int + + alwaysTaken: + ret i64 42 +} diff --git a/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll b/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll deleted file mode 100644 index 510a68c3437e8..0000000000000 --- a/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll +++ /dev/null @@ -1,15 +0,0 @@ -; RUN: opt < %s -instcombine -S | grep bitcast | count 2 - -define signext i32 @b(i32* inreg %x) { - ret i32 0 -} - -define void @c(...) 
{ - ret void -} - -define void @g(i32* %y) { - call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)( i32 zeroext 0 ) ; <i32>:2 [#uses=0] - call void bitcast (void (...)* @c to void (i32*)*)( i32* sret null ) - ret void -} diff --git a/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll b/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll index 888f51bf939dd..0c4842c159880 100644 --- a/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll +++ b/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll @@ -227,6 +227,12 @@ define amdgpu_ps float @preserve_metadata_extract_elt0_buffer_load_v2f32(<4 x i3 ret float %elt0 } +declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #1 +declare <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32>, i32, i32, i1, i1) #1 +declare <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32>, i32, i32, i1, i1) #1 +declare <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32>, i32, i32, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32>, i32, i32, i1, i1) #1 + ; -------------------------------------------------------------------- ; llvm.amdgcn.buffer.load.format ; -------------------------------------------------------------------- @@ -304,18 +310,1196 @@ define i16 @extract_lo16_0_bitcast_buffer_load_format_v4f32(i32 %arg) #0 { ret i16 %tmp2 } -declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #1 -declare <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32>, i32, i32, i1, i1) #1 -declare <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32>, i32, i32, i1, i1) #1 -declare <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32>, i32, i32, i1, i1) #1 -declare <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32>, i32, i32, i1, i1) #1 - declare float @llvm.amdgcn.buffer.load.format.f32(<4 x i32>, i32, i32, i1, i1) #1 declare <1 x float> @llvm.amdgcn.buffer.load.format.v1f32(<4 x i32>, i32, i32, i1, i1) #1 declare <2 x float> @llvm.amdgcn.buffer.load.format.v2f32(<4 x 
i32>, i32, i32, i1, i1) #1 declare <3 x float> @llvm.amdgcn.buffer.load.format.v3f32(<4 x i32>, i32, i32, i1, i1) #1 declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #1 +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 
false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_invalid_dmask_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 %dmask, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_invalid_dmask_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc, i32 %dmask) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 %dmask, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; FIXME: Should really fold to undef +; CHECK-LABEL: @extract_elt0_dmask_0000_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_0000_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_dmask_0001_image_sample_v4f32_v4f32_v8i32( +; 
CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_0001_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; FIXME: Should really fold to undef +; CHECK-LABEL: @extract_elt0_dmask_0010_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 2, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_0010_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 2, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; FIXME: Should really fold to undef +; CHECK-LABEL: @extract_elt0_dmask_0100_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 4, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_0100_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 4, i1 
false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; FIXME: Should really fold to undef +; CHECK-LABEL: @extract_elt0_dmask_1000_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 8, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_1000_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 8, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_dmask_1001_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_1001_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 9, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_dmask_0011_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_0011_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> 
inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_dmask_0111_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_dmask_0111_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 7, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_elt1_dmask_0001_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.image.sample.v2f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret <2 x float> %data +define amdgpu_ps <2 x float> @extract_elt0_elt1_dmask_0001_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1> + ret <2 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_dmask_0011_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.image.sample.v2f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 3, i1 false, i1 
false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret <2 x float> %data +define amdgpu_ps <2 x float> @extract_elt0_elt1_dmask_0011_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1> + ret <2 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_dmask_0111_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.image.sample.v2f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret <2 x float> %data +define amdgpu_ps <2 x float> @extract_elt0_elt1_dmask_0111_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 7, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1> + ret <2 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_dmask_0101_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.image.sample.v2f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 5, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret <2 x float> %data +define amdgpu_ps <2 x float> @extract_elt0_elt1_dmask_0101_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 5, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = 
shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1> + ret <2 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_elt2_dmask_0001_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> +; CHECK-NEXT: ret <3 x float> %shuf +define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_dmask_0001_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> + ret <3 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_elt2_dmask_0011_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> +; CHECK-NEXT: ret <3 x float> %shuf +define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_dmask_0011_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> + ret <3 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_elt2_dmask_0101_image_sample_v4f32_v4f32_v8i32( +; 
CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 5, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> +; CHECK-NEXT: ret <3 x float> %shuf +define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_dmask_0101_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 5, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> + ret <3 x float> %shuf +} + +; CHECK-LABEL: @extract_elt0_elt1_elt2_dmask_0111_image_sample_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 7, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> +; CHECK-NEXT: ret <3 x float> %shuf +define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_dmask_0111_image_sample_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 7, i1 false, i1 false, i1 false, i1 false, i1 false) + %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> + ret <3 x float> %shuf +} + +declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> 
@llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.d +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_d_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.d.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_d_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x 
float> @llvm.amdgcn.image.sample.d.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.d.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_d_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.d.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_d_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.d.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.d.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.l +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_l_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.l.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_l_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float 
%elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.b +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_b_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.b.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_b_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.b.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_b_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.b.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_b_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.b.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, 
i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.b.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.lz +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_lz_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.lz.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_lz_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.lz.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.lz.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.cd +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_cd_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.cd.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_cd_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.cd.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 
x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.cd.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.cd.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_cd_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.cd.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_cd_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.cd.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) 
+ %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_c_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_c_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.cl +; 
-------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.d +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_d_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.d.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_d_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.d.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.d.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; 
llvm.amdgcn.image.sample.c.d.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_d_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.d.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_d_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.d.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.l +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_l_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.l.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_l_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.l.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; 
-------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.b +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_b_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.b.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_b_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.b.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.b.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.b.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_b_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.b.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_b_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.b.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, 
<4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.lz +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_lz_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.lz.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_lz_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.lz.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.lz.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.cd +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_cd_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.cd.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_cd_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.cd.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> 
@llvm.amdgcn.image.sample.c.cd.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.cd.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_cd_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.cd.cl.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_cd_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + 
ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_o_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.o.f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_o_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_o_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.o.f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_o_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: 
@extract_elt0_image_sample_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.d.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_d_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.d.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_d_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.d.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.d.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.d.cl.o +; 
-------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_d_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.d.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_d_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.d.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.d.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.l.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_l_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.l.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_l_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.l.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.l.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; 
-------------------------------------------------------------------- +; llvm.amdgcn.image.sample.b.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_b_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.b.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_b_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.b.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.b.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.b.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_b_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.b.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_b_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.b.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.b.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, 
<4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.lz.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_lz_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.lz.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_lz_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.lz.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.lz.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.cd.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_cd_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.cd.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_cd_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.cd.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> 
@llvm.amdgcn.image.sample.cd.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.cd.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_cd_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.cd.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_cd_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.cd.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> 
%data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_c_o_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.o.f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_o_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_sample_c_o_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.o.f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_o_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.cl.o +; 
-------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.d.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_d_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.d.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_d_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; 
-------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.d.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_d_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.d.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_d_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.d.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.l.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_l_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.l.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_l_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> 
@llvm.amdgcn.image.sample.c.l.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.b.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_b_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.b.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_b_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.b.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.b.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.b.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_b_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.b.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_b_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 
x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.lz.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_lz_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.lz.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_lz_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.lz.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.lz.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.cd.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_cd_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.cd.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_cd_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.cd.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 
false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.cd.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.sample.c.cd.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_sample_c_cd_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.sample.c.cd.cl.o.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_sample_c_cd_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4 +; -------------------------------------------------------------------- + +; Don't handle gather4* + +; CHECK-LABEL: @extract_elt0_image_gather4_v4f32_v4f32_v8i32( +; CHECK: %data = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i3 +define amdgpu_ps float @extract_elt0_image_gather4_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) 
+ %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_gather4_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_gather4_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.gather4.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: 
@extract_elt0_image_gather4_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.l +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_l_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.l.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_l_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.l.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.b +; -------------------------------------------------------------------- + +; CHECK-LABEL: 
@extract_elt0_image_gather4_b_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.b.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_b_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.b.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.b.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.b.cl +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_b_cl_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_b_cl_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.b.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.lz +; -------------------------------------------------------------------- + +; CHECK-LABEL: 
@extract_elt0_image_gather4_lz_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_lz_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_gather4_o_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_o_v4f32_v4f32_v4i32(<4 x 
float> %vaddr, <4 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_gather4_o_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_o_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg 
%rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.l.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_l_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.l.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_l_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.l.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.l.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.b.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_b_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.b.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_b_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> 
inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.b.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.b.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.b.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_b_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_b_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.lz.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_lz_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.lz.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_lz_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> 
inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.lz.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.lz.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.c.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_c_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_gather4_c_o_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_o_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + 
+; CHECK-LABEL: @extract_elt0_image_gather4_c_o_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_o_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.c.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_c_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> 
@llvm.amdgcn.image.gather4.c.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.c.l.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_c_l_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_l_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.c.b.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_c_b_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.b.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_b_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.b.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 
+} + +declare <4 x float> @llvm.amdgcn.image.gather4.c.b.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.c.b.cl.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_c_b_cl_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_b_cl_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.gather4.c.lz.o +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_gather4_c_lz_o_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) +define amdgpu_ps float @extract_elt0_image_gather4_c_lz_o_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %gather4r, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %gather4r, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = 
extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + +; -------------------------------------------------------------------- +; llvm.amdgcn.image.getlod +; -------------------------------------------------------------------- + +; CHECK-LABEL: @extract_elt0_image_getlod_v4f32_v4f32_v8i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.getlod.f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_getlod_v4f32_v4f32_v8i32(<4 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.v4f32.v8i32(<4 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_getlod_v4f32_v4f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.getlod.f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_getlod_v4f32_v4f32_v4i32(<4 x float> %vaddr, <4 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.v4f32.v4i32(<4 x float> %vaddr, <4 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +; CHECK-LABEL: @extract_elt0_image_getlod_v4f32_v2f32_v4i32( +; CHECK-NEXT: %data = call float @llvm.amdgcn.image.getlod.f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false, i1 false) +; 
CHECK-NEXT: ret float %data +define amdgpu_ps float @extract_elt0_image_getlod_v4f32_v2f32_v4i32(<2 x float> %vaddr, <8 x i32> inreg %sampler, <4 x i32> inreg %rsrc) #0 { + %data = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.v2f32.v8i32(<2 x float> %vaddr, <8 x i32> %sampler, <4 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) + %elt0 = extractelement <4 x float> %data, i32 0 + ret float %elt0 +} + +declare <4 x float> @llvm.amdgcn.image.getlod.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.getlod.v4f32.v4f32.v4i32(<4 x float>, <4 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 +declare <4 x float> @llvm.amdgcn.image.getlod.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1 + attributes #0 = { nounwind } attributes #1 = { nounwind readonly } diff --git a/test/Transforms/InstCombine/call-cast-attrs.ll b/test/Transforms/InstCombine/call-cast-attrs.ll new file mode 100644 index 0000000000000..ddaf90c3e74fd --- /dev/null +++ b/test/Transforms/InstCombine/call-cast-attrs.ll @@ -0,0 +1,29 @@ +; RUN: opt < %s -instcombine -S | FileCheck %s + +define signext i32 @b(i32* inreg %x) { + ret i32 0 +} + +define void @c(...) { + ret void +} + +declare void @useit(i32) + +define void @d(i32 %x, ...) { + call void @useit(i32 %x) + ret void +} + +define void @g(i32* %y) { + call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)(i32 zeroext 0) + call void bitcast (void (...)* @c to void (i32*)*)(i32* %y) + call void bitcast (void (...)* @c to void (i32*)*)(i32* sret %y) + call void bitcast (void (i32, ...)* @d to void (i32, i32*)*)(i32 0, i32* sret %y) + ret void +} +; CHECK-LABEL: define void @g(i32* %y) +; CHECK: call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)(i32 zeroext 0) +; CHECK: call void (...) 
@c(i32* %y) +; CHECK: call void bitcast (void (...)* @c to void (i32*)*)(i32* sret %y) +; CHECK: call void bitcast (void (i32, ...)* @d to void (i32, i32*)*)(i32 0, i32* sret %y) diff --git a/test/Transforms/InstCombine/constant-fold-math.ll b/test/Transforms/InstCombine/constant-fold-math.ll index 50cd6070896e8..27578387f827a 100644 --- a/test/Transforms/InstCombine/constant-fold-math.ll +++ b/test/Transforms/InstCombine/constant-fold-math.ll @@ -45,4 +45,22 @@ define double @constant_fold_fmuladd_f64() #0 { ret double %x } +; PR32177 + +; CHECK-LABEL: @constant_fold_frem_f32 +; CHECK-NEXT: ret float 0x41A61B2000000000 +define float @constant_fold_frem_f32() #0 { + %x = frem float 0x43cbfcd960000000, 0xc1e2b34a00000000 + ret float %x +} + +; PR3316 + +; CHECK-LABEL: @constant_fold_frem_f64 +; CHECK-NEXT: ret double 0.000000e+00 +define double @constant_fold_frem_f64() { + %x = frem double 0x43E0000000000000, 1.000000e+00 + ret double %x +} + attributes #0 = { nounwind readnone } diff --git a/test/Transforms/InstCombine/div-shift.ll b/test/Transforms/InstCombine/div-shift.ll index 517313ed8e4ed..b5a65048fda01 100644 --- a/test/Transforms/InstCombine/div-shift.ll +++ b/test/Transforms/InstCombine/div-shift.ll @@ -16,6 +16,21 @@ entry: ret i32 %d } +define <2 x i32> @t1vec(<2 x i16> %x, <2 x i32> %y) { +; CHECK-LABEL: @t1vec( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CONV:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = add <2 x i32> [[Y:%.*]], <i32 1, i32 1> +; CHECK-NEXT: [[D:%.*]] = lshr <2 x i32> [[CONV]], [[TMP0]] +; CHECK-NEXT: ret <2 x i32> [[D]] +; +entry: + %conv = zext <2 x i16> %x to <2 x i32> + %s = shl <2 x i32> <i32 2, i32 2>, %y + %d = sdiv <2 x i32> %conv, %s + ret <2 x i32> %d +} + ; rdar://11721329 define i64 @t2(i64 %x, i32 %y) { ; CHECK-LABEL: @t2( diff --git a/test/Transforms/InstCombine/div.ll b/test/Transforms/InstCombine/div.ll index a037607267ac8..796fce020fd3d 100644 --- a/test/Transforms/InstCombine/div.ll +++ 
b/test/Transforms/InstCombine/div.ll @@ -225,6 +225,16 @@ define i32 @test19(i32 %x) { ret i32 %A } +define <2 x i32> @test19vec(<2 x i32> %x) { +; CHECK-LABEL: @test19vec( +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i32> [[X:%.*]], <i32 1, i32 1> +; CHECK-NEXT: [[A:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i32> +; CHECK-NEXT: ret <2 x i32> [[A]] +; + %A = udiv <2 x i32> <i32 1, i32 1>, %x + ret <2 x i32> %A +} + define i32 @test20(i32 %x) { ; CHECK-LABEL: @test20( ; CHECK-NEXT: [[TMP1:%.*]] = add i32 %x, 1 @@ -236,6 +246,17 @@ define i32 @test20(i32 %x) { ret i32 %A } +define <2 x i32> @test20vec(<2 x i32> %x) { +; CHECK-LABEL: @test20vec( +; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[X:%.*]], <i32 1, i32 1> +; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <2 x i32> [[TMP1]], <i32 3, i32 3> +; CHECK-NEXT: [[A:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[X]], <2 x i32> zeroinitializer +; CHECK-NEXT: ret <2 x i32> [[A]] +; + %A = sdiv <2 x i32> <i32 1, i32 1>, %x + ret <2 x i32> %A +} + define i32 @test21(i32 %a) { ; CHECK-LABEL: @test21( ; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, 3 @@ -388,6 +409,17 @@ define i32 @test35(i32 %A) { ret i32 %mul } +define <2 x i32> @test35vec(<2 x i32> %A) { +; CHECK-LABEL: @test35vec( +; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[A:%.*]], <i32 2147483647, i32 2147483647> +; CHECK-NEXT: [[MUL:%.*]] = udiv exact <2 x i32> [[AND]], <i32 2147483647, i32 2147483647> +; CHECK-NEXT: ret <2 x i32> [[MUL]] +; + %and = and <2 x i32> %A, <i32 2147483647, i32 2147483647> + %mul = sdiv exact <2 x i32> %and, <i32 2147483647, i32 2147483647> + ret <2 x i32> %mul +} + define i32 @test36(i32 %A) { ; CHECK-LABEL: @test36( ; CHECK-NEXT: [[AND:%.*]] = and i32 %A, 2147483647 @@ -400,13 +432,10 @@ define i32 @test36(i32 %A) { ret i32 %mul } -; FIXME: Vector should get same transform as scalar. 
- define <2 x i32> @test36vec(<2 x i32> %A) { ; CHECK-LABEL: @test36vec( -; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> %A, <i32 2147483647, i32 2147483647> -; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw <2 x i32> <i32 1, i32 1>, %A -; CHECK-NEXT: [[MUL:%.*]] = sdiv exact <2 x i32> [[AND]], [[SHL]] +; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[A:%.*]], <i32 2147483647, i32 2147483647> +; CHECK-NEXT: [[MUL:%.*]] = lshr exact <2 x i32> [[AND]], [[A]] ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %and = and <2 x i32> %A, <i32 2147483647, i32 2147483647> diff --git a/test/Transforms/InstCombine/pr32686.ll b/test/Transforms/InstCombine/pr32686.ll new file mode 100644 index 0000000000000..b2d2aff2fde8a --- /dev/null +++ b/test/Transforms/InstCombine/pr32686.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -instcombine %s | FileCheck %s + +@a = external global i8 +@b = external global i32 + +define void @tinkywinky() { +; CHECK-LABEL: @tinkywinky( +; CHECK-NEXT: [[PATATINO:%.*]] = load i8, i8* @a, align 1 +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[PATATINO]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL]] to i32 +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], or (i32 zext (i1 icmp ne (i32* bitcast (i8* @a to i32*), i32* @b) to i32), i32 2) +; CHECK-NEXT: store i32 [[OR1]], i32* @b, align 4 +; CHECK-NEXT: ret void +; + %patatino = load i8, i8* @a + %tobool = icmp ne i8 %patatino, 0 + %lnot = xor i1 %tobool, true + %lnot.ext = zext i1 %lnot to i32 + %or = or i32 xor (i32 zext (i1 icmp ne (i32* bitcast (i8* @a to i32*), i32* @b) to i32), i32 2), %lnot.ext + store i32 %or, i32* @b, align 4 + ret void +} diff --git a/test/Transforms/InstCombine/rem.ll b/test/Transforms/InstCombine/rem.ll index 7a7a134db9c5d..86a3580189fd2 100644 --- a/test/Transforms/InstCombine/rem.ll +++ b/test/Transforms/InstCombine/rem.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -instcombine -S | 
FileCheck %s define i64 @rem_signed(i64 %x1, i64 %y2) { @@ -571,3 +572,24 @@ rem.is.unsafe: ret i32 0 } +define i32 @test22(i32 %A) { +; CHECK-LABEL: @test22( +; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], 2147483647 +; CHECK-NEXT: [[MUL:%.*]] = urem i32 [[AND]], 2147483647 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %A, 2147483647 + %mul = srem i32 %and, 2147483647 + ret i32 %mul +} + +define <2 x i32> @test23(<2 x i32> %A) { +; CHECK-LABEL: @test23( +; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[A:%.*]], <i32 2147483647, i32 2147483647> +; CHECK-NEXT: [[MUL:%.*]] = urem <2 x i32> [[AND]], <i32 2147483647, i32 2147483647> +; CHECK-NEXT: ret <2 x i32> [[MUL]] +; + %and = and <2 x i32> %A, <i32 2147483647, i32 2147483647> + %mul = srem <2 x i32> %and, <i32 2147483647, i32 2147483647> + ret <2 x i32> %mul +} diff --git a/test/Transforms/InstCombine/shift.ll b/test/Transforms/InstCombine/shift.ll index 60ba35557f70a..d5f489280a034 100644 --- a/test/Transforms/InstCombine/shift.ll +++ b/test/Transforms/InstCombine/shift.ll @@ -1268,3 +1268,23 @@ define <2 x i64> @test_64_splat_vec(<2 x i32> %t) { ret <2 x i64> %shl } +define <2 x i8> @ashr_demanded_bits_splat(<2 x i8> %x) { +; CHECK-LABEL: @ashr_demanded_bits_splat( +; CHECK-NEXT: [[SHR:%.*]] = ashr <2 x i8> %x, <i8 7, i8 7> +; CHECK-NEXT: ret <2 x i8> [[SHR]] +; + %and = and <2 x i8> %x, <i8 128, i8 128> + %shr = ashr <2 x i8> %and, <i8 7, i8 7> + ret <2 x i8> %shr +} + +define <2 x i8> @lshr_demanded_bits_splat(<2 x i8> %x) { +; CHECK-LABEL: @lshr_demanded_bits_splat( +; CHECK-NEXT: [[SHR:%.*]] = lshr <2 x i8> %x, <i8 7, i8 7> +; CHECK-NEXT: ret <2 x i8> [[SHR]] +; + %and = and <2 x i8> %x, <i8 128, i8 128> + %shr = lshr <2 x i8> %and, <i8 7, i8 7> + ret <2 x i8> %shr +} + diff --git a/test/Transforms/InstCombine/vector-casts.ll b/test/Transforms/InstCombine/vector-casts.ll index 643ab6c5348fa..2197c250ace2c 100644 --- a/test/Transforms/InstCombine/vector-casts.ll +++ 
b/test/Transforms/InstCombine/vector-casts.ll @@ -15,9 +15,9 @@ define <2 x i1> @test1(<2 x i64> %a) { ; The ashr turns into an lshr. define <2 x i64> @test2(<2 x i64> %a) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[B:%.*]] = and <2 x i64> %a, <i64 65535, i64 65535> -; CHECK-NEXT: [[T:%.*]] = lshr <2 x i64> [[B]], <i64 1, i64 1> -; CHECK-NEXT: ret <2 x i64> [[T]] +; CHECK-NEXT: [[B:%.*]] = and <2 x i64> %a, <i64 65534, i64 65534> +; CHECK-NEXT: [[TMP1:%.*]] = lshr exact <2 x i64> [[B]], <i64 1, i64 1> +; CHECK-NEXT: ret <2 x i64> [[TMP1]] ; %b = and <2 x i64> %a, <i64 65535, i64 65535> %t = ashr <2 x i64> %b, <i64 1, i64 1> diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll index 33fd978277d4c..aa71c6ba86ae6 100644 --- a/test/Transforms/InstSimplify/AndOrXor.ll +++ b/test/Transforms/InstSimplify/AndOrXor.ll @@ -376,26 +376,6 @@ define i1 @or_icmp3(i32 %x, i32 %y) { ret i1 %3 } -define i1 @disjoint_cmps(i32 %A) { -; CHECK-LABEL: @disjoint_cmps( -; CHECK-NEXT: ret i1 false -; - %B = icmp eq i32 %A, 1 - %C = icmp sge i32 %A, 3 - %D = and i1 %B, %C - ret i1 %D -} - -define i1 @disjoint_cmps2(i32 %X) { -; CHECK-LABEL: @disjoint_cmps2( -; CHECK-NEXT: ret i1 false -; - %a = icmp ult i32 %X, 31 - %b = icmp slt i32 %X, 0 - %c = and i1 %a, %b - ret i1 %c -} - ; PR27869 - Look through casts to eliminate cmps and bitwise logic. define i32 @and_of_zexted_icmps(i32 %i) { diff --git a/test/Transforms/InstSimplify/icmp-ranges.ll b/test/Transforms/InstSimplify/icmp-ranges.ll new file mode 100644 index 0000000000000..dcbbe0bc7fb9c --- /dev/null +++ b/test/Transforms/InstSimplify/icmp-ranges.ll @@ -0,0 +1,2912 @@ +; RUN: opt < %s -instsimplify -S | FileCheck %s + +; Cycle through all pairs of predicates to test +; simplification of range-intersection or range-union. 
+ +; eq +; x == 13 && x == 17 + +define i1 @and_eq_eq(i8 %x) { +; CHECK-LABEL: @and_eq_eq( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x != 17 + +define i1 @and_eq_ne(i8 %x) { +; CHECK-LABEL: @and_eq_ne( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x >=s 17 + +define i1 @and_eq_sge(i8 %x) { +; CHECK-LABEL: @and_eq_sge( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x >s 17 + +define i1 @and_eq_sgt(i8 %x) { +; CHECK-LABEL: @and_eq_sgt( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x <=s 17 + +define i1 @and_eq_sle(i8 %x) { +; CHECK-LABEL: @and_eq_sle( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x <s 17 + +define i1 @and_eq_slt(i8 %x) { +; CHECK-LABEL: @and_eq_slt( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x >=u 17 + +define i1 @and_eq_uge(i8 %x) { +; CHECK-LABEL: @and_eq_uge( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x >u 17 + +define i1 @and_eq_ugt(i8 %x) { +; CHECK-LABEL: @and_eq_ugt( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 13 + %b 
= icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x <=u 17 + +define i1 @and_eq_ule(i8 %x) { +; CHECK-LABEL: @and_eq_ule( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 13 && x <u 17 + +define i1 @and_eq_ult(i8 %x) { +; CHECK-LABEL: @and_eq_ult( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ne +; x != 13 && x == 17 + +define i1 @and_ne_eq(i8 %x) { +; CHECK-LABEL: @and_ne_eq( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x != 17 + +define i1 @and_ne_ne(i8 %x) { +; CHECK-LABEL: @and_ne_ne( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x >=s 17 + +define i1 @and_ne_sge(i8 %x) { +; CHECK-LABEL: @and_ne_sge( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x >s 17 + +define i1 @and_ne_sgt(i8 %x) { +; CHECK-LABEL: @and_ne_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; 
CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x <=s 17 + +define i1 @and_ne_sle(i8 %x) { +; CHECK-LABEL: @and_ne_sle( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x <s 17 + +define i1 @and_ne_slt(i8 %x) { +; CHECK-LABEL: @and_ne_slt( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x >=u 17 + +define i1 @and_ne_uge(i8 %x) { +; CHECK-LABEL: @and_ne_uge( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x >u 17 + +define i1 @and_ne_ugt(i8 %x) { +; CHECK-LABEL: @and_ne_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x <=u 17 + +define i1 @and_ne_ule(i8 %x) { +; CHECK-LABEL: @and_ne_ule( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 13 && x <u 17 + +define i1 @and_ne_ult(i8 %x) { +; CHECK-LABEL: @and_ne_ult( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp 
ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; sge +; x >=s 13 && x == 17 + +define i1 @and_sge_eq(i8 %x) { +; CHECK-LABEL: @and_sge_eq( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x != 17 + +define i1 @and_sge_ne(i8 %x) { +; CHECK-LABEL: @and_sge_ne( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x >=s 17 + +define i1 @and_sge_sge(i8 %x) { +; CHECK-LABEL: @and_sge_sge( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x >s 17 + +define i1 @and_sge_sgt(i8 %x) { +; CHECK-LABEL: @and_sge_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x <=s 17 + +define i1 @and_sge_sle(i8 %x) { +; CHECK-LABEL: @and_sge_sle( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x <s 17 + +define i1 @and_sge_slt(i8 %x) { +; CHECK-LABEL: 
@and_sge_slt( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x >=u 17 + +define i1 @and_sge_uge(i8 %x) { +; CHECK-LABEL: @and_sge_uge( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x >u 17 + +define i1 @and_sge_ugt(i8 %x) { +; CHECK-LABEL: @and_sge_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x <=u 17 + +define i1 @and_sge_ule(i8 %x) { +; CHECK-LABEL: @and_sge_ule( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 13 && x <u 17 + +define i1 @and_sge_ult(i8 %x) { +; CHECK-LABEL: @and_sge_ult( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; sgt +; x >s 13 && x == 17 + +define i1 @and_sgt_eq(i8 %x) { +; CHECK-LABEL: @and_sgt_eq( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, 
%b + ret i1 %c +} + +; x >s 13 && x != 17 + +define i1 @and_sgt_ne(i8 %x) { +; CHECK-LABEL: @and_sgt_ne( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x >=s 17 + +define i1 @and_sgt_sge(i8 %x) { +; CHECK-LABEL: @and_sgt_sge( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x >s 17 + +define i1 @and_sgt_sgt(i8 %x) { +; CHECK-LABEL: @and_sgt_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x <=s 17 + +define i1 @and_sgt_sle(i8 %x) { +; CHECK-LABEL: @and_sgt_sle( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x <s 17 + +define i1 @and_sgt_slt(i8 %x) { +; CHECK-LABEL: @and_sgt_slt( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x >=u 17 + +define i1 @and_sgt_uge(i8 %x) { +; CHECK-LABEL: @and_sgt_uge( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret 
i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x >u 17 + +define i1 @and_sgt_ugt(i8 %x) { +; CHECK-LABEL: @and_sgt_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x <=u 17 + +define i1 @and_sgt_ule(i8 %x) { +; CHECK-LABEL: @and_sgt_ule( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 13 && x <u 17 + +define i1 @and_sgt_ult(i8 %x) { +; CHECK-LABEL: @and_sgt_ult( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; sle +; x <=s 13 && x == 17 + +define i1 @and_sle_eq(i8 %x) { +; CHECK-LABEL: @and_sle_eq( +; CHECK-NEXT: ret i1 false +; + %a = icmp sle i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x != 17 + +define i1 @and_sle_ne(i8 %x) { +; CHECK-LABEL: @and_sle_ne( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x >=s 17 + +define i1 @and_sle_sge(i8 %x) { +; CHECK-LABEL: @and_sle_sge( +; CHECK-NEXT: ret i1 false +; + %a = icmp sle i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x >s 17 + +define i1 @and_sle_sgt(i8 %x) { +; CHECK-LABEL: @and_sle_sgt( 
+; CHECK-NEXT: ret i1 false +; + %a = icmp sle i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x <=s 17 + +define i1 @and_sle_sle(i8 %x) { +; CHECK-LABEL: @and_sle_sle( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x <s 17 + +define i1 @and_sle_slt(i8 %x) { +; CHECK-LABEL: @and_sle_slt( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x >=u 17 + +define i1 @and_sle_uge(i8 %x) { +; CHECK-LABEL: @and_sle_uge( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x >u 17 + +define i1 @and_sle_ugt(i8 %x) { +; CHECK-LABEL: @and_sle_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x <=u 17 + +define i1 @and_sle_ule(i8 %x) { +; CHECK-LABEL: @and_sle_ule( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 13 && x <u 17 + +define i1 @and_sle_ult(i8 %x) { +; CHECK-LABEL: @and_sle_ult( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; slt +; x <s 13 && x == 17 + +define i1 @and_slt_eq(i8 %x) { +; CHECK-LABEL: @and_slt_eq( +; CHECK-NEXT: ret i1 false +; + %a = icmp slt i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x != 17 + +define i1 @and_slt_ne(i8 %x) { +; CHECK-LABEL: @and_slt_ne( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x >=s 17 + +define i1 @and_slt_sge(i8 %x) { +; CHECK-LABEL: @and_slt_sge( +; CHECK-NEXT: ret i1 false +; + %a = icmp slt i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x >s 17 + +define i1 @and_slt_sgt(i8 %x) { +; CHECK-LABEL: @and_slt_sgt( +; CHECK-NEXT: ret i1 false +; + %a = icmp slt i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x <=s 17 + +define i1 @and_slt_sle(i8 %x) { +; CHECK-LABEL: @and_slt_sle( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x <s 17 + +define i1 @and_slt_slt(i8 %x) { +; CHECK-LABEL: @and_slt_slt( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x >=u 17 + +define i1 @and_slt_uge(i8 %x) { +; CHECK-LABEL: @and_slt_uge( +; CHECK-NEXT: [[A:%.*]] = 
icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x >u 17 + +define i1 @and_slt_ugt(i8 %x) { +; CHECK-LABEL: @and_slt_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x <=u 17 + +define i1 @and_slt_ule(i8 %x) { +; CHECK-LABEL: @and_slt_ule( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 13 && x <u 17 + +define i1 @and_slt_ult(i8 %x) { +; CHECK-LABEL: @and_slt_ult( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; uge +; x >=u 13 && x == 17 + +define i1 @and_uge_eq(i8 %x) { +; CHECK-LABEL: @and_uge_eq( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x != 17 + +define i1 @and_uge_ne(i8 %x) { +; CHECK-LABEL: @and_uge_ne( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x >=s 17 + 
+define i1 @and_uge_sge(i8 %x) { +; CHECK-LABEL: @and_uge_sge( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x >s 17 + +define i1 @and_uge_sgt(i8 %x) { +; CHECK-LABEL: @and_uge_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x <=s 17 + +define i1 @and_uge_sle(i8 %x) { +; CHECK-LABEL: @and_uge_sle( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x <s 17 + +define i1 @and_uge_slt(i8 %x) { +; CHECK-LABEL: @and_uge_slt( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x >=u 17 + +define i1 @and_uge_uge(i8 %x) { +; CHECK-LABEL: @and_uge_uge( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x >u 17 + +define i1 @and_uge_ugt(i8 %x) { +; CHECK-LABEL: @and_uge_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 
13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x <=u 17 + +define i1 @and_uge_ule(i8 %x) { +; CHECK-LABEL: @and_uge_ule( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 13 && x <u 17 + +define i1 @and_uge_ult(i8 %x) { +; CHECK-LABEL: @and_uge_ult( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ugt +; x >u 13 && x == 17 + +define i1 @and_ugt_eq(i8 %x) { +; CHECK-LABEL: @and_ugt_eq( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x != 17 + +define i1 @and_ugt_ne(i8 %x) { +; CHECK-LABEL: @and_ugt_ne( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x >=s 17 + +define i1 @and_ugt_sge(i8 %x) { +; CHECK-LABEL: @and_ugt_sge( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x >s 17 + +define i1 @and_ugt_sgt(i8 %x) { +; CHECK-LABEL: @and_ugt_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: 
[[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x <=s 17 + +define i1 @and_ugt_sle(i8 %x) { +; CHECK-LABEL: @and_ugt_sle( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x <s 17 + +define i1 @and_ugt_slt(i8 %x) { +; CHECK-LABEL: @and_ugt_slt( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x >=u 17 + +define i1 @and_ugt_uge(i8 %x) { +; CHECK-LABEL: @and_ugt_uge( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x >u 17 + +define i1 @and_ugt_ugt(i8 %x) { +; CHECK-LABEL: @and_ugt_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x <=u 17 + +define i1 @and_ugt_ule(i8 %x) { +; CHECK-LABEL: @and_ugt_ule( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 13 && x <u 17 + +define i1 @and_ugt_ult(i8 %x) { +; CHECK-LABEL: @and_ugt_ult( +; CHECK-NEXT: 
[[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ule +; x <=u 13 && x == 17 + +define i1 @and_ule_eq(i8 %x) { +; CHECK-LABEL: @and_ule_eq( +; CHECK-NEXT: ret i1 false +; + %a = icmp ule i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x != 17 + +define i1 @and_ule_ne(i8 %x) { +; CHECK-LABEL: @and_ule_ne( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x >=s 17 + +define i1 @and_ule_sge(i8 %x) { +; CHECK-LABEL: @and_ule_sge( +; CHECK-NEXT: ret i1 false +; + %a = icmp ule i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x >s 17 + +define i1 @and_ule_sgt(i8 %x) { +; CHECK-LABEL: @and_ule_sgt( +; CHECK-NEXT: ret i1 false +; + %a = icmp ule i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x <=s 17 + +define i1 @and_ule_sle(i8 %x) { +; CHECK-LABEL: @and_ule_sle( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x <s 17 + +define i1 @and_ule_slt(i8 %x) { +; CHECK-LABEL: @and_ule_slt( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x >=u 17 + +define i1 @and_ule_uge(i8 %x) { +; CHECK-LABEL: 
@and_ule_uge( +; CHECK-NEXT: ret i1 false +; + %a = icmp ule i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x >u 17 + +define i1 @and_ule_ugt(i8 %x) { +; CHECK-LABEL: @and_ule_ugt( +; CHECK-NEXT: ret i1 false +; + %a = icmp ule i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x <=u 17 + +define i1 @and_ule_ule(i8 %x) { +; CHECK-LABEL: @and_ule_ule( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 13 && x <u 17 + +define i1 @and_ule_ult(i8 %x) { +; CHECK-LABEL: @and_ule_ult( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ult +; x <u 13 && x == 17 + +define i1 @and_ult_eq(i8 %x) { +; CHECK-LABEL: @and_ult_eq( +; CHECK-NEXT: ret i1 false +; + %a = icmp ult i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x != 17 + +define i1 @and_ult_ne(i8 %x) { +; CHECK-LABEL: @and_ult_ne( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x >=s 17 + +define i1 @and_ult_sge(i8 %x) { +; CHECK-LABEL: @and_ult_sge( +; CHECK-NEXT: ret i1 false +; + %a = icmp ult i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x >s 17 + +define i1 @and_ult_sgt(i8 %x) { +; CHECK-LABEL: @and_ult_sgt( +; CHECK-NEXT: ret i1 false +; + %a = icmp ult i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + 
ret i1 %c +} + +; x <u 13 && x <=s 17 + +define i1 @and_ult_sle(i8 %x) { +; CHECK-LABEL: @and_ult_sle( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x <s 17 + +define i1 @and_ult_slt(i8 %x) { +; CHECK-LABEL: @and_ult_slt( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x >=u 17 + +define i1 @and_ult_uge(i8 %x) { +; CHECK-LABEL: @and_ult_uge( +; CHECK-NEXT: ret i1 false +; + %a = icmp ult i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x >u 17 + +define i1 @and_ult_ugt(i8 %x) { +; CHECK-LABEL: @and_ult_ugt( +; CHECK-NEXT: ret i1 false +; + %a = icmp ult i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x <=u 17 + +define i1 @and_ult_ule(i8 %x) { +; CHECK-LABEL: @and_ult_ule( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 13 && x <u 17 + +define i1 @and_ult_ult(i8 %x) { +; CHECK-LABEL: @and_ult_ult( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; eq +; x == 23 && x == 17 + +define i1 @and_eq_eq_swap(i8 %x) { +; CHECK-LABEL: @and_eq_eq_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 23 + %b = icmp eq i8 %x, 
17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x != 17 + +define i1 @and_eq_ne_swap(i8 %x) { +; CHECK-LABEL: @and_eq_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x >=s 17 + +define i1 @and_eq_sge_swap(i8 %x) { +; CHECK-LABEL: @and_eq_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x >s 17 + +define i1 @and_eq_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_eq_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x <=s 17 + +define i1 @and_eq_sle_swap(i8 %x) { +; CHECK-LABEL: @and_eq_sle_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x <s 17 + +define i1 @and_eq_slt_swap(i8 %x) { +; CHECK-LABEL: @and_eq_slt_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x >=u 17 + +define i1 @and_eq_uge_swap(i8 %x) { +; CHECK-LABEL: @and_eq_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x >u 17 + +define i1 @and_eq_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_eq_ugt_swap( +; CHECK-NEXT: 
[[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x <=u 17 + +define i1 @and_eq_ule_swap(i8 %x) { +; CHECK-LABEL: @and_eq_ule_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x == 23 && x <u 17 + +define i1 @and_eq_ult_swap(i8 %x) { +; CHECK-LABEL: @and_eq_ult_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp eq i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ne +; x != 23 && x == 17 + +define i1 @and_ne_eq_swap(i8 %x) { +; CHECK-LABEL: @and_ne_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x != 17 + +define i1 @and_ne_ne_swap(i8 %x) { +; CHECK-LABEL: @and_ne_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x >=s 17 + +define i1 @and_ne_sge_swap(i8 %x) { +; CHECK-LABEL: @and_ne_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x >s 17 + +define i1 @and_ne_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_ne_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + 
%a = icmp ne i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x <=s 17 + +define i1 @and_ne_sle_swap(i8 %x) { +; CHECK-LABEL: @and_ne_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x <s 17 + +define i1 @and_ne_slt_swap(i8 %x) { +; CHECK-LABEL: @and_ne_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x >=u 17 + +define i1 @and_ne_uge_swap(i8 %x) { +; CHECK-LABEL: @and_ne_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x >u 17 + +define i1 @and_ne_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_ne_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x <=u 17 + +define i1 @and_ne_ule_swap(i8 %x) { +; CHECK-LABEL: @and_ne_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x != 23 && x <u 17 + +define i1 @and_ne_ult_swap(i8 %x) { +; CHECK-LABEL: @and_ne_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; 
CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ne i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; sge +; x >=s 23 && x == 17 + +define i1 @and_sge_eq_swap(i8 %x) { +; CHECK-LABEL: @and_sge_eq_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sge i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x != 17 + +define i1 @and_sge_ne_swap(i8 %x) { +; CHECK-LABEL: @and_sge_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x >=s 17 + +define i1 @and_sge_sge_swap(i8 %x) { +; CHECK-LABEL: @and_sge_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x >s 17 + +define i1 @and_sge_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_sge_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x <=s 17 + +define i1 @and_sge_sle_swap(i8 %x) { +; CHECK-LABEL: @and_sge_sle_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sge i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x <s 17 + +define i1 @and_sge_slt_swap(i8 %x) { +; CHECK-LABEL: @and_sge_slt_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sge i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x >=u 17 + +define i1 
@and_sge_uge_swap(i8 %x) { +; CHECK-LABEL: @and_sge_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x >u 17 + +define i1 @and_sge_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_sge_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x <=u 17 + +define i1 @and_sge_ule_swap(i8 %x) { +; CHECK-LABEL: @and_sge_ule_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sge i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=s 23 && x <u 17 + +define i1 @and_sge_ult_swap(i8 %x) { +; CHECK-LABEL: @and_sge_ult_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sge i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; sgt +; x >s 23 && x == 17 + +define i1 @and_sgt_eq_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_eq_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sgt i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x != 17 + +define i1 @and_sgt_ne_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x >=s 17 + +define i1 @and_sgt_sge_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp 
sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x >s 17 + +define i1 @and_sgt_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x <=s 17 + +define i1 @and_sgt_sle_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_sle_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sgt i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x <s 17 + +define i1 @and_sgt_slt_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_slt_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sgt i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x >=u 17 + +define i1 @and_sgt_uge_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x >u 17 + +define i1 @and_sgt_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x <=u 17 + +define i1 @and_sgt_ule_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_ule_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sgt i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >s 23 && x <u 17 + +define i1 @and_sgt_ult_swap(i8 %x) { +; CHECK-LABEL: @and_sgt_ult_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp sgt i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + 
+; sle +; x <=s 23 && x == 17 + +define i1 @and_sle_eq_swap(i8 %x) { +; CHECK-LABEL: @and_sle_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x != 17 + +define i1 @and_sle_ne_swap(i8 %x) { +; CHECK-LABEL: @and_sle_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x >=s 17 + +define i1 @and_sle_sge_swap(i8 %x) { +; CHECK-LABEL: @and_sle_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x >s 17 + +define i1 @and_sle_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_sle_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x <=s 17 + +define i1 @and_sle_sle_swap(i8 %x) { +; CHECK-LABEL: @and_sle_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x <s 17 + +define i1 @and_sle_slt_swap(i8 %x) { +; CHECK-LABEL: @and_sle_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: 
[[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x >=u 17 + +define i1 @and_sle_uge_swap(i8 %x) { +; CHECK-LABEL: @and_sle_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x >u 17 + +define i1 @and_sle_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_sle_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x <=u 17 + +define i1 @and_sle_ule_swap(i8 %x) { +; CHECK-LABEL: @and_sle_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=s 23 && x <u 17 + +define i1 @and_sle_ult_swap(i8 %x) { +; CHECK-LABEL: @and_sle_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; slt +; x <s 23 && x == 17 + +define i1 @and_slt_eq_swap(i8 %x) { +; CHECK-LABEL: @and_slt_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x != 17 + +define i1 
@and_slt_ne_swap(i8 %x) { +; CHECK-LABEL: @and_slt_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x >=s 17 + +define i1 @and_slt_sge_swap(i8 %x) { +; CHECK-LABEL: @and_slt_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x >s 17 + +define i1 @and_slt_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_slt_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x <=s 17 + +define i1 @and_slt_sle_swap(i8 %x) { +; CHECK-LABEL: @and_slt_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x <s 17 + +define i1 @and_slt_slt_swap(i8 %x) { +; CHECK-LABEL: @and_slt_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x >=u 17 + +define i1 @and_slt_uge_swap(i8 %x) { +; CHECK-LABEL: @and_slt_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: 
ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x >u 17 + +define i1 @and_slt_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_slt_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x <=u 17 + +define i1 @and_slt_ule_swap(i8 %x) { +; CHECK-LABEL: @and_slt_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <s 23 && x <u 17 + +define i1 @and_slt_ult_swap(i8 %x) { +; CHECK-LABEL: @and_slt_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; uge +; x >=u 23 && x == 17 + +define i1 @and_uge_eq_swap(i8 %x) { +; CHECK-LABEL: @and_uge_eq_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp uge i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x != 17 + +define i1 @and_uge_ne_swap(i8 %x) { +; CHECK-LABEL: @and_uge_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x >=s 17 + +define i1 @and_uge_sge_swap(i8 %x) { +; CHECK-LABEL: @and_uge_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; 
CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x >s 17 + +define i1 @and_uge_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_uge_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x <=s 17 + +define i1 @and_uge_sle_swap(i8 %x) { +; CHECK-LABEL: @and_uge_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x <s 17 + +define i1 @and_uge_slt_swap(i8 %x) { +; CHECK-LABEL: @and_uge_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x >=u 17 + +define i1 @and_uge_uge_swap(i8 %x) { +; CHECK-LABEL: @and_uge_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x >u 17 + +define i1 @and_uge_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_uge_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x <=u 17 + +define i1 @and_uge_ule_swap(i8 %x) { +; CHECK-LABEL: 
@and_uge_ule_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp uge i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >=u 23 && x <u 17 + +define i1 @and_uge_ult_swap(i8 %x) { +; CHECK-LABEL: @and_uge_ult_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp uge i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ugt +; x >u 23 && x == 17 + +define i1 @and_ugt_eq_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_eq_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp ugt i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x != 17 + +define i1 @and_ugt_ne_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x >=s 17 + +define i1 @and_ugt_sge_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x >s 17 + +define i1 @and_ugt_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x <=s 17 + +define i1 @and_ugt_sle_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 
%c +} + +; x >u 23 && x <s 17 + +define i1 @and_ugt_slt_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x >=u 17 + +define i1 @and_ugt_uge_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x >u 17 + +define i1 @and_ugt_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x <=u 17 + +define i1 @and_ugt_ule_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_ule_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp ugt i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x >u 23 && x <u 17 + +define i1 @and_ugt_ult_swap(i8 %x) { +; CHECK-LABEL: @and_ugt_ult_swap( +; CHECK-NEXT: ret i1 false +; + %a = icmp ugt i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ule +; x <=u 23 && x == 17 + +define i1 @and_ule_eq_swap(i8 %x) { +; CHECK-LABEL: @and_ule_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x != 17 + +define i1 @and_ule_ne_swap(i8 %x) { +; CHECK-LABEL: @and_ule_ne_swap( +; CHECK-NEXT: [[A:%.*]] 
= icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x >=s 17 + +define i1 @and_ule_sge_swap(i8 %x) { +; CHECK-LABEL: @and_ule_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x >s 17 + +define i1 @and_ule_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_ule_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x <=s 17 + +define i1 @and_ule_sle_swap(i8 %x) { +; CHECK-LABEL: @and_ule_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x <s 17 + +define i1 @and_ule_slt_swap(i8 %x) { +; CHECK-LABEL: @and_ule_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x >=u 17 + +define i1 @and_ule_uge_swap(i8 %x) { +; CHECK-LABEL: @and_ule_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and 
i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x >u 17 + +define i1 @and_ule_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_ule_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x <=u 17 + +define i1 @and_ule_ule_swap(i8 %x) { +; CHECK-LABEL: @and_ule_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <=u 23 && x <u 17 + +define i1 @and_ule_ult_swap(i8 %x) { +; CHECK-LABEL: @and_ule_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; ult +; x <u 23 && x == 17 + +define i1 @and_ult_eq_swap(i8 %x) { +; CHECK-LABEL: @and_ult_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x != 17 + +define i1 @and_ult_ne_swap(i8 %x) { +; CHECK-LABEL: @and_ult_ne_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x >=s 17 + +define i1 @and_ult_sge_swap(i8 %x) { +; CHECK-LABEL: @and_ult_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 
%x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x >s 17 + +define i1 @and_ult_sgt_swap(i8 %x) { +; CHECK-LABEL: @and_ult_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x <=s 17 + +define i1 @and_ult_sle_swap(i8 %x) { +; CHECK-LABEL: @and_ult_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x <s 17 + +define i1 @and_ult_slt_swap(i8 %x) { +; CHECK-LABEL: @and_ult_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x >=u 17 + +define i1 @and_ult_uge_swap(i8 %x) { +; CHECK-LABEL: @and_ult_uge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x >u 17 + +define i1 @and_ult_ugt_swap(i8 %x) { +; CHECK-LABEL: @and_ult_ugt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x <=u 17 + +define i1 
@and_ult_ule_swap(i8 %x) { +; CHECK-LABEL: @and_ult_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; x <u 23 && x <u 17 + +define i1 @and_ult_ult_swap(i8 %x) { +; CHECK-LABEL: @and_ult_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = and i1 %a, %b + ret i1 %c +} + +; Special case - slt is uge +; x <u 31 && x <s 0 + +define i1 @empty2(i32 %x) { +; CHECK-LABEL: @empty2( +; CHECK-NEXT: ret i1 false +; + %a = icmp ult i32 %x, 31 + %b = icmp slt i32 %x, 0 + %c = and i1 %a, %b + ret i1 %c +} + diff --git a/test/Transforms/InstSimplify/shufflevector.ll b/test/Transforms/InstSimplify/shufflevector.ll index c6d180da293f8..e03916c5b90d6 100644 --- a/test/Transforms/InstSimplify/shufflevector.ll +++ b/test/Transforms/InstSimplify/shufflevector.ll @@ -120,8 +120,7 @@ define <4 x i32> @undef_mask(<4 x i32> %x) { define <4 x i32> @identity_mask_0(<4 x i32> %x) { ; CHECK-LABEL: @identity_mask_0( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: ret <4 x i32> [[SHUF]] +; CHECK-NEXT: ret <4 x i32> [[X:%.*]] ; %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ret <4 x i32> %shuf @@ -129,8 +128,7 @@ define <4 x i32> @identity_mask_0(<4 x i32> %x) { define <4 x i32> @identity_mask_1(<4 x i32> %x) { ; CHECK-LABEL: @identity_mask_1( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> undef, <4 x i32> [[X:%.*]], <4 x i32> <i32 4, i32 5, i32 6, i32 7> -; CHECK-NEXT: ret <4 x i32> [[SHUF]] +; CHECK-NEXT: ret <4 x i32> [[X:%.*]] ; %shuf = shufflevector <4 x 
i32> undef, <4 x i32> %x, <4 x i32> <i32 4, i32 5, i32 6, i32 7> ret <4 x i32> %shuf @@ -138,13 +136,32 @@ define <4 x i32> @identity_mask_1(<4 x i32> %x) { define <4 x i32> @pseudo_identity_mask(<4 x i32> %x) { ; CHECK-LABEL: @pseudo_identity_mask( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> [[X]], <4 x i32> <i32 0, i32 1, i32 2, i32 7> -; CHECK-NEXT: ret <4 x i32> [[SHUF]] +; CHECK-NEXT: ret <4 x i32> [[X:%.*]] ; %shuf = shufflevector <4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 7> ret <4 x i32> %shuf } +define <4 x i32> @not_identity_mask(<4 x i32> %x) { +; CHECK-LABEL: @not_identity_mask( +; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> [[X]], <4 x i32> <i32 0, i32 1, i32 2, i32 6> +; CHECK-NEXT: ret <4 x i32> [[SHUF]] +; + %shuf = shufflevector <4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 6> + ret <4 x i32> %shuf +} + +; TODO: Should we simplify if the mask has an undef element? + +define <4 x i32> @possible_identity_mask(<4 x i32> %x) { +; CHECK-LABEL: @possible_identity_mask( +; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> +; CHECK-NEXT: ret <4 x i32> [[SHUF]] +; + %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> + ret <4 x i32> %shuf +} + define <4 x i32> @const_operand(<4 x i32> %x) { ; CHECK-LABEL: @const_operand( ; CHECK-NEXT: ret <4 x i32> <i32 42, i32 45, i32 44, i32 43> @@ -155,10 +172,7 @@ define <4 x i32> @const_operand(<4 x i32> %x) { define <4 x i32> @merge(<4 x i32> %x) { ; CHECK-LABEL: @merge( -; CHECK-NEXT: [[LOWER:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <2 x i32> <i32 1, i32 0> -; CHECK-NEXT: [[UPPER:%.*]] = shufflevector <4 x i32> [[X]], <4 x i32> undef, <2 x i32> <i32 2, i32 3> -; CHECK-NEXT: [[MERGED:%.*]] = shufflevector <2 x i32> [[UPPER]], <2 x i32> [[LOWER]], <4 x i32> <i32 3, i32 2, i32 0, i32 1> -; 
CHECK-NEXT: ret <4 x i32> [[MERGED]] +; CHECK-NEXT: ret <4 x i32> [[X:%.*]] ; %lower = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 1, i32 0> %upper = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 2, i32 3> @@ -166,16 +180,24 @@ define <4 x i32> @merge(<4 x i32> %x) { ret <4 x i32> %merged } +; This crosses lanes from the source op. + +define <4 x i32> @not_merge(<4 x i32> %x) { +; CHECK-LABEL: @not_merge( +; CHECK-NEXT: [[L:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <2 x i32> <i32 0, i32 1> +; CHECK-NEXT: [[U:%.*]] = shufflevector <4 x i32> [[X]], <4 x i32> undef, <2 x i32> <i32 2, i32 3> +; CHECK-NEXT: [[MERGED:%.*]] = shufflevector <2 x i32> [[U]], <2 x i32> [[L]], <4 x i32> <i32 3, i32 2, i32 0, i32 1> +; CHECK-NEXT: ret <4 x i32> [[MERGED]] +; + %l = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 0, i32 1> + %u = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %merged = shufflevector <2 x i32> %u, <2 x i32> %l, <4 x i32> <i32 3, i32 2, i32 0, i32 1> + ret <4 x i32> %merged +} + define <8 x double> @extract_and_concat(<8 x double> %x) { ; CHECK-LABEL: @extract_and_concat( -; CHECK-NEXT: [[S1:%.*]] = shufflevector <8 x double> [[X:%.*]], <8 x double> undef, <2 x i32> <i32 0, i32 1> -; CHECK-NEXT: [[S2:%.*]] = shufflevector <8 x double> [[X]], <8 x double> undef, <2 x i32> <i32 2, i32 3> -; CHECK-NEXT: [[S3:%.*]] = shufflevector <8 x double> [[X]], <8 x double> undef, <2 x i32> <i32 4, i32 5> -; CHECK-NEXT: [[S4:%.*]] = shufflevector <8 x double> [[X]], <8 x double> undef, <2 x i32> <i32 6, i32 7> -; CHECK-NEXT: [[S5:%.*]] = shufflevector <2 x double> [[S1]], <2 x double> [[S2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[S6:%.*]] = shufflevector <2 x double> [[S3]], <2 x double> [[S4]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[S7:%.*]] = shufflevector <4 x double> [[S5]], <4 x double> [[S6]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7> -; CHECK-NEXT: ret <8 x double> [[S7]] +; CHECK-NEXT: ret <8 x double> [[X:%.*]] ; %s1 = shufflevector <8 x double> %x, <8 x double> undef, <2 x i32> <i32 0, i32 1> %s2 = shufflevector <8 x double> %x, <8 x double> undef, <2 x i32> <i32 2, i32 3> @@ -191,14 +213,7 @@ define <8 x double> @extract_and_concat(<8 x double> %x) { define <8 x i64> @PR30630(<8 x i64> %x) { ; CHECK-LABEL: @PR30630( -; CHECK-NEXT: [[S1:%.*]] = shufflevector <8 x i64> [[X:%.*]], <8 x i64> undef, <2 x i32> <i32 0, i32 4> -; CHECK-NEXT: [[S2:%.*]] = shufflevector <8 x i64> [[X]], <8 x i64> undef, <2 x i32> <i32 1, i32 5> -; CHECK-NEXT: [[S3:%.*]] = shufflevector <8 x i64> [[X]], <8 x i64> undef, <2 x i32> <i32 2, i32 6> -; CHECK-NEXT: [[S4:%.*]] = shufflevector <8 x i64> [[X]], <8 x i64> undef, <2 x i32> <i32 3, i32 7> -; CHECK-NEXT: [[S5:%.*]] = shufflevector <2 x i64> [[S1]], <2 x i64> [[S2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[S6:%.*]] = shufflevector <2 x i64> [[S3]], <2 x i64> [[S4]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[S7:%.*]] = shufflevector <4 x i64> [[S5]], <4 x i64> [[S6]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7> -; CHECK-NEXT: ret <8 x i64> [[S7]] +; CHECK-NEXT: ret <8 x i64> [[X:%.*]] ; %s1 = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 0, i32 4> %s2 = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 1, i32 5> diff --git a/test/Transforms/InstSimplify/vector_gep.ll b/test/Transforms/InstSimplify/vector_gep.ll index 54887e99ee380..b8e61a05cc0c7 100644 --- a/test/Transforms/InstSimplify/vector_gep.ll +++ b/test/Transforms/InstSimplify/vector_gep.ll @@ -61,4 +61,28 @@ define <16 x i32*> @test6() { ; CHECK-NEXT: ret <16 x i32*> getelementptr ([24 x [42 x [3 x i32]]], [24 x [42 x [3 x i32]]]* @v, <16 x i64> zeroinitializer, <16 x i64> zeroinitializer, <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, <16 x 
i64> zeroinitializer) %VectorGep = getelementptr [24 x [42 x [3 x i32]]], [24 x [42 x [3 x i32]]]* @v, i64 0, i64 0, <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, i64 0 ret <16 x i32*> %VectorGep -}
\ No newline at end of file +} + +; PR32697 +; CHECK-LABEL: tinkywinky( +; CHECK-NEXT: ret <4 x i8*> undef +define <4 x i8*> @tinkywinky() { + %patatino = getelementptr i8, i8* undef, <4 x i64> undef + ret <4 x i8*> %patatino +} + +; PR32697 +; CHECK-LABEL: dipsy( +; CHECK-NEXT: ret <4 x i8*> undef +define <4 x i8*> @dipsy() { + %patatino = getelementptr i8, <4 x i8 *> undef, <4 x i64> undef + ret <4 x i8*> %patatino +} + +; PR32697 +; CHECK-LABEL: laalaa( +; CHECK-NEXT: ret <4 x i8*> undef +define <4 x i8*> @laalaa() { + %patatino = getelementptr i8, <4 x i8 *> undef, i64 undef + ret <4 x i8*> %patatino +} diff --git a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll index a9d1e87587662..728f5dcac7b19 100644 --- a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll +++ b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll @@ -198,7 +198,7 @@ for.end: ; preds = %for.body ; @testNeon is an important example of the nead for ivchains. ; -; Currently we have three extra add.w's that keep the store address +; Currently we have two extra add.w's that keep the store address ; live past the next increment because ISEL is unfortunately undoing ; the store chain. ISEL also fails to convert all but one of the stores to ; post-increment addressing. However, the loads should use @@ -207,12 +207,10 @@ for.end: ; preds = %for.body ; ; A9: testNeon: ; A9: %.lr.ph -; A9-NOT: lsl.w -; A9-NOT: {{ldr|str|adds|add r}} -; A9: vst1.8 {{.*}} [r{{[0-9]+}}]! 
-; A9-NOT: {{ldr|str|adds|add r}} ; A9: add.w r +; A9-NOT: lsl.w ; A9-NOT: {{ldr|str|adds|add r}} +; A9: vst1.8 {{.*}} [r{{[0-9]+}}], r{{[0-9]+}} ; A9: add.w r ; A9-NOT: {{ldr|str|adds|add r}} ; A9-NOT: add.w r diff --git a/test/Transforms/LoopUnroll/peel-loop-negative.ll b/test/Transforms/LoopUnroll/peel-loop-negative.ll new file mode 100644 index 0000000000000..eab609a3002d1 --- /dev/null +++ b/test/Transforms/LoopUnroll/peel-loop-negative.ll @@ -0,0 +1,28 @@ +; RUN: opt < %s -S -loop-unroll -unroll-threshold=800 -unroll-peel-max-count=0 | FileCheck %s + +; We should not peel this loop even though we can, because the max count is set +; to zero. +define i32 @invariant_backedge_neg_1(i32 %a, i32 %b) { +; CHECK-LABEL: @invariant_backedge_neg_1 +; CHECK-NOT loop.peel{{.*}}: +; CHECK: loop: +; CHECK: %i = phi +; CHECK: %sum = phi +; CHECK: %plus = phi +entry: + br label %loop + +loop: + %i = phi i32 [ 0, %entry ], [ %inc, %loop ] + %sum = phi i32 [ 0, %entry ], [ %incsum, %loop ] + %plus = phi i32 [ %a, %entry ], [ %b, %loop ] + + %incsum = add i32 %sum, %plus + %inc = add i32 %i, 1 + %cmp = icmp slt i32 %i, 1000 + + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %sum +} diff --git a/test/Transforms/LoopUnroll/peel-loop-not-forced.ll b/test/Transforms/LoopUnroll/peel-loop-not-forced.ll index 3dcac87f8242f..8691481acc12f 100644 --- a/test/Transforms/LoopUnroll/peel-loop-not-forced.ll +++ b/test/Transforms/LoopUnroll/peel-loop-not-forced.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -loop-unroll -unroll-threshold=4 | FileCheck %s +; RUN: opt < %s -S -loop-unroll -unroll-threshold=30 | FileCheck %s define i32 @invariant_backedge_1(i32 %a, i32 %b) { ; CHECK-LABEL: @invariant_backedge_1 @@ -25,10 +25,112 @@ exit: ret i32 %sum } -; Peeling should fail due to method size. define i32 @invariant_backedge_2(i32 %a, i32 %b) { +; This loop should be peeled twice because it has a Phi which becomes invariant +; starting from 3rd iteration. 
; CHECK-LABEL: @invariant_backedge_2 -; CHECK-NOT: loop.peel: +; CHECK: loop.peel{{.*}}: +; CHECK: loop.peel{{.*}}: +; CHECK: %i = phi +; CHECK: %sum = phi +; CHECK-NOT: %half.inv = phi +; CHECK-NOT: %plus = phi +entry: + br label %loop + +loop: + %i = phi i32 [ 0, %entry ], [ %inc, %loop ] + %sum = phi i32 [ 0, %entry ], [ %incsum, %loop ] + %half.inv = phi i32 [ %a, %entry ], [ %b, %loop ] + %plus = phi i32 [ %a, %entry ], [ %half.inv, %loop ] + + %incsum = add i32 %sum, %plus + %inc = add i32 %i, 1 + %cmp = icmp slt i32 %i, 1000 + + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %sum +} + +define i32 @invariant_backedge_3(i32 %a, i32 %b) { +; This loop should be peeled thrice because it has a Phi which becomes invariant +; starting from 4th iteration. +; CHECK-LABEL: @invariant_backedge_3 +; CHECK: loop.peel{{.*}}: +; CHECK: loop.peel{{.*}}: +; CHECK: loop.peel{{.*}}: +; CHECK: %i = phi +; CHECK: %sum = phi +; CHECK-NOT: %half.inv = phi +; CHECK-NOT: %half.inv.2 = phi +; CHECK-NOT: %plus = phi +entry: + br label %loop + +loop: + %i = phi i32 [ 0, %entry ], [ %inc, %loop ] + %sum = phi i32 [ 0, %entry ], [ %incsum, %loop ] + %half.inv = phi i32 [ %a, %entry ], [ %b, %loop ] + %half.inv.2 = phi i32 [ %a, %entry ], [ %half.inv, %loop ] + %plus = phi i32 [ %a, %entry ], [ %half.inv.2, %loop ] + + %incsum = add i32 %sum, %plus + %inc = add i32 %i, 1 + %cmp = icmp slt i32 %i, 1000 + + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %sum +} + +define i32 @invariant_backedge_limited_by_size(i32 %a, i32 %b) { +; This loop should normally be peeled thrice because it has a Phi which becomes +; invariant starting from 4th iteration, but the size of the loop only allows +; us to peel twice because we are restricted to 30 instructions in resulting +; code. Thus, %plus Phi node should stay in loop even despite its backedge +; input is an invariant. 
+; CHECK-LABEL: @invariant_backedge_limited_by_size +; CHECK: loop.peel{{.*}}: +; CHECK: loop.peel{{.*}}: +; CHECK: %i = phi +; CHECK: %sum = phi +; CHECK: %plus = phi i32 [ %a, {{.*}} ], [ %b, %loop ] +; CHECK-NOT: %half.inv = phi +; CHECK-NOT: %half.inv.2 = phi +entry: + br label %loop + +loop: + %i = phi i32 [ 0, %entry ], [ %inc, %loop ] + %sum = phi i32 [ 0, %entry ], [ %incsum, %loop ] + %half.inv = phi i32 [ %a, %entry ], [ %b, %loop ] + %half.inv.2 = phi i32 [ %a, %entry ], [ %half.inv, %loop ] + %plus = phi i32 [ %a, %entry ], [ %half.inv.2, %loop ] + + %incsum = add i32 %sum, %plus + %inc = add i32 %i, 1 + %cmp = icmp slt i32 %i, 1000 + + %incsum2 = add i32 %incsum, %plus + %incsum3 = add i32 %incsum, %plus + %incsum4 = add i32 %incsum, %plus + %incsum5 = add i32 %incsum, %plus + %incsum6 = add i32 %incsum, %plus + %incsum7 = add i32 %incsum, %plus + + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %sum +} + +; Peeling should fail due to method size. +define i32 @invariant_backedge_negative(i32 %a, i32 %b) { +; CHECK-LABEL: @invariant_backedge_negative +; CHECK-NOT: loop.peel{{.*}}: ; CHECK: loop: ; CHECK: %i = phi ; CHECK: %sum = phi @@ -43,6 +145,47 @@ loop: %incsum = add i32 %sum, %plus %incsum2 = add i32 %incsum, %plus + %incsum3 = add i32 %incsum, %plus + %incsum4 = add i32 %incsum, %plus + %incsum5 = add i32 %incsum, %plus + %incsum6 = add i32 %incsum, %plus + %incsum7 = add i32 %incsum, %plus + %incsum8 = add i32 %incsum, %plus + %incsum9 = add i32 %incsum, %plus + %incsum10 = add i32 %incsum, %plus + %incsum11 = add i32 %incsum, %plus + %incsum12 = add i32 %incsum, %plus + %incsum13 = add i32 %incsum, %plus + %incsum14 = add i32 %incsum, %plus + %incsum15 = add i32 %incsum, %plus + %inc = add i32 %i, 1 + %cmp = icmp slt i32 %i, 1000 + + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %sum +} + +define i32 @cycled_phis(i32 %a, i32 %b) { +; Make sure that we do not crash working with cycled Phis and don't peel it. 
+; TODO: Actually this loop should be partially unrolled with factor 2. +; CHECK-LABEL: @cycled_phis +; CHECK-NOT: loop.peel{{.*}}: +; CHECK: loop: +; CHECK: %i = phi +; CHECK: %phi.a = phi +; CHECK: %phi.b = phi +; CHECK: %sum = phi +entry: + br label %loop + +loop: + %i = phi i32 [ 0, %entry ], [ %inc, %loop ] + %phi.a = phi i32 [ %a, %entry ], [ %phi.b, %loop ] + %phi.b = phi i32 [ %b, %entry ], [ %phi.a, %loop ] + %sum = phi i32 [ 0, %entry], [ %incsum, %loop ] + %incsum = add i32 %sum, %phi.a %inc = add i32 %i, 1 %cmp = icmp slt i32 %i, 1000 diff --git a/test/Transforms/NewGVN/non-integral-pointers.ll b/test/Transforms/NewGVN/non-integral-pointers.ll new file mode 100644 index 0000000000000..75b8285d51f9a --- /dev/null +++ b/test/Transforms/NewGVN/non-integral-pointers.ll @@ -0,0 +1,39 @@ +; RUN: opt -newgvn -S < %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4" +target triple = "x86_64-unknown-linux-gnu" + +define void @f0(i1 %alwaysFalse, i64 %val, i64* %loc) { +; CHECK-LABEL: @f0( +; CHECK-NOT: inttoptr +; CHECK-NOT: ptrtoint + entry: + store i64 %val, i64* %loc + br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken + + neverTaken: + %loc.bc = bitcast i64* %loc to i8 addrspace(4)** + %ptr = load i8 addrspace(4)*, i8 addrspace(4)** %loc.bc + store i8 5, i8 addrspace(4)* %ptr + ret void + + alwaysTaken: + ret void +} + +define i64 @f1(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) { +; CHECK-LABEL: @f1( +; CHECK-NOT: inttoptr +; CHECK-NOT: ptrtoint + entry: + store i8 addrspace(4)* %val, i8 addrspace(4)** %loc + br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken + + neverTaken: + %loc.bc = bitcast i8 addrspace(4)** %loc to i64* + %int = load i64, i64* %loc.bc + ret i64 %int + + alwaysTaken: + ret i64 42 +} diff --git a/test/Transforms/PhaseOrdering/globalaa-retained.ll b/test/Transforms/PhaseOrdering/globalaa-retained.ll index bce193b5e851b..47b8e4d7a9edd 100644 --- 
a/test/Transforms/PhaseOrdering/globalaa-retained.ll +++ b/test/Transforms/PhaseOrdering/globalaa-retained.ll @@ -5,6 +5,37 @@ target triple = "aarch64" @v = internal unnamed_addr global i32 0, align 4 @p = common global i32* null, align 8 + +; This test checks that a number of loads and stores are eliminated, +; that can only be eliminated based on GlobalsAA information. As such, +; it tests that GlobalsAA information is retained until the passes +; that perform this optimization, and it protects against accidentally +; dropping the GlobalsAA information earlier in the pipeline, which +; has happened a few times. + +; GlobalsAA invalidation might happen later in the FunctionPassManager +; pipeline than the optimization eliminating unnecessary loads/stores. +; Since GlobalsAA is a module-level analysis, any FunctionPass +; invalidating the GlobalsAA information will affect FunctionPass +; pipelines that execute later. For example, assume a FunctionPass1 | +; FunctionPass2 pipeline and 2 functions to be processed: f1 and f2. +; Assume furthermore that FunctionPass1 uses GlobalsAA info to do an +; optimization, and FunctionPass2 invalidates GlobalsAA. Assume the +; function passes run in the following order: FunctionPass1(f1), +; FunctionPass2(f1), FunctionPass1(f2), FunctionPass2(f2). Then +; FunctionPass1 will not be able to optimize f2, since GlobalsAA will +; have been invalidated in FuntionPass2(f1). + +; To try and also test this scenario, there is an empty function +; before and after the function we're checking so that one of them +; will be processed by the whole set of FunctionPasses before @f. That +; will ensure that if the invalidation happens, it happens before the +; actual optimizations on @f start. 
+define void @bar() { +entry: + ret void +} + ; Function Attrs: norecurse nounwind define void @f(i32 %n) { entry: @@ -19,8 +50,17 @@ entry: ret void } -; check variable v is loaded only once after optimization, which should be -; prove that globalsAA survives until the optimization that can use it to -; optimize away the duplicate load/stores on variable v. +; check variable v is loaded/stored only once after optimization, +; which should be prove that globalsAA survives until the optimization +; that can use it to optimize away the duplicate load/stores on +; variable v. ; CHECK: load i32, i32* @v, align 4 +; CHECK: store i32 {{.*}}, i32* @v, align 4 ; CHECK-NOT: load i32, i32* @v, align 4 +; CHECK-NOT: store i32 {{.*}}, i32* @v, align 4 + +; Same as @bar above, in case the functions are processed in reverse order. +define void @bar2() { +entry: + ret void +} diff --git a/test/Transforms/SLPVectorizer/X86/reorder_phi.ll b/test/Transforms/SLPVectorizer/X86/reorder_phi.ll new file mode 100644 index 0000000000000..f7f58d7350b30 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/reorder_phi.ll @@ -0,0 +1,54 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=corei7-avx | FileCheck %s + +%struct.complex = type { float, float } + +; CHECK-LABEL: void @foo +define void @foo (%struct.complex* %A, %struct.complex* %B, %struct.complex* %Result) { + +entry: + %0 = add i64 256, 0 + br label %loop + +; CHECK-LABEL: loop +; CHECK: [[REG0:%[0-9]+]] = phi <2 x float> {{.*}}[ [[REG1:%[0-9]+]], %loop ] +; CHECK: [[REG2:%[0-9]+]] = load <2 x float>, <2 x float>* +; CHECK: [[REG3:%[0-9]+]] = fmul <2 x float> [[REG2]] +; CHECK: [[REG4:%[0-9]+]] = fmul <2 x float> +; CHECK: fsub <2 x float> [[REG3]], [[REG4]] +; CHECK: fadd <2 x float> [[REG3]], [[REG4]] +; CHECK: shufflevector <2 x float> +; CHECK: [[REG1]] = fadd <2 x float>{{.*}}[[REG0]] +loop: + + %1 = phi i64 [ 0, %entry ], [ %20, %loop ] + %2 = phi float [ 0.000000e+00, %entry ], [ %19, %loop ] + %3 
= phi float [ 0.000000e+00, %entry ], [ %18, %loop ] + %4 = getelementptr inbounds %"struct.complex", %"struct.complex"* %A, i64 %1, i32 0 + %5 = load float, float* %4, align 4 + %6 = getelementptr inbounds %"struct.complex", %"struct.complex"* %A, i64 %1, i32 1 + %7 = load float, float* %6, align 4 + %8 = getelementptr inbounds %"struct.complex", %"struct.complex"* %B, i64 %1, i32 0 + %9 = load float, float* %8, align 4 + %10 = getelementptr inbounds %"struct.complex", %"struct.complex"* %B, i64 %1, i32 1 + %11 = load float, float* %10, align 4 + %12 = fmul float %5, %9 + %13 = fmul float %7, %11 + %14 = fsub float %12, %13 + %15 = fmul float %7, %9 + %16 = fmul float %5, %11 + %17 = fadd float %15, %16 + %18 = fadd float %3, %14 + %19 = fadd float %2, %17 + %20 = add nuw nsw i64 %1, 1 + %21 = icmp eq i64 %20, %0 + br i1 %21, label %exit, label %loop + +exit: + %22 = getelementptr inbounds %"struct.complex", %"struct.complex"* %Result, i32 0, i32 0 + store float %18, float* %22, align 4 + %23 = getelementptr inbounds %"struct.complex", %"struct.complex"* %Result, i32 0, i32 1 + store float %19, float* %23, align 4 + + ret void + +} diff --git a/test/Transforms/SafeStack/X86/debug-loc.ll b/test/Transforms/SafeStack/X86/debug-loc.ll index fc0b6f911f7ee..88cda693b2932 100644 --- a/test/Transforms/SafeStack/X86/debug-loc.ll +++ b/test/Transforms/SafeStack/X86/debug-loc.ll @@ -37,10 +37,10 @@ entry: ; CHECK-DAG: ![[VAR_ARG]] = !DILocalVariable(name: "zzz" ; 100 aligned up to 8 -; CHECK-DAG: ![[EXPR_ARG]] = !DIExpression(DW_OP_deref, DW_OP_minus, 104 +; CHECK-DAG: ![[EXPR_ARG]] = !DIExpression(DW_OP_minus, 104) ; CHECK-DAG: ![[VAR_LOCAL]] = !DILocalVariable(name: "xxx" -; CHECK-DAG: ![[EXPR_LOCAL]] = !DIExpression(DW_OP_deref, DW_OP_minus, 208 +; CHECK-DAG: ![[EXPR_LOCAL]] = !DIExpression(DW_OP_minus, 208) ; Function Attrs: nounwind readnone declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 diff --git 
a/test/Transforms/SampleProfile/Inputs/indirect-call.prof b/test/Transforms/SampleProfile/Inputs/indirect-call.prof index 428d4cedef5a8..aaf9ec15d02e3 100644 --- a/test/Transforms/SampleProfile/Inputs/indirect-call.prof +++ b/test/Transforms/SampleProfile/Inputs/indirect-call.prof @@ -11,3 +11,9 @@ test_noinline:3000:0 test_direct:3000:0 5: foo_direct:3000 1: 3000 +test_inline_strip:3000:0 + 5: foo_inline_strip:3000 + 1: 3000 +test_inline_strip_confilict:3000:0 + 5: foo_inline_strip_conflict:3000 + 1: 3000 diff --git a/test/Transforms/SampleProfile/indirect-call.ll b/test/Transforms/SampleProfile/indirect-call.ll index 5a4913d6358f5..4647dd4212998 100644 --- a/test/Transforms/SampleProfile/indirect-call.ll +++ b/test/Transforms/SampleProfile/indirect-call.ll @@ -29,6 +29,34 @@ define void @test_inline(i64* (i32*)*, i32* %x) !dbg !3 { ret void } +; CHECK-LABEL: @test_inline_strip +; If the indirect call is promoted and inlined in profile, and the callee name +; is stripped we should promote and inline it. +define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !3 { + %2 = alloca i64* (i32*)* + store i64* (i32*)* %0, i64* (i32*)** %2 + %3 = load i64* (i32*)*, i64* (i32*)** %2 +; CHECK: icmp {{.*}} @foo_inline_strip.suffix +; CHECK: if.true.direct_targ: +; CHECK-NOT: call +; CHECK: if.false.orig_indirect: +; CHECK: call + call i64* %3(i32* %x), !dbg !5 + ret void +} + +; CHECK-LABEL: @test_inline_strip_conflict +; If the indirect call is promoted and inlined in profile, and the callee name +; is stripped, but have more than 1 potential match, we should not promote. +define void @test_inline_strip_conflict(i64* (i32*)*, i32* %x) !dbg !3 { + %2 = alloca i64* (i32*)* + store i64* (i32*)* %0, i64* (i32*)** %2 + %3 = load i64* (i32*)*, i64* (i32*)** %2 +; CHECK-NOT: if.true.direct_targ: + call i64* %3(i32* %x), !dbg !5 + ret void +} + ; CHECK-LABEL: @test_noinline ; If the indirect call target is not available, we should not promote it. 
define void @test_noinline(void ()*) !dbg !3 { @@ -47,6 +75,22 @@ define i32* @foo_inline1(i32* %x) !dbg !3 { ret i32* %x } +define i32* @foo_inline_strip.suffix(i32* %x) !dbg !3 { + ret i32* %x +} + +define i32* @foo_inline_strip_conflict.suffix1(i32* %x) !dbg !3 { + ret i32* %x +} + +define i32* @foo_inline_strip_conflict.suffix2(i32* %x) !dbg !3 { + ret i32* %x +} + +define i32* @foo_inline_strip_conflict.suffix3(i32* %x) !dbg !3 { + ret i32* %x +} + define i32* @foo_inline2(i32* %x) !dbg !3 { ret i32* %x } diff --git a/test/Transforms/StructurizeCFG/invert-compare.ll b/test/Transforms/StructurizeCFG/invert-compare.ll new file mode 100644 index 0000000000000..87d9c6d105694 --- /dev/null +++ b/test/Transforms/StructurizeCFG/invert-compare.ll @@ -0,0 +1,60 @@ +; RUN: opt -S -structurizecfg %s | FileCheck %s + +; CHECK-LABEL: @directly_invert_compare_condition_jump_into_loop( +; CHECK: %cmp0 = fcmp uge float %arg0, %arg1 +; CHECK-NEXT: br i1 %cmp0, label %end.loop, label %Flow +define void @directly_invert_compare_condition_jump_into_loop(i32 addrspace(1)* %out, i32 %n, float %arg0, float %arg1) #0 { +entry: + br label %for.body + +for.body: + %i = phi i32 [0, %entry], [%i.inc, %end.loop] + %ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %i + store i32 %i, i32 addrspace(1)* %ptr, align 4 + %cmp0 = fcmp olt float %arg0, %arg1 + br i1 %cmp0, label %mid.loop, label %end.loop + +mid.loop: + store i32 333, i32 addrspace(1)* %out, align 4 + br label %for.end + +end.loop: + %i.inc = add i32 %i, 1 + %cmp = icmp ne i32 %i.inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: + ret void +} + +; CHECK-LABEL: @invert_multi_use_compare_condition_jump_into_loop( +; CHECK: %cmp0 = fcmp olt float %arg0, %arg1 +; CHECK: store volatile i1 %cmp0, i1 addrspace(1)* undef +; CHECK: %0 = xor i1 %cmp0, true +; CHECK-NEXT: br i1 %0, label %end.loop, label %Flow +define void @invert_multi_use_compare_condition_jump_into_loop(i32 addrspace(1)* %out, i32 %n, float %arg0, 
float %arg1) #0 { +entry: + br label %for.body + +for.body: + %i = phi i32 [0, %entry], [%i.inc, %end.loop] + %ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %i + store i32 %i, i32 addrspace(1)* %ptr, align 4 + %cmp0 = fcmp olt float %arg0, %arg1 + store volatile i1 %cmp0, i1 addrspace(1)* undef + br i1 %cmp0, label %mid.loop, label %end.loop + +mid.loop: + store i32 333, i32 addrspace(1)* %out, align 4 + br label %for.end + +end.loop: + %i.inc = add i32 %i, 1 + %cmp = icmp ne i32 %i.inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: + ret void +} + +attributes #0 = { nounwind }
\ No newline at end of file diff --git a/test/Transforms/StructurizeCFG/one-loop-multiple-backedges.ll b/test/Transforms/StructurizeCFG/one-loop-multiple-backedges.ll index 668a1e99d814d..aff59642cbcb4 100644 --- a/test/Transforms/StructurizeCFG/one-loop-multiple-backedges.ll +++ b/test/Transforms/StructurizeCFG/one-loop-multiple-backedges.ll @@ -11,8 +11,8 @@ bb: bb3: ; preds = %bb7, %bb %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb7 ] %tmp4 = fcmp ult float %arg1, 3.500000e+00 -; CHECK: %0 = xor i1 %tmp4, true -; CHECK: br i1 %0, label %bb5, label %Flow +; CHECK: %tmp4 = fcmp oge float %arg1, 3.500000e+00 +; CHECK: br i1 %tmp4, label %bb5, label %Flow br i1 %tmp4, label %bb7, label %bb5 ; CHECK: bb5: @@ -22,7 +22,8 @@ bb5: ; preds = %bb3 br i1 %tmp6, label %bb10, label %bb7 ; CHECK: Flow: -; CHECK: br i1 %3, label %bb7, label %Flow1 +; CHECK: %1 = phi i1 [ %tmp6, %bb5 ], [ %tmp4, %bb3 ] +; CHECK-NEXT: br i1 %1, label %bb7, label %Flow1 ; CHECK: bb7 bb7: ; preds = %bb5, %bb3 @@ -32,9 +33,10 @@ bb7: ; preds = %bb5, %bb3 br i1 %tmp9, label %bb3, label %bb10 ; CHECK: Flow1: -; CHECK: br i1 %7, label %bb10, label %bb3 +; CHECK: %4 = phi i1 [ %tmp9, %bb7 ], [ true, %Flow ] +; CHECK-NEXT: br i1 %4, label %bb10, label %bb3 -; CHECK: bb10 +; CHECK: bb10: bb10: ; preds = %bb7, %bb5 %tmp11 = phi i32 [ 15, %bb5 ], [ 255, %bb7 ] store i32 %tmp11, i32 addrspace(1)* %arg, align 4 diff --git a/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll b/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll index ba9aa29130611..a8835f19d447f 100644 --- a/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll +++ b/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll @@ -59,7 +59,8 @@ for.end: ; preds = %for.body.1, %if.the ; CHECK: br i1 %{{[0-9]}}, label %for.body.1, label %Flow2 ; CHECK: for.body.1: -; CHECK: br i1 %{{[0-9]+}}, label %for.body.6, label %Flow3 +; CHECK: %cmp1.5 = icmp ne i32 %tmp22, %K1 +; CHECK-NEXT: br i1 %cmp1.5, label %for.body.6, label %Flow3 
for.body.1: ; preds = %if.then, %lor.lhs.false %best_val.233 = phi float [ %tmp5, %if.then ], [ %best_val.027, %lor.lhs.false ] %best_count.231 = phi i32 [ %sub4, %if.then ], [ %best_count.025, %lor.lhs.false ] diff --git a/test/tools/gold/X86/thinlto.ll b/test/tools/gold/X86/thinlto.ll index 5e1d913eb09e7..bb87adc44745b 100644 --- a/test/tools/gold/X86/thinlto.ll +++ b/test/tools/gold/X86/thinlto.ll @@ -82,15 +82,11 @@ ; BACKEND1-NEXT: </MODULE_STRTAB_BLOCK ; BACKEND1-NEXT: <GLOBALVAL_SUMMARY_BLOCK ; BACKEND1-NEXT: <VERSION +; BACKEND1-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} +; BACKEND1-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} ; BACKEND1-NEXT: <COMBINED ; BACKEND1-NEXT: <COMBINED ; BACKEND1-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; BACKEND1-NEXT: <VALUE_SYMTAB -; Check that the format is: op0=valueid, op1=offset, op2=funcguid, -; where funcguid is the lower 64 bits of the function name MD5. -; BACKEND1-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; BACKEND1-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; BACKEND1-NEXT: </VALUE_SYMTAB ; The backend index for Input/thinlto.ll contains summaries from itself only, ; as it does not import anything. @@ -99,13 +95,9 @@ ; BACKEND2-NEXT: </MODULE_STRTAB_BLOCK ; BACKEND2-NEXT: <GLOBALVAL_SUMMARY_BLOCK ; BACKEND2-NEXT: <VERSION +; BACKEND2-NEXT: <VALUE_GUID op0=1 op1=-5300342847281564238 ; BACKEND2-NEXT: <COMBINED ; BACKEND2-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; BACKEND2-NEXT: <VALUE_SYMTAB -; Check that the format is: op0=valueid, op1=offset, op2=funcguid, -; where funcguid is the lower 64 bits of the function name MD5. 
-; BACKEND2-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0=1 op1=-5300342847281564238 -; BACKEND2-NEXT: </VALUE_SYMTAB ; COMBINED: <MODULE_STRTAB_BLOCK ; COMBINED-NEXT: <ENTRY {{.*}} record string = '{{.*}}/test/tools/gold/X86/Output/thinlto.ll.tmp{{.*}}.o' @@ -113,15 +105,11 @@ ; COMBINED-NEXT: </MODULE_STRTAB_BLOCK ; COMBINED-NEXT: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION +; COMBINED-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} +; COMBINED-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} ; COMBINED-NEXT: <COMBINED ; COMBINED-NEXT: <COMBINED ; COMBINED-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; COMBINED-NEXT: <VALUE_SYMTAB -; Check that the format is: op0=valueid, op1=offset, op2=funcguid, -; where funcguid is the lower 64 bits of the function name MD5. -; COMBINED-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; COMBINED-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; COMBINED-NEXT: </VALUE_SYMTAB target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/test/tools/llvm-lto/thinlto.ll b/test/tools/llvm-lto/thinlto.ll index 61c52b33e72e8..86aca13a4c884 100644 --- a/test/tools/llvm-lto/thinlto.ll +++ b/test/tools/llvm-lto/thinlto.ll @@ -11,15 +11,11 @@ ; COMBINED-NEXT: </MODULE_STRTAB_BLOCK ; COMBINED-NEXT: <GLOBALVAL_SUMMARY_BLOCK ; COMBINED-NEXT: <VERSION +; COMBINED-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} +; COMBINED-NEXT: <VALUE_GUID op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} ; COMBINED-NEXT: <COMBINED ; COMBINED-NEXT: <COMBINED ; COMBINED-NEXT: </GLOBALVAL_SUMMARY_BLOCK -; COMBINED-NEXT: <VALUE_SYMTAB -; Check that the format is: op0=valueid, op1=offset, op2=funcguid, -; where funcguid is the lower 64 bits of the function name MD5. 
-; COMBINED-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; COMBINED-NEXT: <COMBINED_ENTRY abbrevid={{[0-9]+}} op0={{1|2}} op1={{-3706093650706652785|-5300342847281564238}} -; COMBINED-NEXT: </VALUE_SYMTAB target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/test/tools/llvm-symbolizer/Inputs/discrim b/test/tools/llvm-symbolizer/Inputs/discrim Binary files differindex ec61fe960bffb..0e5e847a5ed71 100644 --- a/test/tools/llvm-symbolizer/Inputs/discrim +++ b/test/tools/llvm-symbolizer/Inputs/discrim diff --git a/test/tools/llvm-symbolizer/Inputs/discrim.c b/test/tools/llvm-symbolizer/Inputs/discrim.c index decbce8d454e6..e53cbd4671390 100644 --- a/test/tools/llvm-symbolizer/Inputs/discrim.c +++ b/test/tools/llvm-symbolizer/Inputs/discrim.c @@ -1,8 +1,11 @@ static volatile int do_mul; -static volatile int do_inc; +static volatile int x, v; -int main () { - int x = 1; - if (do_mul) x *= 2; else x /= 2; - return do_inc ? ++x : --x; +int foo () { + if (do_mul) x *= v; else x /= v; + return x; +} + +int main() { + return foo() + foo(); } diff --git a/test/tools/llvm-symbolizer/Inputs/discrim.inp b/test/tools/llvm-symbolizer/Inputs/discrim.inp index f8ad6018d7092..a5cfcb2558f35 100644 --- a/test/tools/llvm-symbolizer/Inputs/discrim.inp +++ b/test/tools/llvm-symbolizer/Inputs/discrim.inp @@ -1,5 +1,8 @@ some text
-0x4004f2
-0x400509
-0x40050d
+0x400590
+0x4005a5
+0x4005ad
+0x4005b9
+0x4005ce
+0x4005d4
some more text
diff --git a/test/tools/llvm-symbolizer/padding-x86_64.ll b/test/tools/llvm-symbolizer/padding-x86_64.ll new file mode 100644 index 0000000000000..114c9f701c680 --- /dev/null +++ b/test/tools/llvm-symbolizer/padding-x86_64.ll @@ -0,0 +1,40 @@ +; REQUIRES: x86_64-linux +; Checks if symbolizer can correctly symbolize address in the padding between +; functions. +; RUN: llc -o %t.o -filetype=obj -mtriple=x86_64-pc-linux %s +; RUN: echo 0x5 | llvm-symbolizer -obj=%t.o | FileCheck %s --check-prefix=FOO +; RUN: echo 0xd | llvm-symbolizer -obj=%t.o | FileCheck %s --check-prefix=PADDING +; RUN: echo 0x10 | llvm-symbolizer -obj=%t.o | FileCheck %s --check-prefix=MAIN + +;FOO: foo +;PADDING: ?? +;MAIN: main + +@a = global i32 1, align 4 + +define i32 @foo() !dbg !9 { +entry: + %0 = load i32, i32* @a, align 4 + ret i32 %0 +} + +define i32 @main() !dbg !14 { +entry: + %call = call i32 @foo(), !dbg !18 + ret i32 %call +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!6, !7} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "padding-x86_64.c", directory: "/tmp/") +!2 = !{} +!5 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed) +!6 = !{i32 2, !"Dwarf Version", i32 4} +!7 = !{i32 2, !"Debug Info Version", i32 3} +!9 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 2, type: !10, isLocal: false, isDefinition: true, scopeLine: 2, isOptimized: false, unit: !0, variables: !2) +!10 = !DISubroutineType(types: !11) +!11 = !{!5} +!14 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 6, type: !10, isLocal: false, isDefinition: true, scopeLine: 6, isOptimized: false, unit: !0, variables: !2) +!18 = !DILocation(line: 7, column: 8, scope: !14) diff --git a/test/tools/llvm-symbolizer/sym-verbose.test b/test/tools/llvm-symbolizer/sym-verbose.test index ef66db919faae..5b401e3b0982f 100644 --- 
a/test/tools/llvm-symbolizer/sym-verbose.test +++ b/test/tools/llvm-symbolizer/sym-verbose.test @@ -1,39 +1,97 @@ #static volatile int do_mul; -#static volatile int do_inc; +#static volatile int x, v; # -#int main () { -# int x = 1; -# if (do_mul) x *= 2; else x /= 2; -# return do_inc ? ++x : --x; +#int foo () { +# if (do_mul) x *= v; else x /= v; +# return x; #} -#Build as : clang -g -O2 discrim.c -o discrim +# +#int main() { +# return foo() + foo(); +#} +#Build as : clang -gmlt -fdebug-info-for-profiling -O2 discrim.c -o discrim RUN: llvm-symbolizer -verbose -print-address -obj=%p/Inputs/discrim < %p/Inputs/discrim.inp | FileCheck %s #CHECK: some text -#CHECK: 0x4004f2 +#CHECK: 0x400590 +#CHECK-NEXT: foo +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 4 +#CHECK-NEXT: Line: 9 +#CHECK-NEXT: Column: 0 #CHECK-NEXT: main #CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 9 +#CHECK-NEXT: Line: 10 +#CHECK-NEXT: Column: 0 + +#CHECK: 0x4005a5 +#CHECK-NEXT: foo +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c #CHECK-NEXT: Function start line: 4 -#CHECK-NEXT: Line: 6 -#CHECK-NEXT: Column: 7 -#CHECK-NOT: Discriminator: 0 +#CHECK-NEXT: Line: 5 +#CHECK-NEXT: Column: 17 +#CHECK-NEXT: Discriminator: 2 +#CHECK-NEXT: main +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 9 +#CHECK-NEXT: Line: 10 +#CHECK-NEXT: Column: 0 -#CHECK: 0x400509 +#CHECK: 0x4005ad +#CHECK-NEXT: foo +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 4 +#CHECK-NEXT: Line: 0 +#CHECK-NEXT: Column: 30 +#CHECK-NEXT: Discriminator: 4 #CHECK-NEXT: main #CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 9 +#CHECK-NEXT: Line: 10 +#CHECK-NEXT: Column: 0 + +#CHECK: 0x4005b9 +#CHECK-NEXT: foo +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c #CHECK-NEXT: Function start line: 4 -#CHECK-NEXT: Line: 7 -#CHECK-NEXT: Column: 3 -#CHECK-NEXT: Discriminator: 1 +#CHECK-NEXT: 
Line: 5 +#CHECK-NEXT: Column: 7 +#CHECK-NEXT: main +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 9 +#CHECK-NEXT: Line: 10 +#CHECK-NEXT: Column: 0 +#CHECK-NEXT: Discriminator: 2 -#CHECK: 0x40050d +#CHECK: 0x4005ce +#CHECK-NEXT: foo +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 4 +#CHECK-NEXT: Line: 5 +#CHECK-NEXT: Column: 17 +#CHECK-NEXT: Discriminator: 2 #CHECK-NEXT: main #CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 9 +#CHECK-NEXT: Line: 10 +#CHECK-NEXT: Column: 0 +#CHECK-NEXT: Discriminator: 2 + +#CHECK: 0x4005d4 +#CHECK-NEXT: foo +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c #CHECK-NEXT: Function start line: 4 -#CHECK-NEXT: Line: 7 -#CHECK-NEXT: Column: 3 +#CHECK-NEXT: Line: 5 +#CHECK-NEXT: Column: 30 +#CHECK-NEXT: Discriminator: 4 +#CHECK-NEXT: main +#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c +#CHECK-NEXT: Function start line: 9 +#CHECK-NEXT: Line: 10 +#CHECK-NEXT: Column: 0 #CHECK-NEXT: Discriminator: 2 #CHECK: some more text diff --git a/test/tools/llvm-xray/X86/extract-instrmap-symbolize.ll b/test/tools/llvm-xray/X86/extract-instrmap-symbolize.ll new file mode 100644 index 0000000000000..86358ca5c13f0 --- /dev/null +++ b/test/tools/llvm-xray/X86/extract-instrmap-symbolize.ll @@ -0,0 +1,10 @@ +; This tests that we can extract the instrumentation map and symbolize the +; function addresses. 
+; RUN: llvm-xray extract %S/Inputs/elf64-example.bin -s | FileCheck %s + +; CHECK: --- +; CHECK-NEXT: - { id: 1, address: 0x000000000041C900, function: 0x000000000041C900, kind: function-enter, always-instrument: true, function-name: {{.*foo.*}} } +; CHECK-NEXT: - { id: 1, address: 0x000000000041C912, function: 0x000000000041C900, kind: function-exit, always-instrument: true, function-name: {{.*foo.*}} } +; CHECK-NEXT: - { id: 2, address: 0x000000000041C930, function: 0x000000000041C930, kind: function-enter, always-instrument: true, function-name: {{.*bar.*}} } +; CHECK-NEXT: - { id: 2, address: 0x000000000041C946, function: 0x000000000041C930, kind: function-exit, always-instrument: true, function-name: {{.*bar.*}} } +; CHECK-NEXT: ... diff --git a/test/tools/llvm-xray/X86/extract-instrmap.ll b/test/tools/llvm-xray/X86/extract-instrmap.ll index 7447aec681144..c036944bd3820 100644 --- a/test/tools/llvm-xray/X86/extract-instrmap.ll +++ b/test/tools/llvm-xray/X86/extract-instrmap.ll @@ -4,8 +4,8 @@ ; RUN: llvm-xray extract %S/Inputs/elf64-example.bin | FileCheck %s ; CHECK: --- -; CHECK-NEXT: - { id: 1, address: 0x000000000041C900, function: 0x000000000041C900, kind: function-enter, always-instrument: true } -; CHECK-NEXT: - { id: 1, address: 0x000000000041C912, function: 0x000000000041C900, kind: function-exit, always-instrument: true } -; CHECK-NEXT: - { id: 2, address: 0x000000000041C930, function: 0x000000000041C930, kind: function-enter, always-instrument: true } -; CHECK-NEXT: - { id: 2, address: 0x000000000041C946, function: 0x000000000041C930, kind: function-exit, always-instrument: true } +; CHECK-NEXT: - { id: 1, address: 0x000000000041C900, function: 0x000000000041C900, kind: function-enter, always-instrument: true{{.*}} } +; CHECK-NEXT: - { id: 1, address: 0x000000000041C912, function: 0x000000000041C900, kind: function-exit, always-instrument: true{{.*}} } +; CHECK-NEXT: - { id: 2, address: 0x000000000041C930, function: 0x000000000041C930, kind: 
function-enter, always-instrument: true{{.*}} } +; CHECK-NEXT: - { id: 2, address: 0x000000000041C946, function: 0x000000000041C930, kind: function-exit, always-instrument: true{{.*}} } ; CHECK-NEXT: ... diff --git a/tools/dsymutil/DwarfLinker.cpp b/tools/dsymutil/DwarfLinker.cpp index 25f1a0f271223..6ee052f101f93 100644 --- a/tools/dsymutil/DwarfLinker.cpp +++ b/tools/dsymutil/DwarfLinker.cpp @@ -522,7 +522,8 @@ public: /// \brief Emit the abbreviation table \p Abbrevs to the /// debug_abbrev section. - void emitAbbrevs(const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs); + void emitAbbrevs(const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs, + unsigned DwarfVersion); /// \brief Emit the string table described by \p Pool. void emitStrings(const NonRelocatableStringpool &Pool); @@ -690,8 +691,10 @@ void DwarfStreamer::emitCompileUnitHeader(CompileUnit &Unit) { /// \brief Emit the \p Abbrevs array as the shared abbreviation table /// for the linked Dwarf file. void DwarfStreamer::emitAbbrevs( - const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs) { + const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs, + unsigned DwarfVersion) { MS->SwitchSection(MOFI->getDwarfAbbrevSection()); + MC->setDwarfVersion(DwarfVersion); Asm->emitDwarfAbbrevs(Abbrevs); } @@ -1129,6 +1132,12 @@ private: /// \brief Called at the end of a debug object link. void endDebugObject(); + /// Remembers the newest DWARF version we've seen in a unit. + void maybeUpdateMaxDwarfVersion(unsigned Version) { + if (MaxDwarfVersion < Version) + MaxDwarfVersion = Version; + } + /// Keeps track of relocations. class RelocationManager { struct ValidReloc { @@ -1430,6 +1439,7 @@ private: std::unique_ptr<DwarfStreamer> Streamer; uint64_t OutputDebugInfoSize; unsigned UnitID; ///< A unique ID that identifies each compile unit. + unsigned MaxDwarfVersion = 0; /// The units of the current debug map object. 
std::vector<std::unique_ptr<CompileUnit>> Units; @@ -3435,9 +3445,11 @@ bool DwarfLinker::link(const DebugMap &Map) { CUDie.dump(outs(), 0); } - if (!registerModuleReference(CUDie, *CU, ModuleMap)) + if (!registerModuleReference(CUDie, *CU, ModuleMap)) { Units.push_back(llvm::make_unique<CompileUnit>(*CU, UnitID++, !Options.NoODR, "")); + maybeUpdateMaxDwarfVersion(CU->getVersion()); + } } // Now build the DIE parent links that we will use during the next phase. @@ -3471,7 +3483,7 @@ bool DwarfLinker::link(const DebugMap &Map) { // Emit everything that's global. if (!Options.NoOutput) { - Streamer->emitAbbrevs(Abbreviations); + Streamer->emitAbbrevs(Abbreviations, MaxDwarfVersion); Streamer->emitStrings(StringPool); } diff --git a/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp b/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp index abc6fa27a0e05..676134ca23689 100644 --- a/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp +++ b/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp @@ -122,6 +122,7 @@ static const char *GetBlockName(unsigned BlockID, case bitc::GLOBALVAL_SUMMARY_BLOCK_ID: return "GLOBALVAL_SUMMARY_BLOCK"; case bitc::MODULE_STRTAB_BLOCK_ID: return "MODULE_STRTAB_BLOCK"; + case bitc::STRTAB_BLOCK_ID: return "STRTAB_BLOCK"; } } @@ -315,6 +316,7 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID, STRINGIFY_CODE(FS, TYPE_CHECKED_LOAD_VCALLS) STRINGIFY_CODE(FS, TYPE_TEST_ASSUME_CONST_VCALL) STRINGIFY_CODE(FS, TYPE_CHECKED_LOAD_CONST_VCALL) + STRINGIFY_CODE(FS, VALUE_GUID) } case bitc::METADATA_ATTACHMENT_ID: switch(CodeID) { @@ -381,6 +383,11 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID, default: return nullptr; case bitc::OPERAND_BUNDLE_TAG: return "OPERAND_BUNDLE_TAG"; } + case bitc::STRTAB_BLOCK_ID: + switch(CodeID) { + default: return nullptr; + case bitc::STRTAB_BLOB: return "BLOB"; + } } #undef STRINGIFY_CODE } diff --git a/tools/llvm-cat/llvm-cat.cpp b/tools/llvm-cat/llvm-cat.cpp index 4d62099094bb8..8a21a6d07caab 100644 --- 
a/tools/llvm-cat/llvm-cat.cpp +++ b/tools/llvm-cat/llvm-cat.cpp @@ -44,11 +44,16 @@ int main(int argc, char **argv) { std::unique_ptr<MemoryBuffer> MB = ExitOnErr( errorOrToExpected(MemoryBuffer::getFileOrSTDIN(InputFilename))); std::vector<BitcodeModule> Mods = ExitOnErr(getBitcodeModuleList(*MB)); - for (auto &BitcodeMod : Mods) + for (auto &BitcodeMod : Mods) { Buffer.insert(Buffer.end(), BitcodeMod.getBuffer().begin(), BitcodeMod.getBuffer().end()); + Writer.copyStrtab(BitcodeMod.getStrtab()); + } } } else { + // The string table does not own strings added to it, some of which are + // owned by the modules; keep them alive until we write the string table. + std::vector<std::unique_ptr<Module>> OwnedMods; for (const auto &InputFilename : InputFilenames) { SMDiagnostic Err; std::unique_ptr<Module> M = parseIRFile(InputFilename, Err, Context); @@ -57,7 +62,9 @@ int main(int argc, char **argv) { return 1; } Writer.writeModule(M.get()); + OwnedMods.push_back(std::move(M)); } + Writer.writeStrtab(); } std::error_code EC; diff --git a/tools/llvm-modextract/llvm-modextract.cpp b/tools/llvm-modextract/llvm-modextract.cpp index 6c2e364be448c..58cede1374ea5 100644 --- a/tools/llvm-modextract/llvm-modextract.cpp +++ b/tools/llvm-modextract/llvm-modextract.cpp @@ -59,9 +59,12 @@ int main(int argc, char **argv) { ExitOnErr(errorCodeToError(EC)); if (BinaryExtract) { - SmallVector<char, 0> Header; - BitcodeWriter Writer(Header); - Out->os() << Header << Ms[ModuleIndex].getBuffer(); + SmallVector<char, 0> Result; + BitcodeWriter Writer(Result); + Result.append(Ms[ModuleIndex].getBuffer().begin(), + Ms[ModuleIndex].getBuffer().end()); + Writer.copyStrtab(Ms[ModuleIndex].getStrtab()); + Out->os() << Result; Out->keep(); return 0; } diff --git a/tools/llvm-shlib/CMakeLists.txt b/tools/llvm-shlib/CMakeLists.txt index c68a2b0e60eae..2781586862964 100644 --- a/tools/llvm-shlib/CMakeLists.txt +++ b/tools/llvm-shlib/CMakeLists.txt @@ -38,8 +38,12 @@ add_llvm_library(LLVM SHARED 
DISABLE_LLVM_LINK_LLVM_DYLIB SONAME ${SOURCES}) list(REMOVE_DUPLICATES LIB_NAMES) if(("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") OR (MINGW) OR ("${CMAKE_SYSTEM_NAME}" STREQUAL "FreeBSD") OR ("${CMAKE_SYSTEM_NAME}" STREQUAL "DragonFly")) # FIXME: It should be "GNU ld for elf" + configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/simple_version_script.map.in + ${LLVM_LIBRARY_DIR}/tools/llvm-shlib/simple_version_script.map) + # GNU ld doesn't resolve symbols in the version script. - set(LIB_NAMES -Wl,--whole-archive ${LIB_NAMES} -Wl,--no-whole-archive) + set(LIB_NAMES -Wl,--version-script,${LLVM_LIBRARY_DIR}/tools/llvm-shlib/simple_version_script.map -Wl,--whole-archive ${LIB_NAMES} -Wl,--no-whole-archive) elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin") set(LIB_NAMES -Wl,-all_load ${LIB_NAMES}) endif() diff --git a/tools/llvm-shlib/simple_version_script.map.in b/tools/llvm-shlib/simple_version_script.map.in new file mode 100644 index 0000000000000..e9515fe78625c --- /dev/null +++ b/tools/llvm-shlib/simple_version_script.map.in @@ -0,0 +1 @@ +LLVM_@LLVM_VERSION_MAJOR@.@LLVM_VERSION_MINOR@ { global: *; }; diff --git a/tools/llvm-xray/xray-extract.cc b/tools/llvm-xray/xray-extract.cc index 26e461869a083..d7015a05b0f29 100644 --- a/tools/llvm-xray/xray-extract.cc +++ b/tools/llvm-xray/xray-extract.cc @@ -16,6 +16,7 @@ #include <type_traits> #include <utility> +#include "func-id-helper.h" #include "xray-registry.h" #include "llvm/Object/ELF.h" #include "llvm/Object/ObjectFile.h" @@ -45,10 +46,18 @@ static cl::opt<std::string> static cl::alias ExtractOutput2("o", cl::aliasopt(ExtractOutput), cl::desc("Alias for -output"), cl::sub(Extract)); +static cl::opt<bool> ExtractSymbolize("symbolize", cl::value_desc("symbolize"), + cl::init(false), + cl::desc("symbolize functions"), + cl::sub(Extract)); +static cl::alias ExtractSymbolize2("s", cl::aliasopt(ExtractSymbolize), + cl::desc("alias for -symbolize"), + cl::sub(Extract)); namespace { -void exportAsYAML(const InstrumentationMap 
&Map, raw_ostream &OS) { +void exportAsYAML(const InstrumentationMap &Map, raw_ostream &OS, + FuncIdConversionHelper &FH) { // First we translate the sleds into the YAMLXRaySledEntry objects in a deque. std::vector<YAMLXRaySledEntry> YAMLSleds; auto Sleds = Map.sleds(); @@ -58,7 +67,8 @@ void exportAsYAML(const InstrumentationMap &Map, raw_ostream &OS) { if (!FuncId) return; YAMLSleds.push_back({*FuncId, Sled.Address, Sled.Function, Sled.Kind, - Sled.AlwaysInstrument}); + Sled.AlwaysInstrument, + ExtractSymbolize ? FH.SymbolOrNumber(*FuncId) : ""}); } Output Out(OS, nullptr, 0); Out << YAMLSleds; @@ -80,6 +90,13 @@ static CommandRegistration Unused(&Extract, []() -> Error { if (EC) return make_error<StringError>( Twine("Cannot open file '") + ExtractOutput + "' for writing.", EC); - exportAsYAML(*InstrumentationMapOrError, OS); + const auto &FunctionAddresses = + InstrumentationMapOrError->getFunctionAddresses(); + symbolize::LLVMSymbolizer::Options Opts( + symbolize::FunctionNameKind::LinkageName, true, true, false, ""); + symbolize::LLVMSymbolizer Symbolizer(Opts); + llvm::xray::FuncIdConversionHelper FuncIdHelper(ExtractInput, Symbolizer, + FunctionAddresses); + exportAsYAML(*InstrumentationMapOrError, OS, FuncIdHelper); return Error::success(); }); diff --git a/unittests/ADT/APIntTest.cpp b/unittests/ADT/APIntTest.cpp index 65481f5b2f220..5d3afe9a159f3 100644 --- a/unittests/ADT/APIntTest.cpp +++ b/unittests/ADT/APIntTest.cpp @@ -37,11 +37,6 @@ TEST(APIntTest, i64_ArithmeticRightShiftNegative) { EXPECT_EQ(neg_one, neg_one.ashr(7)); } -TEST(APIntTest, i64_LogicalRightShiftNegative) { - const APInt neg_one(128, static_cast<uint64_t>(-1), true); - EXPECT_EQ(0, neg_one.lshr(257)); -} - TEST(APIntTest, i128_NegativeCount) { APInt Minus3(128, static_cast<uint64_t>(-3), true); EXPECT_EQ(126u, Minus3.countLeadingOnes()); @@ -1606,36 +1601,6 @@ TEST(APIntTest, isShiftedMask) { } } -#if defined(__clang__) -// Disable the pragma warning from versions of Clang without 
-Wself-move -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunknown-pragmas" -// Disable the warning that triggers on exactly what is being tested. -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wself-move" -#endif -TEST(APIntTest, SelfMoveAssignment) { - APInt X(32, 0xdeadbeef); - X = std::move(X); - EXPECT_EQ(32u, X.getBitWidth()); - EXPECT_EQ(0xdeadbeefULL, X.getLimitedValue()); - - uint64_t Bits[] = {0xdeadbeefdeadbeefULL, 0xdeadbeefdeadbeefULL}; - APInt Y(128, Bits); - Y = std::move(Y); - EXPECT_EQ(128u, Y.getBitWidth()); - EXPECT_EQ(~0ULL, Y.getLimitedValue()); - const uint64_t *Raw = Y.getRawData(); - EXPECT_EQ(2u, Y.getNumWords()); - EXPECT_EQ(0xdeadbeefdeadbeefULL, Raw[0]); - EXPECT_EQ(0xdeadbeefdeadbeefULL, Raw[1]); -} -#if defined(__clang__) -#pragma clang diagnostic pop -#pragma clang diagnostic pop -#endif -} - TEST(APIntTest, reverseBits) { EXPECT_EQ(1, APInt(1, 1).reverseBits()); EXPECT_EQ(0, APInt(1, 0).reverseBits()); @@ -2025,3 +1990,100 @@ TEST(APIntTest, GCD) { APInt C = GreatestCommonDivisor(A, B); EXPECT_EQ(C, HugePrime); } + +TEST(APIntTest, LogicalRightShift) { + APInt i256(APInt::getHighBitsSet(256, 2)); + + i256.lshrInPlace(1); + EXPECT_EQ(1U, i256.countLeadingZeros()); + EXPECT_EQ(253U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256.lshrInPlace(62); + EXPECT_EQ(63U, i256.countLeadingZeros()); + EXPECT_EQ(191U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256.lshrInPlace(65); + EXPECT_EQ(128U, i256.countLeadingZeros()); + EXPECT_EQ(126U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256.lshrInPlace(64); + EXPECT_EQ(192U, i256.countLeadingZeros()); + EXPECT_EQ(62U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256.lshrInPlace(63); + EXPECT_EQ(255U, i256.countLeadingZeros()); + EXPECT_EQ(0U, i256.countTrailingZeros()); + EXPECT_EQ(1U, i256.countPopulation()); + + // Ensure we handle large 
shifts of multi-word. + const APInt neg_one(128, static_cast<uint64_t>(-1), true); + EXPECT_EQ(0, neg_one.lshr(128)); +} + +TEST(APIntTest, LeftShift) { + APInt i256(APInt::getLowBitsSet(256, 2)); + + i256 <<= 1; + EXPECT_EQ(253U, i256.countLeadingZeros()); + EXPECT_EQ(1U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256 <<= 62; + EXPECT_EQ(191U, i256.countLeadingZeros()); + EXPECT_EQ(63U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256 <<= 65; + EXPECT_EQ(126U, i256.countLeadingZeros()); + EXPECT_EQ(128U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256 <<= 64; + EXPECT_EQ(62U, i256.countLeadingZeros()); + EXPECT_EQ(192U, i256.countTrailingZeros()); + EXPECT_EQ(2U, i256.countPopulation()); + + i256 <<= 63; + EXPECT_EQ(0U, i256.countLeadingZeros()); + EXPECT_EQ(255U, i256.countTrailingZeros()); + EXPECT_EQ(1U, i256.countPopulation()); + + // Ensure we handle large shifts of multi-word. + const APInt neg_one(128, static_cast<uint64_t>(-1), true); + EXPECT_EQ(0, neg_one.shl(128)); +} + +TEST(APIntTest, isSubsetOf) { + APInt i32_1(32, 1); + APInt i32_2(32, 2); + APInt i32_3(32, 3); + EXPECT_FALSE(i32_3.isSubsetOf(i32_1)); + EXPECT_TRUE(i32_1.isSubsetOf(i32_3)); + EXPECT_FALSE(i32_2.isSubsetOf(i32_1)); + EXPECT_FALSE(i32_1.isSubsetOf(i32_2)); + EXPECT_TRUE(i32_3.isSubsetOf(i32_3)); + + APInt i128_1(128, 1); + APInt i128_2(128, 2); + APInt i128_3(128, 3); + EXPECT_FALSE(i128_3.isSubsetOf(i128_1)); + EXPECT_TRUE(i128_1.isSubsetOf(i128_3)); + EXPECT_FALSE(i128_2.isSubsetOf(i128_1)); + EXPECT_FALSE(i128_1.isSubsetOf(i128_2)); + EXPECT_TRUE(i128_3.isSubsetOf(i128_3)); + + i128_1 <<= 64; + i128_2 <<= 64; + i128_3 <<= 64; + EXPECT_FALSE(i128_3.isSubsetOf(i128_1)); + EXPECT_TRUE(i128_1.isSubsetOf(i128_3)); + EXPECT_FALSE(i128_2.isSubsetOf(i128_1)); + EXPECT_FALSE(i128_1.isSubsetOf(i128_2)); + EXPECT_TRUE(i128_3.isSubsetOf(i128_3)); +} + +} // end anonymous namespace diff --git 
a/unittests/ADT/BitVectorTest.cpp b/unittests/ADT/BitVectorTest.cpp index 98ef66735ad23..71b6be36c3bd8 100644 --- a/unittests/ADT/BitVectorTest.cpp +++ b/unittests/ADT/BitVectorTest.cpp @@ -345,6 +345,128 @@ TYPED_TEST(BitVectorTest, BinOps) { EXPECT_FALSE(B.anyCommon(A)); } +typedef std::vector<std::pair<int, int>> RangeList; + +template <typename VecType> +static inline VecType createBitVector(uint32_t Size, + const RangeList &setRanges) { + VecType V; + V.resize(Size); + for (auto &R : setRanges) + V.set(R.first, R.second); + return V; +} + +TYPED_TEST(BitVectorTest, ShiftOpsSingleWord) { + // Test that shift ops work when the desired shift amount is less + // than one word. + + // 1. Case where the number of bits in the BitVector also fit into a single + // word. + TypeParam A = createBitVector<TypeParam>(12, {{2, 4}, {8, 10}}); + TypeParam B = A; + + EXPECT_EQ(4U, A.count()); + EXPECT_TRUE(A.test(2)); + EXPECT_TRUE(A.test(3)); + EXPECT_TRUE(A.test(8)); + EXPECT_TRUE(A.test(9)); + + A >>= 1; + EXPECT_EQ(createBitVector<TypeParam>(12, {{1, 3}, {7, 9}}), A); + + A <<= 1; + EXPECT_EQ(B, A); + + A >>= 10; + EXPECT_EQ(createBitVector<TypeParam>(12, {}), A); + + A = B; + A <<= 10; + EXPECT_EQ(createBitVector<TypeParam>(12, {}), A); + + // 2. Case where the number of bits in the BitVector do not fit into a single + // word. + + // 31----------------------------------------------------------------------0 + // XXXXXXXX XXXXXXXX XXXXXXXX 00000111 | 11111110 00000000 00001111 11111111 + A = createBitVector<TypeParam>(40, {{0, 12}, {25, 35}}); + EXPECT_EQ(40U, A.size()); + EXPECT_EQ(22U, A.count()); + + // 2a. Make sure that left shifting some 1 bits out of the vector works. 
+ // 31----------------------------------------------------------------------0 + // Before: + // XXXXXXXX XXXXXXXX XXXXXXXX 00000111 | 11111110 00000000 00001111 11111111 + // After: + // XXXXXXXX XXXXXXXX XXXXXXXX 11111100 | 00000000 00011111 11111110 00000000 + A <<= 9; + EXPECT_EQ(createBitVector<TypeParam>(40, {{9, 21}, {34, 40}}), A); + + // 2b. Make sure that keeping the number of one bits unchanged works. + // 31----------------------------------------------------------------------0 + // Before: + // XXXXXXXX XXXXXXXX XXXXXXXX 11111100 | 00000000 00011111 11111110 00000000 + // After: + // XXXXXXXX XXXXXXXX XXXXXXXX 00000011 | 11110000 00000000 01111111 11111000 + A >>= 6; + EXPECT_EQ(createBitVector<TypeParam>(40, {{3, 15}, {28, 34}}), A); + + // 2c. Make sure that right shifting some 1 bits out of the vector works. + // 31----------------------------------------------------------------------0 + // Before: + // XXXXXXXX XXXXXXXX XXXXXXXX 00000011 | 11110000 00000000 01111111 11111000 + // After: + // XXXXXXXX XXXXXXXX XXXXXXXX 00000000 | 00000000 11111100 00000000 00011111 + A >>= 10; + EXPECT_EQ(createBitVector<TypeParam>(40, {{0, 5}, {18, 24}}), A); + + // 3. Big test. + A = createBitVector<TypeParam>(300, {{1, 30}, {60, 95}, {200, 275}}); + A <<= 29; + EXPECT_EQ(createBitVector<TypeParam>( + 300, {{1 + 29, 30 + 29}, {60 + 29, 95 + 29}, {200 + 29, 300}}), + A); +} + +TYPED_TEST(BitVectorTest, ShiftOpsMultiWord) { + // Test that shift ops work when the desired shift amount is greater than or + // equal to the size of a single word. + auto A = createBitVector<TypeParam>(300, {{1, 30}, {60, 95}, {200, 275}}); + + // Make a copy so we can re-use it later. + auto B = A; + + // 1. Shift left by an exact multiple of the word size. This should invoke + // only a memmove and no per-word bit operations. + A <<= 64; + auto Expected = createBitVector<TypeParam>( + 300, {{1 + 64, 30 + 64}, {60 + 64, 95 + 64}, {200 + 64, 300}}); + EXPECT_EQ(Expected, A); + + // 2. 
Shift left by a non multiple of the word size. This should invoke both + // a memmove and per-word bit operations. + A = B; + A <<= 93; + EXPECT_EQ(createBitVector<TypeParam>( + 300, {{1 + 93, 30 + 93}, {60 + 93, 95 + 93}, {200 + 93, 300}}), + A); + + // 1. Shift right by an exact multiple of the word size. This should invoke + // only a memmove and no per-word bit operations. + A = B; + A >>= 64; + EXPECT_EQ( + createBitVector<TypeParam>(300, {{0, 95 - 64}, {200 - 64, 275 - 64}}), A); + + // 2. Shift left by a non multiple of the word size. This should invoke both + // a memmove and per-word bit operations. + A = B; + A >>= 93; + EXPECT_EQ( + createBitVector<TypeParam>(300, {{0, 95 - 93}, {200 - 93, 275 - 93}}), A); +} + TYPED_TEST(BitVectorTest, RangeOps) { TypeParam A; A.resize(256); diff --git a/unittests/Analysis/ScalarEvolutionTest.cpp b/unittests/Analysis/ScalarEvolutionTest.cpp index df9fd4b5ec330..5736957359578 100644 --- a/unittests/Analysis/ScalarEvolutionTest.cpp +++ b/unittests/Analysis/ScalarEvolutionTest.cpp @@ -666,5 +666,95 @@ TEST_F(ScalarEvolutionsTest, SCEVNormalization) { }); } +// Expect the call of getZeroExtendExpr will not cost exponential time. +TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) { + LLVMContext C; + SMDiagnostic Err; + + // Generate a function like below: + // define void @foo() { + // entry: + // br label %for.cond + // + // for.cond: + // %0 = phi i64 [ 100, %entry ], [ %dec, %for.inc ] + // %cmp = icmp sgt i64 %0, 90 + // br i1 %cmp, label %for.inc, label %for.cond1 + // + // for.inc: + // %dec = add nsw i64 %0, -1 + // br label %for.cond + // + // for.cond1: + // %1 = phi i64 [ 100, %for.cond ], [ %dec5, %for.inc2 ] + // %cmp3 = icmp sgt i64 %1, 90 + // br i1 %cmp3, label %for.inc2, label %for.cond4 + // + // for.inc2: + // %dec5 = add nsw i64 %1, -1 + // br label %for.cond1 + // + // ...... 
+ // + // for.cond89: + // %19 = phi i64 [ 100, %for.cond84 ], [ %dec94, %for.inc92 ] + // %cmp93 = icmp sgt i64 %19, 90 + // br i1 %cmp93, label %for.inc92, label %for.end + // + // for.inc92: + // %dec94 = add nsw i64 %19, -1 + // br label %for.cond89 + // + // for.end: + // %gep = getelementptr i8, i8* null, i64 %dec + // %gep6 = getelementptr i8, i8* %gep, i64 %dec5 + // ...... + // %gep95 = getelementptr i8, i8* %gep91, i64 %dec94 + // ret void + // } + FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false); + Function *F = cast<Function>(M.getOrInsertFunction("foo", FTy)); + + BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F); + BasicBlock *CondBB = BasicBlock::Create(Context, "for.cond", F); + BasicBlock *EndBB = BasicBlock::Create(Context, "for.end", F); + BranchInst::Create(CondBB, EntryBB); + BasicBlock *PrevBB = EntryBB; + + Type *I64Ty = Type::getInt64Ty(Context); + Type *I8Ty = Type::getInt8Ty(Context); + Type *I8PtrTy = Type::getInt8PtrTy(Context); + Value *Accum = Constant::getNullValue(I8PtrTy); + int Iters = 20; + for (int i = 0; i < Iters; i++) { + BasicBlock *IncBB = BasicBlock::Create(Context, "for.inc", F, EndBB); + auto *PN = PHINode::Create(I64Ty, 2, "", CondBB); + PN->addIncoming(ConstantInt::get(Context, APInt(64, 100)), PrevBB); + auto *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SGT, PN, + ConstantInt::get(Context, APInt(64, 90)), "cmp", + CondBB); + BasicBlock *NextBB; + if (i != Iters - 1) + NextBB = BasicBlock::Create(Context, "for.cond", F, EndBB); + else + NextBB = EndBB; + BranchInst::Create(IncBB, NextBB, Cmp, CondBB); + auto *Dec = BinaryOperator::CreateNSWAdd( + PN, ConstantInt::get(Context, APInt(64, -1)), "dec", IncBB); + PN->addIncoming(Dec, IncBB); + BranchInst::Create(CondBB, IncBB); + + Accum = GetElementPtrInst::Create(I8Ty, Accum, Dec, "gep", EndBB); + + PrevBB = CondBB; + CondBB = NextBB; + } + ReturnInst::Create(Context, nullptr, EndBB); + ScalarEvolution SE = buildSE(*F); + 
const SCEV *S = SE.getSCEV(Accum); + Type *I128Ty = Type::getInt128Ty(Context); + SE.getZeroExtendExpr(S, I128Ty); +} + } // end anonymous namespace } // end namespace llvm diff --git a/unittests/CodeGen/CMakeLists.txt b/unittests/CodeGen/CMakeLists.txt index 240734dc6b18f..e944f6c9e3b99 100644 --- a/unittests/CodeGen/CMakeLists.txt +++ b/unittests/CodeGen/CMakeLists.txt @@ -9,6 +9,7 @@ set(CodeGenSources DIEHashTest.cpp LowLevelTypeTest.cpp MachineInstrBundleIteratorTest.cpp + ScalableVectorMVTsTest.cpp ) add_llvm_unittest(CodeGenTests diff --git a/unittests/CodeGen/LowLevelTypeTest.cpp b/unittests/CodeGen/LowLevelTypeTest.cpp index 67113005a46a1..428d6b93f790b 100644 --- a/unittests/CodeGen/LowLevelTypeTest.cpp +++ b/unittests/CodeGen/LowLevelTypeTest.cpp @@ -171,6 +171,7 @@ TEST(LowLevelTypeTest, Pointer) { for (unsigned AS : {0U, 1U, 127U, 0xffffU}) { const LLT Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS)); + const LLT VTy = LLT::vector(4, Ty); // Test kind. ASSERT_TRUE(Ty.isValid()); @@ -179,16 +180,26 @@ TEST(LowLevelTypeTest, Pointer) { ASSERT_FALSE(Ty.isScalar()); ASSERT_FALSE(Ty.isVector()); + ASSERT_TRUE(VTy.isValid()); + ASSERT_TRUE(VTy.isVector()); + ASSERT_TRUE(VTy.getElementType().isPointer()); + // Test addressspace. EXPECT_EQ(AS, Ty.getAddressSpace()); + EXPECT_EQ(AS, VTy.getElementType().getAddressSpace()); // Test equality operators. EXPECT_TRUE(Ty == Ty); EXPECT_FALSE(Ty != Ty); + EXPECT_TRUE(VTy == VTy); + EXPECT_FALSE(VTy != VTy); // Test Type->LLT conversion. 
Type *IRTy = PointerType::get(IntegerType::get(C, 8), AS); EXPECT_EQ(Ty, getLLTForType(*IRTy, DL)); + Type *IRVTy = + VectorType::get(PointerType::get(IntegerType::get(C, 8), AS), 4); + EXPECT_EQ(VTy, getLLTForType(*IRVTy, DL)); } } diff --git a/unittests/CodeGen/ScalableVectorMVTsTest.cpp b/unittests/CodeGen/ScalableVectorMVTsTest.cpp new file mode 100644 index 0000000000000..0071823f2cc92 --- /dev/null +++ b/unittests/CodeGen/ScalableVectorMVTsTest.cpp @@ -0,0 +1,88 @@ +//===-------- llvm/unittest/CodeGen/ScalableVectorMVTsTest.cpp ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/MachineValueType.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/IR/LLVMContext.h" +#include "gtest/gtest.h" + +using namespace llvm; + +namespace { + +TEST(ScalableVectorMVTsTest, IntegerMVTs) { + for (auto VecTy : MVT::integer_scalable_vector_valuetypes()) { + ASSERT_TRUE(VecTy.isValid()); + ASSERT_TRUE(VecTy.isInteger()); + ASSERT_TRUE(VecTy.isVector()); + ASSERT_TRUE(VecTy.isScalableVector()); + ASSERT_TRUE(VecTy.getScalarType().isValid()); + + ASSERT_FALSE(VecTy.isFloatingPoint()); + } +} + +TEST(ScalableVectorMVTsTest, FloatMVTs) { + for (auto VecTy : MVT::fp_scalable_vector_valuetypes()) { + ASSERT_TRUE(VecTy.isValid()); + ASSERT_TRUE(VecTy.isFloatingPoint()); + ASSERT_TRUE(VecTy.isVector()); + ASSERT_TRUE(VecTy.isScalableVector()); + ASSERT_TRUE(VecTy.getScalarType().isValid()); + + ASSERT_FALSE(VecTy.isInteger()); + } +} + +TEST(ScalableVectorMVTsTest, HelperFuncs) { + LLVMContext Ctx; + + // Create with scalable flag + EVT Vnx4i32 = EVT::getVectorVT(Ctx, MVT::i32, 4, /*Scalable=*/true); + ASSERT_TRUE(Vnx4i32.isScalableVector()); + + // Create with separate MVT::ElementCount + auto EltCnt = MVT::ElementCount(2, true); + EVT Vnx2i32 
= EVT::getVectorVT(Ctx, MVT::i32, EltCnt); + ASSERT_TRUE(Vnx2i32.isScalableVector()); + + // Create with inline MVT::ElementCount + EVT Vnx2i64 = EVT::getVectorVT(Ctx, MVT::i64, {2, true}); + ASSERT_TRUE(Vnx2i64.isScalableVector()); + + // Check that changing scalar types/element count works + EXPECT_EQ(Vnx2i32.widenIntegerVectorElementType(Ctx), Vnx2i64); + EXPECT_EQ(Vnx4i32.getHalfNumVectorElementsVT(Ctx), Vnx2i32); + + // Check that overloaded '*' and '/' operators work + EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt * 2), MVT::nxv4i64); + EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt / 2), MVT::nxv1i64); + + // Check that float->int conversion works + EVT Vnx2f64 = EVT::getVectorVT(Ctx, MVT::f64, {2, true}); + EXPECT_EQ(Vnx2f64.changeTypeToInteger(), Vnx2i64); + + // Check fields inside MVT::ElementCount + EltCnt = Vnx4i32.getVectorElementCount(); + EXPECT_EQ(EltCnt.Min, 4U); + ASSERT_TRUE(EltCnt.Scalable); + + // Check that fixed-length vector types aren't scalable. + EVT V8i32 = EVT::getVectorVT(Ctx, MVT::i32, 8); + ASSERT_FALSE(V8i32.isScalableVector()); + EVT V4f64 = EVT::getVectorVT(Ctx, MVT::f64, {4, false}); + ASSERT_FALSE(V4f64.isScalableVector()); + + // Check that MVT::ElementCount works for fixed-length types. 
+ EltCnt = V8i32.getVectorElementCount(); + EXPECT_EQ(EltCnt.Min, 8U); + ASSERT_FALSE(EltCnt.Scalable); +} + +} diff --git a/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp b/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp index a6c5b3a34ccb7..2078e3a96a843 100644 --- a/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp +++ b/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp @@ -170,7 +170,8 @@ void TestAllForms() { CUDie.addAttribute(Attr_DW_FORM_ref8, DW_FORM_ref8, Data8); const auto Attr_DW_FORM_ref_sig8 = static_cast<dwarf::Attribute>(Attr++); - CUDie.addAttribute(Attr_DW_FORM_ref_sig8, DW_FORM_ref_sig8, Data8_2); + if (Version >= 4) + CUDie.addAttribute(Attr_DW_FORM_ref_sig8, DW_FORM_ref_sig8, Data8_2); const auto Attr_DW_FORM_ref_udata = static_cast<dwarf::Attribute>(Attr++); CUDie.addAttribute(Attr_DW_FORM_ref_udata, DW_FORM_ref_udata, UData[0]); @@ -185,7 +186,8 @@ void TestAllForms() { CUDie.addAttribute(Attr_DW_FORM_flag_false, DW_FORM_flag, false); const auto Attr_DW_FORM_flag_present = static_cast<dwarf::Attribute>(Attr++); - CUDie.addAttribute(Attr_DW_FORM_flag_present, DW_FORM_flag_present); + if (Version >= 4) + CUDie.addAttribute(Attr_DW_FORM_flag_present, DW_FORM_flag_present); //---------------------------------------------------------------------- // Test SLEB128 based forms @@ -213,8 +215,9 @@ void TestAllForms() { Dwarf32Values[0]); const auto Attr_DW_FORM_sec_offset = static_cast<dwarf::Attribute>(Attr++); - CUDie.addAttribute(Attr_DW_FORM_sec_offset, DW_FORM_sec_offset, - Dwarf32Values[1]); + if (Version >= 4) + CUDie.addAttribute(Attr_DW_FORM_sec_offset, DW_FORM_sec_offset, + Dwarf32Values[1]); //---------------------------------------------------------------------- // Add an address at the end to make sure we can decode this value @@ -307,7 +310,8 @@ void TestAllForms() { EXPECT_EQ(Data2, toReference(DieDG.find(Attr_DW_FORM_ref2), 0)); EXPECT_EQ(Data4, toReference(DieDG.find(Attr_DW_FORM_ref4), 0)); EXPECT_EQ(Data8, 
toReference(DieDG.find(Attr_DW_FORM_ref8), 0)); - EXPECT_EQ(Data8_2, toReference(DieDG.find(Attr_DW_FORM_ref_sig8), 0)); + if (Version >= 4) + EXPECT_EQ(Data8_2, toReference(DieDG.find(Attr_DW_FORM_ref_sig8), 0)); EXPECT_EQ(UData[0], toReference(DieDG.find(Attr_DW_FORM_ref_udata), 0)); //---------------------------------------------------------------------- @@ -315,7 +319,8 @@ void TestAllForms() { //---------------------------------------------------------------------- EXPECT_EQ(1ULL, toUnsigned(DieDG.find(Attr_DW_FORM_flag_true), 0)); EXPECT_EQ(0ULL, toUnsigned(DieDG.find(Attr_DW_FORM_flag_false), 1)); - EXPECT_EQ(1ULL, toUnsigned(DieDG.find(Attr_DW_FORM_flag_present), 0)); + if (Version >= 4) + EXPECT_EQ(1ULL, toUnsigned(DieDG.find(Attr_DW_FORM_flag_present), 0)); //---------------------------------------------------------------------- // Test SLEB128 based forms @@ -334,8 +339,9 @@ void TestAllForms() { //---------------------------------------------------------------------- EXPECT_EQ(Dwarf32Values[0], toReference(DieDG.find(Attr_DW_FORM_GNU_ref_alt), 0)); - EXPECT_EQ(Dwarf32Values[1], - toSectionOffset(DieDG.find(Attr_DW_FORM_sec_offset), 0)); + if (Version >= 4) + EXPECT_EQ(Dwarf32Values[1], + toSectionOffset(DieDG.find(Attr_DW_FORM_sec_offset), 0)); //---------------------------------------------------------------------- // Add an address at the end to make sure we can decode this value diff --git a/unittests/IR/AttributesTest.cpp b/unittests/IR/AttributesTest.cpp index b5b221c63a173..7c3df2e19e8f9 100644 --- a/unittests/IR/AttributesTest.cpp +++ b/unittests/IR/AttributesTest.cpp @@ -49,4 +49,18 @@ TEST(Attributes, Ordering) { EXPECT_NE(SetA, SetB); } +TEST(Attributes, AddAttributes) { + LLVMContext C; + AttributeList AL; + AttrBuilder B; + B.addAttribute(Attribute::NoReturn); + AL = AL.addAttributes(C, AttributeList::FunctionIndex, AttributeSet::get(C, B)); + EXPECT_TRUE(AL.hasFnAttribute(Attribute::NoReturn)); + B.clear(); + 
B.addAttribute(Attribute::SExt); + AL = AL.addAttributes(C, AttributeList::ReturnIndex, B); + EXPECT_TRUE(AL.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt)); + EXPECT_TRUE(AL.hasFnAttribute(Attribute::NoReturn)); +} + } // end anonymous namespace diff --git a/unittests/IR/ConstantRangeTest.cpp b/unittests/IR/ConstantRangeTest.cpp index 58fd04448e2e9..b22f82154f403 100644 --- a/unittests/IR/ConstantRangeTest.cpp +++ b/unittests/IR/ConstantRangeTest.cpp @@ -670,14 +670,14 @@ TEST(ConstantRange, MakeGuaranteedNoWrapRegion) { for (APInt I = NUWRegion.getLower(), E = NUWRegion.getUpper(); I != E; ++I) { bool Overflow = false; - I.uadd_ov(C, Overflow); + (void)I.uadd_ov(C, Overflow); EXPECT_FALSE(Overflow); } for (APInt I = NSWRegion.getLower(), E = NSWRegion.getUpper(); I != E; ++I) { bool Overflow = false; - I.sadd_ov(C, Overflow); + (void)I.sadd_ov(C, Overflow); EXPECT_FALSE(Overflow); } @@ -685,10 +685,10 @@ TEST(ConstantRange, MakeGuaranteedNoWrapRegion) { ++I) { bool Overflow = false; - I.sadd_ov(C, Overflow); + (void)I.sadd_ov(C, Overflow); EXPECT_FALSE(Overflow); - I.uadd_ov(C, Overflow); + (void)I.uadd_ov(C, Overflow); EXPECT_FALSE(Overflow); } } diff --git a/unittests/Support/MathExtrasTest.cpp b/unittests/Support/MathExtrasTest.cpp index b2c3779788744..f46d94e9e577b 100644 --- a/unittests/Support/MathExtrasTest.cpp +++ b/unittests/Support/MathExtrasTest.cpp @@ -66,6 +66,31 @@ TEST(MathExtras, countLeadingZeros) { } } +TEST(MathExtras, onesMask) { + EXPECT_EQ(0U, maskLeadingOnes<uint8_t>(0)); + EXPECT_EQ(0U, maskTrailingOnes<uint8_t>(0)); + EXPECT_EQ(0U, maskLeadingOnes<uint16_t>(0)); + EXPECT_EQ(0U, maskTrailingOnes<uint16_t>(0)); + EXPECT_EQ(0U, maskLeadingOnes<uint32_t>(0)); + EXPECT_EQ(0U, maskTrailingOnes<uint32_t>(0)); + EXPECT_EQ(0U, maskLeadingOnes<uint64_t>(0)); + EXPECT_EQ(0U, maskTrailingOnes<uint64_t>(0)); + + EXPECT_EQ(0x00000003U, maskTrailingOnes<uint32_t>(2U)); + EXPECT_EQ(0xC0000000U, maskLeadingOnes<uint32_t>(2U)); + + 
EXPECT_EQ(0x000007FFU, maskTrailingOnes<uint32_t>(11U)); + EXPECT_EQ(0xFFE00000U, maskLeadingOnes<uint32_t>(11U)); + + EXPECT_EQ(0xFFFFFFFFU, maskTrailingOnes<uint32_t>(32U)); + EXPECT_EQ(0xFFFFFFFFU, maskLeadingOnes<uint32_t>(32U)); + EXPECT_EQ(0xFFFFFFFFFFFFFFFFULL, maskTrailingOnes<uint64_t>(64U)); + EXPECT_EQ(0xFFFFFFFFFFFFFFFFULL, maskLeadingOnes<uint64_t>(64U)); + + EXPECT_EQ(0x0000FFFFFFFFFFFFULL, maskTrailingOnes<uint64_t>(48U)); + EXPECT_EQ(0xFFFFFFFFFFFF0000ULL, maskLeadingOnes<uint64_t>(48U)); +} + TEST(MathExtras, findFirstSet) { uint8_t Z8 = 0; uint16_t Z16 = 0; diff --git a/unittests/Support/TargetParserTest.cpp b/unittests/Support/TargetParserTest.cpp index f0bfe7dbde964..9465f479fe8c1 100644 --- a/unittests/Support/TargetParserTest.cpp +++ b/unittests/Support/TargetParserTest.cpp @@ -149,8 +149,8 @@ TEST(TargetParserTest, testARMCPU) { EXPECT_TRUE(testARMCPU("cortex-a5", "armv7-a", "neon-vfpv4", ARM::AEK_MP | ARM::AEK_SEC | ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("cortex-a7", "armv7-a", "neon-vfpv4", - ARM::AEK_HWDIV | ARM::AEK_HWDIVARM | ARM::AEK_MP | - ARM::AEK_SEC | ARM::AEK_VIRT | ARM::AEK_DSP, + ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM | ARM::AEK_MP | + ARM::AEK_SEC | ARM::AEK_VIRT | ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("cortex-a8", "armv7-a", "neon", ARM::AEK_SEC | ARM::AEK_DSP, "7-A")); @@ -158,104 +158,111 @@ TEST(TargetParserTest, testARMCPU) { ARM::AEK_MP | ARM::AEK_SEC | ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("cortex-a12", "armv7-a", "neon-vfpv4", ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("cortex-a15", "armv7-a", "neon-vfpv4", ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("cortex-a17", "armv7-a", "neon-vfpv4", ARM::AEK_SEC | 
ARM::AEK_MP | ARM::AEK_VIRT | - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("krait", "armv7-a", "neon-vfpv4", - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "7-A")); EXPECT_TRUE(testARMCPU("cortex-r4", "armv7-r", "none", - ARM::AEK_HWDIV | ARM::AEK_DSP, "7-R")); + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "7-R")); EXPECT_TRUE(testARMCPU("cortex-r4f", "armv7-r", "vfpv3-d16", - ARM::AEK_HWDIV | ARM::AEK_DSP, "7-R")); + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "7-R")); EXPECT_TRUE(testARMCPU("cortex-r5", "armv7-r", "vfpv3-d16", - ARM::AEK_MP | ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | - ARM::AEK_DSP, "7-R")); + ARM::AEK_MP | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, + "7-R")); EXPECT_TRUE(testARMCPU("cortex-r7", "armv7-r", "vfpv3-d16-fp16", - ARM::AEK_MP | ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | - ARM::AEK_DSP, "7-R")); + ARM::AEK_MP | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, + "7-R")); EXPECT_TRUE(testARMCPU("cortex-r8", "armv7-r", "vfpv3-d16-fp16", - ARM::AEK_MP | ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | - ARM::AEK_DSP, "7-R")); + ARM::AEK_MP | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, + "7-R")); EXPECT_TRUE(testARMCPU("cortex-r52", "armv8-r", "neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_MP | ARM::AEK_VIRT | - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | + ARM::AEK_DSP, "8-R")); - EXPECT_TRUE(testARMCPU("sc300", "armv7-m", "none", - ARM::AEK_HWDIV, "7-M")); - EXPECT_TRUE(testARMCPU("cortex-m3", "armv7-m", "none", - ARM::AEK_HWDIV, "7-M")); + EXPECT_TRUE( + testARMCPU("sc300", "armv7-m", "none", ARM::AEK_HWDIVTHUMB, "7-M")); + EXPECT_TRUE( + testARMCPU("cortex-m3", "armv7-m", "none", ARM::AEK_HWDIVTHUMB, "7-M")); EXPECT_TRUE(testARMCPU("cortex-m4", "armv7e-m", "fpv4-sp-d16", - ARM::AEK_HWDIV | ARM::AEK_DSP, "7E-M")); + 
ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "7E-M")); EXPECT_TRUE(testARMCPU("cortex-m7", "armv7e-m", "fpv5-d16", - ARM::AEK_HWDIV | ARM::AEK_DSP, "7E-M")); + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "7E-M")); EXPECT_TRUE(testARMCPU("cortex-a32", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cortex-a35", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cortex-a53", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cortex-a57", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cortex-a72", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cortex-a73", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cyclone", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | 
ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("exynos-m1", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("exynos-m2", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("exynos-m3", "armv8-a", "crypto-neon-fp-armv8", ARM::AEK_CRC | ARM::AEK_SEC | ARM::AEK_MP | - ARM::AEK_VIRT | ARM::AEK_HWDIVARM | - ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_VIRT | ARM::AEK_HWDIVARM | + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-A")); EXPECT_TRUE(testARMCPU("cortex-m23", "armv8-m.base", "none", - ARM::AEK_HWDIV, "8-M.Baseline")); + ARM::AEK_HWDIVTHUMB, "8-M.Baseline")); EXPECT_TRUE(testARMCPU("cortex-m33", "armv8-m.main", "fpv5-sp-d16", - ARM::AEK_HWDIV | ARM::AEK_DSP, "8-M.Mainline")); + ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "8-M.Mainline")); EXPECT_TRUE(testARMCPU("iwmmxt", "iwmmxt", "none", ARM::AEK_NONE, "iwmmxt")); EXPECT_TRUE(testARMCPU("xscale", "xscale", "none", ARM::AEK_NONE, "xscale")); EXPECT_TRUE(testARMCPU("swift", "armv7s", "neon-vfpv4", - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP, + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP, "7-S")); } @@ -454,7 +461,7 @@ TEST(TargetParserTest, ARMFPURestriction) { TEST(TargetParserTest, ARMExtensionFeatures) { std::vector<StringRef> Features; unsigned Extensions = ARM::AEK_CRC | ARM::AEK_CRYPTO | ARM::AEK_DSP | - ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_MP | + ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_MP | ARM::AEK_SEC | ARM::AEK_VIRT | ARM::AEK_RAS; for (unsigned i = 0; i <= Extensions; i++) diff --git 
a/unittests/Transforms/Utils/Cloning.cpp b/unittests/Transforms/Utils/Cloning.cpp index 403c9c06c18a2..2f4ee8636530d 100644 --- a/unittests/Transforms/Utils/Cloning.cpp +++ b/unittests/Transforms/Utils/Cloning.cpp @@ -162,10 +162,8 @@ TEST_F(CloneInstruction, Attributes) { Function *F2 = Function::Create(FT1, Function::ExternalLinkage); - Attribute::AttrKind AK[] = { Attribute::NoCapture }; - AttributeList AS = AttributeList::get(context, 0, AK); Argument *A = &*F1->arg_begin(); - A->addAttr(AS); + A->addAttr(Attribute::NoCapture); SmallVector<ReturnInst*, 4> Returns; ValueToValueMapTy VMap; diff --git a/utils/TableGen/CodeGenTarget.cpp b/utils/TableGen/CodeGenTarget.cpp index d93511b0d8733..03c58ac09c2df 100644 --- a/utils/TableGen/CodeGenTarget.cpp +++ b/utils/TableGen/CodeGenTarget.cpp @@ -126,6 +126,45 @@ StringRef llvm::getEnumName(MVT::SimpleValueType T) { case MVT::v2f64: return "MVT::v2f64"; case MVT::v4f64: return "MVT::v4f64"; case MVT::v8f64: return "MVT::v8f64"; + case MVT::nxv2i1: return "MVT::nxv2i1"; + case MVT::nxv4i1: return "MVT::nxv4i1"; + case MVT::nxv8i1: return "MVT::nxv8i1"; + case MVT::nxv16i1: return "MVT::nxv16i1"; + case MVT::nxv32i1: return "MVT::nxv32i1"; + case MVT::nxv1i8: return "MVT::nxv1i8"; + case MVT::nxv2i8: return "MVT::nxv2i8"; + case MVT::nxv4i8: return "MVT::nxv4i8"; + case MVT::nxv8i8: return "MVT::nxv8i8"; + case MVT::nxv16i8: return "MVT::nxv16i8"; + case MVT::nxv32i8: return "MVT::nxv32i8"; + case MVT::nxv1i16: return "MVT::nxv1i16"; + case MVT::nxv2i16: return "MVT::nxv2i16"; + case MVT::nxv4i16: return "MVT::nxv4i16"; + case MVT::nxv8i16: return "MVT::nxv8i16"; + case MVT::nxv16i16: return "MVT::nxv16i16"; + case MVT::nxv32i16: return "MVT::nxv32i16"; + case MVT::nxv1i32: return "MVT::nxv1i32"; + case MVT::nxv2i32: return "MVT::nxv2i32"; + case MVT::nxv4i32: return "MVT::nxv4i32"; + case MVT::nxv8i32: return "MVT::nxv8i32"; + case MVT::nxv16i32: return "MVT::nxv16i32"; + case MVT::nxv1i64: return "MVT::nxv1i64"; + case 
MVT::nxv2i64: return "MVT::nxv2i64"; + case MVT::nxv4i64: return "MVT::nxv4i64"; + case MVT::nxv8i64: return "MVT::nxv8i64"; + case MVT::nxv16i64: return "MVT::nxv16i64"; + case MVT::nxv2f16: return "MVT::nxv2f16"; + case MVT::nxv4f16: return "MVT::nxv4f16"; + case MVT::nxv8f16: return "MVT::nxv8f16"; + case MVT::nxv1f32: return "MVT::nxv1f32"; + case MVT::nxv2f32: return "MVT::nxv2f32"; + case MVT::nxv4f32: return "MVT::nxv4f32"; + case MVT::nxv8f32: return "MVT::nxv8f32"; + case MVT::nxv16f32: return "MVT::nxv16f32"; + case MVT::nxv1f64: return "MVT::nxv1f64"; + case MVT::nxv2f64: return "MVT::nxv2f64"; + case MVT::nxv4f64: return "MVT::nxv4f64"; + case MVT::nxv8f64: return "MVT::nxv8f64"; case MVT::token: return "MVT::token"; case MVT::Metadata: return "MVT::Metadata"; case MVT::iPTR: return "MVT::iPTR"; diff --git a/utils/TableGen/IntrinsicEmitter.cpp b/utils/TableGen/IntrinsicEmitter.cpp index e9dd2fa0aca00..e979b94e46d6e 100644 --- a/utils/TableGen/IntrinsicEmitter.cpp +++ b/utils/TableGen/IntrinsicEmitter.cpp @@ -84,14 +84,11 @@ void IntrinsicEmitter::run(raw_ostream &OS) { // Emit the intrinsic parameter attributes. EmitAttributes(Ints, OS); - // Individual targets don't need GCC builtin name mappings. - if (!TargetOnly) { - // Emit code to translate GCC builtins into LLVM intrinsics. - EmitIntrinsicToBuiltinMap(Ints, true, OS); + // Emit code to translate GCC builtins into LLVM intrinsics. + EmitIntrinsicToBuiltinMap(Ints, true, OS); - // Emit code to translate MS builtins into LLVM intrinsics. - EmitIntrinsicToBuiltinMap(Ints, false, OS); - } + // Emit code to translate MS builtins into LLVM intrinsics. 
+ EmitIntrinsicToBuiltinMap(Ints, false, OS); EmitSuffix(OS); } @@ -756,6 +753,17 @@ void IntrinsicEmitter::EmitIntrinsicToBuiltinMap( << "Builtin(const char " << "*TargetPrefixStr, StringRef BuiltinNameStr) {\n"; } + + if (Table.Empty()) { + OS << " return "; + if (!TargetPrefix.empty()) + OS << "(" << TargetPrefix << "Intrinsic::ID)"; + OS << "Intrinsic::not_intrinsic;\n"; + OS << "}\n"; + OS << "#endif\n\n"; + return; + } + OS << " static const char BuiltinNames[] = {\n"; Table.EmitCharArray(OS); OS << " };\n\n"; |