Diffstat (limited to 'test')
-rw-r--r-- test/Analysis/BasicAA/cs-cs-arm.ll | 34
-rw-r--r-- test/Analysis/BasicAA/cs-cs.ll | 37
-rw-r--r-- test/Analysis/BasicAA/intrinsics-arm.ll | 31
-rw-r--r-- test/Analysis/BasicAA/intrinsics.ll | 34
-rw-r--r-- test/Analysis/BranchProbabilityInfo/basic.ll | 6
-rw-r--r-- test/Analysis/CostModel/AArch64/free-widening-casts.ll | 622
-rw-r--r-- test/Analysis/CostModel/AMDGPU/extractelement.ll | 74
-rw-r--r-- test/Analysis/CostModel/AMDGPU/insertelement.ll | 43
-rw-r--r-- test/Analysis/CostModel/AMDGPU/shufflevector.ll | 43
-rw-r--r-- test/Analysis/CostModel/X86/div.ll | 32
-rw-r--r-- test/Analysis/CostModel/X86/vshift-ashr-cost.ll | 138
-rw-r--r-- test/Analysis/CostModel/X86/vshift-lshr-cost.ll | 128
-rw-r--r-- test/Analysis/CostModel/X86/vshift-shl-cost.ll | 134
-rw-r--r-- test/Analysis/ScalarEvolution/different-loops-recs.ll | 454
-rw-r--r-- test/Analysis/TypeBasedAliasAnalysis/intrinsics.ll | 18
-rw-r--r-- test/Assembler/globalvariable-attributes.ll | 19
-rw-r--r-- test/Bitcode/globalvariable-attributes.ll | 19
-rw-r--r-- test/Bitcode/ptest-old.ll | 1
-rw-r--r-- test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll | 2
-rw-r--r-- test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll | 121
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir | 96
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/call-translator.ll | 4
-rw-r--r-- test/CodeGen/AArch64/arm64-ccmp.ll | 2
-rw-r--r-- test/CodeGen/AArch64/arm64-fml-combines.ll | 24
-rw-r--r-- test/CodeGen/AArch64/arm64-hello.ll | 4
-rw-r--r-- test/CodeGen/AArch64/arm64-misched-multimmo.ll | 2
-rw-r--r-- test/CodeGen/AArch64/macho-global-symbols.ll | 17
-rw-r--r-- test/CodeGen/AArch64/misched-fusion-aes.ll | 33
-rw-r--r-- test/CodeGen/AArch64/stackmap-frame-setup.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir | 2
-rw-r--r-- test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir | 2
-rw-r--r-- test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir | 20
-rw-r--r-- test/CodeGen/AMDGPU/GlobalISel/lit.local.cfg | 2
-rw-r--r-- test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir | 70
-rw-r--r-- test/CodeGen/AMDGPU/constant-fold-mi-operands.ll | 12
-rw-r--r-- test/CodeGen/AMDGPU/ctpop.ll | 80
-rw-r--r-- test/CodeGen/AMDGPU/ctpop64.ll | 16
-rw-r--r-- test/CodeGen/AMDGPU/fneg-combines.ll | 9
-rw-r--r-- test/CodeGen/AMDGPU/fneg.f16.ll | 39
-rw-r--r-- test/CodeGen/AMDGPU/inserted-wait-states.mir | 10
-rw-r--r-- test/CodeGen/AMDGPU/limit-coalesce.mir | 6
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll | 18
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/madak.ll | 6
-rw-r--r-- test/CodeGen/AMDGPU/promote-alloca-volatile.ll | 12
-rw-r--r-- test/CodeGen/AMDGPU/v_madak_f16.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/waitcnt.mir | 22
-rw-r--r-- test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir | 200
-rw-r--r-- test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll | 16
-rw-r--r-- test/CodeGen/ARM/GlobalISel/arm-legalizer.mir | 30
-rw-r--r-- test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir | 136
-rw-r--r-- test/CodeGen/ARM/divmod-eabi.ll | 73
-rw-r--r-- test/CodeGen/ARM/divmod.ll | 1
-rw-r--r-- test/CodeGen/AVR/select-mbb-placement-bug.ll | 35
-rw-r--r-- test/CodeGen/Generic/expand-experimental-reductions.ll | 210
-rw-r--r-- test/CodeGen/Hexagon/regalloc-bad-undef.mir | 8
-rw-r--r-- test/CodeGen/Lanai/masking_setccs.ll | 48
-rw-r--r-- test/CodeGen/Lanai/peephole-compare.mir | 4
-rw-r--r-- test/CodeGen/MIR/ARM/PR32721_ifcvt_triangle_unanalyzable.mir | 24
-rw-r--r-- test/CodeGen/MIR/ARM/ifcvt_canFallThroughTo.mir | 64
-rw-r--r-- test/CodeGen/MIR/X86/frame-info-save-restore-points.mir | 2
-rw-r--r-- test/CodeGen/MSP430/hwmult16.ll | 43
-rw-r--r-- test/CodeGen/MSP430/hwmult32.ll | 43
-rw-r--r-- test/CodeGen/MSP430/hwmultf5.ll | 43
-rw-r--r-- test/CodeGen/MSP430/jumptable.ll | 2
-rw-r--r-- test/CodeGen/MSP430/libcalls.ll | 595
-rw-r--r-- test/CodeGen/MSP430/promote-i8-mul.ll (renamed from test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll) | 2
-rw-r--r-- test/CodeGen/NVPTX/bug17709.ll | 52
-rw-r--r-- test/CodeGen/NVPTX/ctlz.ll | 2
-rw-r--r-- test/CodeGen/NVPTX/ctpop.ll | 2
-rw-r--r-- test/CodeGen/NVPTX/cttz.ll | 3
-rw-r--r-- test/CodeGen/NVPTX/f16-instructions.ll | 2157
-rw-r--r-- test/CodeGen/NVPTX/f16x2-instructions.ll | 2853
-rw-r--r-- test/CodeGen/NVPTX/fma.ll | 84
-rw-r--r-- test/CodeGen/NVPTX/i8-param.ll | 46
-rw-r--r-- test/CodeGen/NVPTX/param-load-store.ll | 1878
-rw-r--r-- test/CodeGen/NVPTX/sched1.ll | 4
-rw-r--r-- test/CodeGen/NVPTX/sched2.ll | 4
-rw-r--r-- test/CodeGen/NVPTX/simple-call.ll | 52
-rw-r--r-- test/CodeGen/NVPTX/vec8.ll | 2
-rw-r--r-- test/CodeGen/NVPTX/vector-call.ll | 60
-rw-r--r-- test/CodeGen/NVPTX/zeroext-32bit.ll | 52
-rw-r--r-- test/CodeGen/PowerPC/mtvsrdd.ll | 22
-rw-r--r-- test/CodeGen/PowerPC/setcc-logic.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/stackmap-frame-setup.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/tail-dup-layout.ll | 97
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqsc.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqsi.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqss.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequc.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequi.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequs.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqsc.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqsi.ll | 138
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqss.ll | 137
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequc.ll | 137
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequi.ll | 137
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequs.ll | 137
-rw-r--r-- test/CodeGen/SPARC/LeonItinerariesUT.ll | 4
-rw-r--r-- test/CodeGen/SPARC/inlineasm-v9.ll | 30
-rw-r--r-- test/CodeGen/SPARC/inlineasm.ll | 18
-rw-r--r-- test/CodeGen/SystemZ/list-ilp-crash.ll | 23
-rw-r--r-- test/CodeGen/SystemZ/lower-copy-undef-src.mir | 14
-rw-r--r-- test/CodeGen/Thumb2/v8_IT_5.ll | 2
-rw-r--r-- test/CodeGen/X86/2007-01-08-InstrSched.ll | 4
-rw-r--r-- test/CodeGen/X86/2010-01-18-DbgValue.ll | 13
-rw-r--r-- test/CodeGen/X86/2012-11-30-handlemove-dbg.ll | 51
-rw-r--r-- test/CodeGen/X86/2012-11-30-misched-dbg.ll | 142
-rw-r--r-- test/CodeGen/X86/2012-11-30-regpres-dbg.ll | 47
-rw-r--r-- test/CodeGen/X86/GlobalISel/add-scalar.ll | 44
-rw-r--r-- test/CodeGen/X86/GlobalISel/binop.ll | 42
-rw-r--r-- test/CodeGen/X86/GlobalISel/br.ll | 19
-rw-r--r-- test/CodeGen/X86/GlobalISel/cmp.ll | 159
-rw-r--r-- test/CodeGen/X86/GlobalISel/ext-x86-64.ll | 14
-rw-r--r-- test/CodeGen/X86/GlobalISel/ext.ll | 18
-rw-r--r-- test/CodeGen/X86/GlobalISel/legalize-cmp.mir | 179
-rw-r--r-- test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir | 64
-rw-r--r-- test/CodeGen/X86/GlobalISel/legalize-ext.mir | 64
-rw-r--r-- test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll (renamed from test/CodeGen/X86/GlobalISel/memop-x32.ll) | 0
-rw-r--r-- test/CodeGen/X86/GlobalISel/memop-scalar.ll (renamed from test/CodeGen/X86/GlobalISel/memop.ll) | 64
-rw-r--r-- test/CodeGen/X86/GlobalISel/memop-vec.ll | 39
-rw-r--r-- test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir | 125
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-br.mir | 39
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-cmp.mir | 563
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir | 38
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-ext.mir | 33
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir (renamed from test/CodeGen/X86/GlobalISel/select-memop-x32.mir) | 0
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-memop-scalar.mir (renamed from test/CodeGen/X86/GlobalISel/select-memop.mir) | 137
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-memop-v128.mir | 143
-rw-r--r-- test/CodeGen/X86/O0-pipeline.ll | 67
-rw-r--r-- test/CodeGen/X86/all-ones-vector.ll | 112
-rw-r--r-- test/CodeGen/X86/avg.ll | 833
-rw-r--r-- test/CodeGen/X86/avx-basic.ll | 8
-rw-r--r-- test/CodeGen/X86/avx-cvt-3.ll | 22
-rw-r--r-- test/CodeGen/X86/avx-intrinsics-fast-isel.ll | 60
-rw-r--r-- test/CodeGen/X86/avx-schedule.ll | 50
-rw-r--r-- test/CodeGen/X86/avx.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-cmp-kor-sequence.ll | 6
-rw-r--r-- test/CodeGen/X86/avx512-gather-scatter-intrin.ll | 10
-rw-r--r-- test/CodeGen/X86/avx512-intrinsics-upgrade.ll | 44
-rw-r--r-- test/CodeGen/X86/avx512-intrinsics.ll | 215
-rw-r--r-- test/CodeGen/X86/avx512-mask-spills.ll | 40
-rw-r--r-- test/CodeGen/X86/avx512-scalar_mask.ll | 107
-rw-r--r-- test/CodeGen/X86/avx512-vselect.ll | 61
-rw-r--r-- test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll | 12
-rw-r--r-- test/CodeGen/X86/avx512bw-intrinsics.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll | 24
-rw-r--r-- test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512cdvl-intrinsics.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512dq-intrinsics.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll | 10
-rw-r--r-- test/CodeGen/X86/avx512dqvl-intrinsics.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512er-intrinsics.ll | 48
-rw-r--r-- test/CodeGen/X86/avx512ifma-intrinsics.ll | 8
-rw-r--r-- test/CodeGen/X86/avx512ifmavl-intrinsics.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll | 64
-rw-r--r-- test/CodeGen/X86/avx512vl-intrinsics.ll | 28
-rw-r--r-- test/CodeGen/X86/bmi.ll | 76
-rw-r--r-- test/CodeGen/X86/bswap_tree2.ll | 35
-rw-r--r-- test/CodeGen/X86/cast-vsel.ll | 37
-rw-r--r-- test/CodeGen/X86/combine-abs.ll | 11
-rw-r--r-- test/CodeGen/X86/combine-shl.ll | 3
-rw-r--r-- test/CodeGen/X86/combine-srl.ll | 22
-rw-r--r-- test/CodeGen/X86/constructor.ll | 5
-rw-r--r-- test/CodeGen/X86/dbg-baseptr.ll | 62
-rw-r--r-- test/CodeGen/X86/elf-associated.ll | 5
-rw-r--r-- test/CodeGen/X86/fold-tied-op.ll | 7
-rw-r--r-- test/CodeGen/X86/fp128-i128.ll | 2
-rw-r--r-- test/CodeGen/X86/haddsub-2.ll | 12
-rw-r--r-- test/CodeGen/X86/known-signbits-vector.ll | 61
-rw-r--r-- test/CodeGen/X86/leaFixup32.mir | 508
-rw-r--r-- test/CodeGen/X86/leaFixup64.mir | 1041
-rw-r--r-- test/CodeGen/X86/lrshrink.ll | 57
-rw-r--r-- test/CodeGen/X86/madd.ll | 34
-rw-r--r-- test/CodeGen/X86/masked_gather_scatter.ll | 2
-rw-r--r-- test/CodeGen/X86/merge-consecutive-loads-128.ll | 16
-rw-r--r-- test/CodeGen/X86/misched-matrix.ll | 4
-rw-r--r-- test/CodeGen/X86/not-and-simplify.ll | 28
-rw-r--r-- test/CodeGen/X86/oddshuffles.ll | 34
-rw-r--r-- test/CodeGen/X86/packss.ll | 11
-rw-r--r-- test/CodeGen/X86/pmul.ll | 55
-rw-r--r-- test/CodeGen/X86/pr28129.ll | 32
-rw-r--r-- test/CodeGen/X86/pr29112.ll | 8
-rw-r--r-- test/CodeGen/X86/pr30562.ll | 1
-rw-r--r-- test/CodeGen/X86/pr31088.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32284.ll | 71
-rw-r--r-- test/CodeGen/X86/pr32907.ll | 53
-rw-r--r-- test/CodeGen/X86/replace_unsupported_masked_mem_intrin.ll | 37
-rw-r--r-- test/CodeGen/X86/rotate.ll | 16
-rw-r--r-- test/CodeGen/X86/sad.ll | 929
-rw-r--r-- test/CodeGen/X86/select.ll | 28
-rw-r--r-- test/CodeGen/X86/setcc-wide-types.ll | 56
-rw-r--r-- test/CodeGen/X86/shrink_vmul_sse.ll | 2
-rw-r--r-- test/CodeGen/X86/shuffle-of-splat-multiuses.ll | 34
-rw-r--r-- test/CodeGen/X86/sse-intrinsics-fast-isel.ll | 10
-rw-r--r-- test/CodeGen/X86/sse1.ll | 20
-rw-r--r-- test/CodeGen/X86/sse3-avx-addsub-2.ll | 14
-rw-r--r-- test/CodeGen/X86/sse41.ll | 8
-rw-r--r-- test/CodeGen/X86/stackmap-frame-setup.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_int_to_fp.ll | 84
-rw-r--r-- test/CodeGen/X86/vec_set-2.ll | 31
-rw-r--r-- test/CodeGen/X86/vec_set-3.ll | 45
-rw-r--r-- test/CodeGen/X86/vec_set-4.ll | 38
-rw-r--r-- test/CodeGen/X86/vec_set-6.ll | 23
-rw-r--r-- test/CodeGen/X86/vec_set-7.ll | 18
-rw-r--r-- test/CodeGen/X86/vec_set-8.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_set-A.ll | 19
-rw-r--r-- test/CodeGen/X86/vec_set-B.ll | 40
-rw-r--r-- test/CodeGen/X86/vec_set-C.ll | 10
-rw-r--r-- test/CodeGen/X86/vec_set.ll | 63
-rw-r--r-- test/CodeGen/X86/vector-bitreverse.ll | 6
-rw-r--r-- test/CodeGen/X86/vector-blend.ll | 4
-rw-r--r-- test/CodeGen/X86/vector-lzcnt-128.ll | 380
-rw-r--r-- test/CodeGen/X86/vector-lzcnt-256.ll | 536
-rw-r--r-- test/CodeGen/X86/vector-narrow-binop.ll | 111
-rw-r--r-- test/CodeGen/X86/vector-pcmp.ll | 27
-rw-r--r-- test/CodeGen/X86/vector-shift-ashr-256.ll | 580
-rw-r--r-- test/CodeGen/X86/vector-shift-lshr-256.ll | 434
-rw-r--r-- test/CodeGen/X86/vector-shift-shl-256.ll | 377
-rw-r--r-- test/CodeGen/X86/vector-shuffle-512-v32.ll | 356
-rw-r--r-- test/CodeGen/X86/vector-sqrt.ll | 8
-rw-r--r-- test/CodeGen/X86/viabs.ll | 107
-rw-r--r-- test/CodeGen/X86/vselect-pcmp.ll | 12
-rw-r--r-- test/CodeGen/X86/x86-interleaved-access.ll | 14
-rw-r--r-- test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll | 26
-rw-r--r-- test/CodeGen/X86/x86-no_caller_saved_registers.ll | 62
-rw-r--r-- test/CodeGen/X86/x86-shrink-wrapping.ll | 53
-rw-r--r-- test/CodeGen/X86/xop-intrinsics-fast-isel.ll | 8
-rw-r--r-- test/DebugInfo/COFF/local-variables.ll | 5
-rw-r--r-- test/DebugInfo/COFF/no-cus.ll | 25
-rw-r--r-- test/DebugInfo/Inputs/typeunit-header.elf-x86-64 | Bin 0 -> 840 bytes
-rw-r--r-- test/DebugInfo/Inputs/typeunit-header.s | 49
-rw-r--r-- test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test | 4
-rw-r--r-- test/DebugInfo/X86/dbg-declare-inalloca.ll | 199
-rw-r--r-- test/DebugInfo/X86/split-dwarf-cross-unit-reference.ll | 198
-rw-r--r-- test/DebugInfo/typeunit-header.test | 15
-rw-r--r-- test/Feature/intrinsic-noduplicate.ll | 1
-rw-r--r-- test/Instrumentation/MemorySanitizer/msan_basic.ll | 64
-rw-r--r-- test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll | 68
-rw-r--r-- test/Instrumentation/MemorySanitizer/pr32842.ll | 20
-rw-r--r-- test/Instrumentation/MemorySanitizer/vector_arith.ll | 1
-rw-r--r-- test/Instrumentation/MemorySanitizer/vector_cmp.ll | 1
-rw-r--r-- test/Instrumentation/MemorySanitizer/vector_cvt.ll | 1
-rw-r--r-- test/Instrumentation/MemorySanitizer/vector_pack.ll | 1
-rw-r--r-- test/Instrumentation/MemorySanitizer/vector_shift.ll | 1
-rw-r--r-- test/LTO/Resolution/X86/ifunc.ll | 15
-rw-r--r-- test/MC/AArch64/directive-cpu-err.s | 9
-rw-r--r-- test/MC/AArch64/label-arithmetic-diags-elf.s | 9
-rw-r--r-- test/MC/AMDGPU/flat.s | 66
-rw-r--r-- test/MC/AMDGPU/literal16.s | 8
-rw-r--r-- test/MC/AMDGPU/vop2.s | 38
-rw-r--r-- test/MC/AMDGPU/vop3-convert.s | 14
-rw-r--r-- test/MC/AsmParser/altmacro_string_escape.s | 29
-rw-r--r-- test/MC/Disassembler/AMDGPU/flat_vi.txt | 24
-rw-r--r-- test/MC/Disassembler/AMDGPU/literal16_vi.txt | 6
-rw-r--r-- test/MC/Disassembler/AMDGPU/vop2_vi.txt | 30
-rw-r--r-- test/MC/Disassembler/AMDGPU/vop3_vi.txt | 18
-rw-r--r-- test/MC/Disassembler/PowerPC/ppc64-encoding-p9vector.txt | 4
-rw-r--r-- test/MC/Disassembler/SystemZ/insns-z13.txt | 4056
-rw-r--r-- test/MC/Disassembler/SystemZ/insns.txt | 6511
-rw-r--r-- test/MC/SystemZ/insn-bad-z13.s | 792
-rw-r--r-- test/MC/SystemZ/insn-bad-z196.s | 53
-rw-r--r-- test/MC/SystemZ/insn-bad-zEC12.s | 511
-rw-r--r-- test/MC/SystemZ/insn-bad.s | 2264
-rw-r--r-- test/MC/SystemZ/insn-good-z13.s | 1710
-rw-r--r-- test/MC/SystemZ/insn-good-z196.s | 158
-rw-r--r-- test/MC/SystemZ/insn-good-zEC12.s | 16
-rw-r--r-- test/MC/SystemZ/insn-good.s | 2131
-rw-r--r-- test/Object/Inputs/COFF/empty-drectve.yaml | 14
-rw-r--r-- test/Object/X86/archive-symbol-table.s | 19
-rw-r--r-- test/Object/X86/nm-ir.ll | 2
-rw-r--r-- test/Object/coff-empty-drectve.test | 3
-rw-r--r-- test/Object/invalid.test | 4
-rw-r--r-- test/Object/wasm-invalid-start.test | 10
-rw-r--r-- test/ObjectYAML/wasm/export_section.yaml | 28
-rw-r--r-- test/ObjectYAML/wasm/function_section.yaml | 4
-rw-r--r-- test/ObjectYAML/wasm/import_section.yaml | 45
-rw-r--r-- test/ObjectYAML/wasm/start_section.yaml | 9
-rw-r--r-- test/TableGen/AsmVariant.td | 2
-rw-r--r-- test/TableGen/RegisterEncoder.td | 35
-rw-r--r-- test/Transforms/CodeExtractor/ExtractedFnEntryCount.ll | 2
-rw-r--r-- test/Transforms/CodeExtractor/MultipleExitBranchProb.ll | 2
-rw-r--r-- test/Transforms/CodeExtractor/PartialInlineAnd.ll | 4
-rw-r--r-- test/Transforms/CodeExtractor/PartialInlineEntryUpdate.ll | 41
-rw-r--r-- test/Transforms/CodeExtractor/PartialInlineHighCost.ll | 107
-rw-r--r-- test/Transforms/CodeExtractor/PartialInlineOr.ll | 4
-rw-r--r-- test/Transforms/CodeExtractor/PartialInlineOrAnd.ll | 4
-rw-r--r-- test/Transforms/CodeExtractor/SingleCondition.ll | 4
-rw-r--r-- test/Transforms/CodeExtractor/X86/InheritTargetAttributes.ll | 4
-rw-r--r-- test/Transforms/CodeGenPrepare/section-samplepgo.ll | 57
-rw-r--r-- test/Transforms/CodeGenPrepare/section.ll | 20
-rw-r--r-- test/Transforms/ConstProp/calls-math-finite.ll | 83
-rw-r--r-- test/Transforms/ConstProp/calls.ll | 206
-rw-r--r-- test/Transforms/ConstProp/sse.ll | 208
-rw-r--r-- test/Transforms/Coroutines/coro-eh-aware-edge-split.ll | 218
-rw-r--r-- test/Transforms/GVN/PRE/2011-06-01-NonLocalMemdepMiscompile.ll | 7
-rw-r--r-- test/Transforms/GVN/PRE/nonintegral.ll | 39
-rw-r--r-- test/Transforms/IndVarSimplify/2011-10-27-lftrnull.ll | 2
-rw-r--r-- test/Transforms/InferFunctionAttrs/annotate.ll | 126
-rw-r--r-- test/Transforms/InferFunctionAttrs/no-proto.ll | 126
-rw-r--r-- test/Transforms/Inline/inline-cold.ll | 20
-rw-r--r-- test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll | 2
-rw-r--r-- test/Transforms/Inline/partial-inline-act.ll | 2
-rw-r--r-- test/Transforms/Inline/prof-update.ll | 35
-rw-r--r-- test/Transforms/InstCombine/AArch64/2012-04-23-Neon-Intrinsics.ll (renamed from test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll) | 68
-rw-r--r-- test/Transforms/InstCombine/AArch64/lit.local.cfg | 2
-rw-r--r-- test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll (renamed from test/Transforms/InstCombine/amdgcn-intrinsics.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/AMDGPU/lit.local.cfg | 2
-rw-r--r-- test/Transforms/InstCombine/ARM/2012-04-23-Neon-Intrinsics.ll | 65
-rw-r--r-- test/Transforms/InstCombine/ARM/constant-fold-hang.ll (renamed from test/Transforms/InstCombine/constant-fold-hang.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/ARM/lit.local.cfg | 2
-rw-r--r-- test/Transforms/InstCombine/ARM/neon-intrinsics.ll (renamed from test/Transforms/InstCombine/neon-intrinsics.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/PowerPC/aligned-altivec.ll (renamed from test/Transforms/InstCombine/aligned-altivec.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/PowerPC/aligned-qpx.ll (renamed from test/Transforms/InstCombine/aligned-qpx.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/PowerPC/lit.local.cfg | 3
-rw-r--r-- test/Transforms/InstCombine/PowerPC/vsx-unaligned.ll (renamed from test/Transforms/InstCombine/vsx-unaligned.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/X86FsubCmpCombine.ll (renamed from test/Transforms/InstCombine/X86FsubCmpCombine.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/blend_x86.ll (renamed from test/Transforms/InstCombine/blend_x86.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/lit.local.cfg | 2
-rw-r--r-- test/Transforms/InstCombine/X86/pr2645-1.ll (renamed from test/Transforms/InstCombine/pr2645-1.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/shufflemask-undef.ll (renamed from test/Transforms/InstCombine/shufflemask-undef.ll) | 3
-rw-r--r-- test/Transforms/InstCombine/X86/x86-avx2.ll (renamed from test/Transforms/InstCombine/x86-avx2.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-avx512.ll (renamed from test/Transforms/InstCombine/x86-avx512.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-crc32-demanded.ll (renamed from test/Transforms/InstCombine/x86-crc32-demanded.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-f16c.ll (renamed from test/Transforms/InstCombine/x86-f16c.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-fma.ll (renamed from test/Transforms/InstCombine/x86-fma.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-insertps.ll (renamed from test/Transforms/InstCombine/x86-insertps.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-masked-memops.ll (renamed from test/Transforms/InstCombine/x86-masked-memops.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-movmsk.ll (renamed from test/Transforms/InstCombine/x86-movmsk.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-muldq.ll (renamed from test/Transforms/InstCombine/x86-muldq.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-pack.ll (renamed from test/Transforms/InstCombine/x86-pack.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-pshufb.ll (renamed from test/Transforms/InstCombine/x86-pshufb.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-sse.ll (renamed from test/Transforms/InstCombine/x86-sse.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-sse2.ll (renamed from test/Transforms/InstCombine/x86-sse2.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-sse41.ll (renamed from test/Transforms/InstCombine/x86-sse41.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-sse4a.ll (renamed from test/Transforms/InstCombine/x86-sse4a.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-vec_demanded_elts.ll | 110
-rw-r--r-- test/Transforms/InstCombine/X86/x86-vector-shifts.ll (renamed from test/Transforms/InstCombine/x86-vector-shifts.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-vperm2.ll (renamed from test/Transforms/InstCombine/x86-vperm2.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-vpermil.ll (renamed from test/Transforms/InstCombine/x86-vpermil.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/X86/x86-xop.ll (renamed from test/Transforms/InstCombine/x86-xop.ll) | 0
-rw-r--r-- test/Transforms/InstCombine/add.ll | 26
-rw-r--r-- test/Transforms/InstCombine/and.ll | 2
-rw-r--r-- test/Transforms/InstCombine/bit-tracking.ll | 26
-rw-r--r-- test/Transforms/InstCombine/cast.ll | 38
-rw-r--r-- test/Transforms/InstCombine/constant-fold-iteration.ll | 10
-rw-r--r-- test/Transforms/InstCombine/demorgan.ll | 8
-rw-r--r-- test/Transforms/InstCombine/icmp.ll | 15
-rw-r--r-- test/Transforms/InstCombine/intrinsics.ll | 29
-rw-r--r-- test/Transforms/InstCombine/logical-select.ll | 75
-rw-r--r-- test/Transforms/InstCombine/not.ll | 76
-rw-r--r-- test/Transforms/InstCombine/or-xor.ll | 70
-rw-r--r-- test/Transforms/InstCombine/or.ll | 109
-rw-r--r-- test/Transforms/InstCombine/sext.ll | 2
-rw-r--r-- test/Transforms/InstCombine/trunc.ll | 2
-rw-r--r-- test/Transforms/InstCombine/vec_demanded_elts.ll | 108
-rw-r--r-- test/Transforms/InstCombine/xor2.ll | 11
-rw-r--r-- test/Transforms/InstNamer/basic.ll | 19
-rw-r--r-- test/Transforms/InstSimplify/AndOrXor.ll | 173
-rw-r--r-- test/Transforms/InstSimplify/apint-or.ll | 72
-rw-r--r-- test/Transforms/InstSimplify/compare.ll | 7
-rw-r--r-- test/Transforms/InstSimplify/or.ll | 181
-rw-r--r-- test/Transforms/LoopIdiom/ARM/ctlz.ll | 185
-rw-r--r-- test/Transforms/LoopIdiom/X86/ctlz.ll | 185
-rw-r--r-- test/Transforms/LoopUnroll/not-rotated.ll | 2
-rw-r--r-- test/Transforms/LoopVectorize/X86/svml-calls-finite.ll | 187
-rw-r--r-- test/Transforms/LoopVectorize/induction.ll | 45
-rw-r--r-- test/Transforms/LoopVectorize/pr32859.ll | 30
-rw-r--r-- test/Transforms/NewGVN/pr32934.ll | 69
-rw-r--r-- test/Transforms/NewGVN/pr32952.ll | 42
-rw-r--r-- test/Transforms/NewGVN/verify-memoryphi.ll | 29
-rw-r--r-- test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll | 22
-rw-r--r-- test/Transforms/SLPVectorizer/AArch64/getelementptr.ll | 43
-rw-r--r-- test/Transforms/SLPVectorizer/AArch64/horizontal.ll | 33
-rw-r--r-- test/Transforms/SLPVectorizer/AArch64/remarks.ll | 32
-rw-r--r-- test/Transforms/SLPVectorizer/X86/arith-add.ll | 649
-rw-r--r-- test/Transforms/SLPVectorizer/X86/arith-mul.ll | 700
-rw-r--r-- test/Transforms/SLPVectorizer/X86/arith-sub.ll | 649
-rw-r--r-- test/Transforms/SLPVectorizer/X86/shift-ashr.ll | 913
-rw-r--r-- test/Transforms/SLPVectorizer/X86/shift-lshr.ll | 862
-rw-r--r-- test/Transforms/SLPVectorizer/X86/shift-shl.ll | 814
-rw-r--r-- test/Transforms/SimpleLoopUnswitch/trivial-unswitch.ll | 199
-rw-r--r-- test/Transforms/SpeculativeExecution/spec-other.ll | 32
-rw-r--r-- test/Transforms/SpeculativeExecution/spec-vector.ll | 73
-rw-r--r-- test/Transforms/Util/split-bit-piece.ll | 110
-rw-r--r-- test/Verifier/metadata-function-dbg.ll | 16
-rw-r--r-- test/tools/llvm-pdbdump/Inputs/FilterTest.cpp | 18
-rw-r--r-- test/tools/llvm-pdbdump/Inputs/FilterTest.pdb | Bin 44032 -> 44032 bytes
-rw-r--r-- test/tools/llvm-pdbdump/regex-filter.test | 8
-rw-r--r-- test/tools/llvm-pdbdump/symbol-filters.test | 74
-rw-r--r-- test/tools/llvm-profdata/sample-profile-basic.test | 7
-rw-r--r-- test/tools/llvm-readobj/wasm-invalid.test | 7
393 files changed, 37982 insertions(+), 14067 deletions(-)
diff --git a/test/Analysis/BasicAA/cs-cs-arm.ll b/test/Analysis/BasicAA/cs-cs-arm.ll
new file mode 100644
index 000000000000..1580af9ea826
--- /dev/null
+++ b/test/Analysis/BasicAA/cs-cs-arm.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+; REQUIRES: arm
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "arm-apple-ios"
+
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
+declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
+
+define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
+entry:
+ %q = getelementptr i8, i8* %p, i64 16
+ %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
+ call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+ %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
+ %c = add <8 x i16> %a, %b
+ ret <8 x i16> %c
+
+; CHECK-LABEL: Function: test1:
+
+; CHECK: NoAlias: i8* %p, i8* %q
+; CHECK: Just Ref: Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: Both ModRef: Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: Just Ref: Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: Ptr: i8* %q <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
+; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+}
diff --git a/test/Analysis/BasicAA/cs-cs.ll b/test/Analysis/BasicAA/cs-cs.ll
index 0f74dbd92bbd..870794c25165 100644
--- a/test/Analysis/BasicAA/cs-cs.ll
+++ b/test/Analysis/BasicAA/cs-cs.ll
@@ -2,41 +2,12 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "arm-apple-ios"
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
-declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
-
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
declare void @a_readonly_func(i8 *) noinline nounwind readonly
declare void @a_writeonly_func(i8 *) noinline nounwind writeonly
-define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
-entry:
- %q = getelementptr i8, i8* %p, i64 16
- %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
- call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
- %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
- %c = add <8 x i16> %a, %b
- ret <8 x i16> %c
-
-; CHECK-LABEL: Function: test1:
-
-; CHECK: NoAlias: i8* %p, i8* %q
-; CHECK: Just Ref: Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
-; CHECK: Both ModRef: Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
-; CHECK: Just Ref: Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: Ptr: i8* %q <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
-; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16)
-; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) #{{[0-9]+}} <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
-}
-
define void @test2(i8* %P, i8* %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
@@ -247,9 +218,9 @@ define void @test7(i8* %P) nounwind ssp {
; CHECK: Just Ref: call void @a_readonly_func(i8* %P) <-> call void @a_writeonly_func(i8* %P)
}
-declare void @an_inaccessiblememonly_func() nounwind inaccessiblememonly
-declare void @an_inaccessibleorargmemonly_func(i8 *) nounwind inaccessiblemem_or_argmemonly
-declare void @an_argmemonly_func(i8 *) nounwind argmemonly
+declare void @an_inaccessiblememonly_func() nounwind inaccessiblememonly
+declare void @an_inaccessibleorargmemonly_func(i8 *) nounwind inaccessiblemem_or_argmemonly
+declare void @an_argmemonly_func(i8 *) nounwind argmemonly
define void @test8(i8* %p) {
entry:
@@ -260,7 +231,7 @@ entry:
call void @an_inaccessiblememonly_func()
call void @an_inaccessibleorargmemonly_func(i8* %q)
call void @an_argmemonly_func(i8* %q)
- ret void
+ ret void
; CHECK-LABEL: Function: test8
; CHECK: NoModRef: Ptr: i8* %p <-> call void @an_inaccessiblememonly_func()
diff --git a/test/Analysis/BasicAA/intrinsics-arm.ll b/test/Analysis/BasicAA/intrinsics-arm.ll
new file mode 100644
index 000000000000..e15ce1c65c64
--- /dev/null
+++ b/test/Analysis/BasicAA/intrinsics-arm.ll
@@ -0,0 +1,31 @@
+; RUN: opt -basicaa -gvn -S < %s | FileCheck %s
+; REQUIRES: arm
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
+
+; BasicAA should prove that these calls don't interfere, since we've
+; specifically special cased exactly these two intrinsics in
+; MemoryLocation::getForArgument.
+
+; CHECK: define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %q = getelementptr i8, i8* %p, i64 16
+; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR:#[0-9]+]]
+; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK-NEXT: %c = add <8 x i16> %a, %a
+define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
+entry:
+ %q = getelementptr i8, i8* %p, i64 16
+ %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
+ call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+ %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
+ %c = add <8 x i16> %a, %b
+ ret <8 x i16> %c
+}
+
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
+declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
+
+; CHECK: attributes #0 = { argmemonly nounwind readonly }
+; CHECK: attributes #1 = { argmemonly nounwind }
+; CHECK: attributes [[ATTR]] = { nounwind }
diff --git a/test/Analysis/BasicAA/intrinsics.ll b/test/Analysis/BasicAA/intrinsics.ll
index 526a039ef7ac..68e59862bcc1 100644
--- a/test/Analysis/BasicAA/intrinsics.ll
+++ b/test/Analysis/BasicAA/intrinsics.ll
@@ -5,38 +5,22 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
; BasicAA should prove that these calls don't interfere, since they are
; IntrArgReadMem and have noalias pointers.
-; CHECK: define <8 x i16> @test0(i8* noalias %p, i8* noalias %q, <8 x i16> %y) {
+; CHECK: define <8 x i16> @test0(<8 x i16>* noalias %p, <8 x i16>* noalias %q, <8 x i16> %y, <8 x i1> %m, <8 x i16> %pt) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR:#[0-9]+]]
-; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK-NEXT: %a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %p, i32 16, <8 x i1> %m, <8 x i16> %pt) [[ATTR:#[0-9]+]]
+; CHECK-NEXT: call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %y, <8 x i16>* %q, i32 16, <8 x i1> %m)
; CHECK-NEXT: %c = add <8 x i16> %a, %a
-define <8 x i16> @test0(i8* noalias %p, i8* noalias %q, <8 x i16> %y) {
+define <8 x i16> @test0(<8 x i16>* noalias %p, <8 x i16>* noalias %q, <8 x i16> %y, <8 x i1> %m, <8 x i16> %pt) {
entry:
- %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
- call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
- %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
+ %a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %p, i32 16, <8 x i1> %m, <8 x i16> %pt) nounwind
+ call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %y, <8 x i16>* %q, i32 16, <8 x i1> %m)
+ %b = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %p, i32 16, <8 x i1> %m, <8 x i16> %pt) nounwind
%c = add <8 x i16> %a, %b
ret <8 x i16> %c
}
-; CHECK: define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %q = getelementptr i8, i8* %p, i64 16
-; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR]]
-; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
-; CHECK-NEXT: %c = add <8 x i16> %a, %a
-define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
-entry:
- %q = getelementptr i8, i8* %p, i64 16
- %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
- call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
- %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
- %c = add <8 x i16> %a, %b
- ret <8 x i16> %c
-}
-
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
-declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
+declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>) nounwind readonly
+declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>) nounwind
; CHECK: attributes #0 = { argmemonly nounwind readonly }
; CHECK: attributes #1 = { argmemonly nounwind }
diff --git a/test/Analysis/BranchProbabilityInfo/basic.ll b/test/Analysis/BranchProbabilityInfo/basic.ll
index 84936b7761ca..7bee1bd57373 100644
--- a/test/Analysis/BranchProbabilityInfo/basic.ll
+++ b/test/Analysis/BranchProbabilityInfo/basic.ll
@@ -452,7 +452,7 @@ entry:
i32 3, label %case_d
i32 4, label %case_e ], !prof !8
; CHECK: edge entry -> case_a probability is 0x00000800 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_b probability is 0x07fffe01 / 0x80000000 = 6.25%
+; CHECK: edge entry -> case_b probability is 0x07fffdff / 0x80000000 = 6.25%
; CHECK: edge entry -> case_c probability is 0x67fffdff / 0x80000000 = 81.25% [HOT edge]
; CHECK: edge entry -> case_d probability is 0x07fffdff / 0x80000000 = 6.25%
; CHECK: edge entry -> case_e probability is 0x07fffdff / 0x80000000 = 6.25%
@@ -495,7 +495,7 @@ entry:
i32 4, label %case_e ], !prof !9
; CHECK: edge entry -> case_a probability is 0x00000400 / 0x80000000 = 0.00%
; CHECK: edge entry -> case_b probability is 0x00000400 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_c probability is 0x6aaaa800 / 0x80000000 = 83.33% [HOT edge]
+; CHECK: edge entry -> case_c probability is 0x6aaaa7ff / 0x80000000 = 83.33% [HOT edge]
; CHECK: edge entry -> case_d probability is 0x0aaaa7ff / 0x80000000 = 8.33%
; CHECK: edge entry -> case_e probability is 0x0aaaa7ff / 0x80000000 = 8.33%
@@ -535,7 +535,7 @@ entry:
i32 4, label %case_e ], !prof !10
; CHECK: edge entry -> case_a probability is 0x00000000 / 0x80000000 = 0.00%
; CHECK: edge entry -> case_b probability is 0x00000400 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_c probability is 0x6e08fa2e / 0x80000000 = 85.96% [HOT edge]
+; CHECK: edge entry -> case_c probability is 0x6e08fa2d / 0x80000000 = 85.96% [HOT edge]
; CHECK: edge entry -> case_d probability is 0x08fb80e9 / 0x80000000 = 7.02%
; CHECK: edge entry -> case_e probability is 0x08fb80e9 / 0x80000000 = 7.02%
diff --git a/test/Analysis/CostModel/AArch64/free-widening-casts.ll b/test/Analysis/CostModel/AArch64/free-widening-casts.ll
new file mode 100644
index 000000000000..07f32d1d8ba2
--- /dev/null
+++ b/test/Analysis/CostModel/AArch64/free-widening-casts.ll
@@ -0,0 +1,622 @@
+; RUN: opt < %s -mtriple=aarch64--linux-gnu -cost-model -analyze | FileCheck %s --check-prefix=COST
+; RUN: llc < %s -mtriple=aarch64--linux-gnu | FileCheck %s --check-prefix=CODE
+
+; COST-LABEL: uaddl_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <8 x i8> %b to <8 x i16>
+; CODE-LABEL: uaddl_8h
+; CODE: uaddl v0.8h, v0.8b, v1.8b
+define <8 x i16> @uaddl_8h(<8 x i8> %a, <8 x i8> %b) {
+ %tmp0 = zext <8 x i8> %a to <8 x i16>
+ %tmp1 = zext <8 x i8> %b to <8 x i16>
+ %tmp2 = add <8 x i16> %tmp0, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+; COST-LABEL: uaddl_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i16> %a to <4 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <4 x i16> %b to <4 x i32>
+; CODE-LABEL: uaddl_4s
+; CODE: uaddl v0.4s, v0.4h, v1.4h
+define <4 x i32> @uaddl_4s(<4 x i16> %a, <4 x i16> %b) {
+ %tmp0 = zext <4 x i16> %a to <4 x i32>
+ %tmp1 = zext <4 x i16> %b to <4 x i32>
+ %tmp2 = add <4 x i32> %tmp0, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+; COST-LABEL: uaddl_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <2 x i32> %a to <2 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <2 x i32> %b to <2 x i64>
+; CODE-LABEL: uaddl_2d
+; CODE: uaddl v0.2d, v0.2s, v1.2s
+define <2 x i64> @uaddl_2d(<2 x i32> %a, <2 x i32> %b) {
+ %tmp0 = zext <2 x i32> %a to <2 x i64>
+ %tmp1 = zext <2 x i32> %b to <2 x i64>
+ %tmp2 = add <2 x i64> %tmp0, %tmp1
+ ret <2 x i64> %tmp2
+}
+
+; COST-LABEL: uaddl2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <16 x i8> %a to <16 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <16 x i8> %b to <16 x i16>
+; CODE-LABEL: uaddl2_8h
+; CODE: uaddl2 v2.8h, v0.16b, v1.16b
+; CODE-NEXT: uaddl v0.8h, v0.8b, v1.8b
+define <16 x i16> @uaddl2_8h(<16 x i8> %a, <16 x i8> %b) {
+ %tmp0 = zext <16 x i8> %a to <16 x i16>
+ %tmp1 = zext <16 x i8> %b to <16 x i16>
+ %tmp2 = add <16 x i16> %tmp0, %tmp1
+ ret <16 x i16> %tmp2
+}
+
+; COST-LABEL: uaddl2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i16> %a to <8 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <8 x i16> %b to <8 x i32>
+; CODE-LABEL: uaddl2_4s
+; CODE: uaddl2 v2.4s, v0.8h, v1.8h
+; CODE-NEXT: uaddl v0.4s, v0.4h, v1.4h
+define <8 x i32> @uaddl2_4s(<8 x i16> %a, <8 x i16> %b) {
+ %tmp0 = zext <8 x i16> %a to <8 x i32>
+ %tmp1 = zext <8 x i16> %b to <8 x i32>
+ %tmp2 = add <8 x i32> %tmp0, %tmp1
+ ret <8 x i32> %tmp2
+}
+
+; COST-LABEL: uaddl2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i32> %a to <4 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <4 x i32> %b to <4 x i64>
+; CODE-LABEL: uaddl2_2d
+; CODE: uaddl2 v2.2d, v0.4s, v1.4s
+; CODE-NEXT: uaddl v0.2d, v0.2s, v1.2s
+define <4 x i64> @uaddl2_2d(<4 x i32> %a, <4 x i32> %b) {
+ %tmp0 = zext <4 x i32> %a to <4 x i64>
+ %tmp1 = zext <4 x i32> %b to <4 x i64>
+ %tmp2 = add <4 x i64> %tmp0, %tmp1
+ ret <4 x i64> %tmp2
+}
+
+; COST-LABEL: saddl_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i8> %a to <8 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <8 x i8> %b to <8 x i16>
+; CODE-LABEL: saddl_8h
+; CODE: saddl v0.8h, v0.8b, v1.8b
+define <8 x i16> @saddl_8h(<8 x i8> %a, <8 x i8> %b) {
+ %tmp0 = sext <8 x i8> %a to <8 x i16>
+ %tmp1 = sext <8 x i8> %b to <8 x i16>
+ %tmp2 = add <8 x i16> %tmp0, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+; COST-LABEL: saddl_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i16> %a to <4 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <4 x i16> %b to <4 x i32>
+; CODE-LABEL: saddl_4s
+; CODE: saddl v0.4s, v0.4h, v1.4h
+define <4 x i32> @saddl_4s(<4 x i16> %a, <4 x i16> %b) {
+ %tmp0 = sext <4 x i16> %a to <4 x i32>
+ %tmp1 = sext <4 x i16> %b to <4 x i32>
+ %tmp2 = add <4 x i32> %tmp0, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+; COST-LABEL: saddl_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <2 x i32> %a to <2 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <2 x i32> %b to <2 x i64>
+; CODE-LABEL: saddl_2d
+; CODE: saddl v0.2d, v0.2s, v1.2s
+define <2 x i64> @saddl_2d(<2 x i32> %a, <2 x i32> %b) {
+ %tmp0 = sext <2 x i32> %a to <2 x i64>
+ %tmp1 = sext <2 x i32> %b to <2 x i64>
+ %tmp2 = add <2 x i64> %tmp0, %tmp1
+ ret <2 x i64> %tmp2
+}
+
+; COST-LABEL: saddl2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <16 x i8> %a to <16 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <16 x i8> %b to <16 x i16>
+; CODE-LABEL: saddl2_8h
+; CODE: saddl2 v2.8h, v0.16b, v1.16b
+; CODE-NEXT: saddl v0.8h, v0.8b, v1.8b
+define <16 x i16> @saddl2_8h(<16 x i8> %a, <16 x i8> %b) {
+ %tmp0 = sext <16 x i8> %a to <16 x i16>
+ %tmp1 = sext <16 x i8> %b to <16 x i16>
+ %tmp2 = add <16 x i16> %tmp0, %tmp1
+ ret <16 x i16> %tmp2
+}
+
+; COST-LABEL: saddl2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i16> %a to <8 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <8 x i16> %b to <8 x i32>
+; CODE-LABEL: saddl2_4s
+; CODE: saddl2 v2.4s, v0.8h, v1.8h
+; CODE-NEXT: saddl v0.4s, v0.4h, v1.4h
+define <8 x i32> @saddl2_4s(<8 x i16> %a, <8 x i16> %b) {
+ %tmp0 = sext <8 x i16> %a to <8 x i32>
+ %tmp1 = sext <8 x i16> %b to <8 x i32>
+ %tmp2 = add <8 x i32> %tmp0, %tmp1
+ ret <8 x i32> %tmp2
+}
+
+; COST-LABEL: saddl2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i32> %a to <4 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <4 x i32> %b to <4 x i64>
+; CODE-LABEL: saddl2_2d
+; CODE: saddl2 v2.2d, v0.4s, v1.4s
+; CODE-NEXT: saddl v0.2d, v0.2s, v1.2s
+define <4 x i64> @saddl2_2d(<4 x i32> %a, <4 x i32> %b) {
+ %tmp0 = sext <4 x i32> %a to <4 x i64>
+ %tmp1 = sext <4 x i32> %b to <4 x i64>
+ %tmp2 = add <4 x i64> %tmp0, %tmp1
+ ret <4 x i64> %tmp2
+}
+
+; COST-LABEL: usubl_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <8 x i8> %b to <8 x i16>
+; CODE-LABEL: usubl_8h
+; CODE: usubl v0.8h, v0.8b, v1.8b
+define <8 x i16> @usubl_8h(<8 x i8> %a, <8 x i8> %b) {
+ %tmp0 = zext <8 x i8> %a to <8 x i16>
+ %tmp1 = zext <8 x i8> %b to <8 x i16>
+ %tmp2 = sub <8 x i16> %tmp0, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+; COST-LABEL: usubl_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i16> %a to <4 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <4 x i16> %b to <4 x i32>
+; CODE-LABEL: usubl_4s
+; CODE: usubl v0.4s, v0.4h, v1.4h
+define <4 x i32> @usubl_4s(<4 x i16> %a, <4 x i16> %b) {
+ %tmp0 = zext <4 x i16> %a to <4 x i32>
+ %tmp1 = zext <4 x i16> %b to <4 x i32>
+ %tmp2 = sub <4 x i32> %tmp0, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+; COST-LABEL: usubl_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <2 x i32> %a to <2 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <2 x i32> %b to <2 x i64>
+; CODE-LABEL: usubl_2d
+; CODE: usubl v0.2d, v0.2s, v1.2s
+define <2 x i64> @usubl_2d(<2 x i32> %a, <2 x i32> %b) {
+ %tmp0 = zext <2 x i32> %a to <2 x i64>
+ %tmp1 = zext <2 x i32> %b to <2 x i64>
+ %tmp2 = sub <2 x i64> %tmp0, %tmp1
+ ret <2 x i64> %tmp2
+}
+
+; COST-LABEL: usubl2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <16 x i8> %a to <16 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <16 x i8> %b to <16 x i16>
+; CODE-LABEL: usubl2_8h
+; CODE: usubl2 v2.8h, v0.16b, v1.16b
+; CODE-NEXT: usubl v0.8h, v0.8b, v1.8b
+define <16 x i16> @usubl2_8h(<16 x i8> %a, <16 x i8> %b) {
+ %tmp0 = zext <16 x i8> %a to <16 x i16>
+ %tmp1 = zext <16 x i8> %b to <16 x i16>
+ %tmp2 = sub <16 x i16> %tmp0, %tmp1
+ ret <16 x i16> %tmp2
+}
+
+; COST-LABEL: usubl2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i16> %a to <8 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <8 x i16> %b to <8 x i32>
+; CODE-LABEL: usubl2_4s
+; CODE: usubl2 v2.4s, v0.8h, v1.8h
+; CODE-NEXT: usubl v0.4s, v0.4h, v1.4h
+define <8 x i32> @usubl2_4s(<8 x i16> %a, <8 x i16> %b) {
+ %tmp0 = zext <8 x i16> %a to <8 x i32>
+ %tmp1 = zext <8 x i16> %b to <8 x i32>
+ %tmp2 = sub <8 x i32> %tmp0, %tmp1
+ ret <8 x i32> %tmp2
+}
+
+; COST-LABEL: usubl2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i32> %a to <4 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <4 x i32> %b to <4 x i64>
+; CODE-LABEL: usubl2_2d
+; CODE: usubl2 v2.2d, v0.4s, v1.4s
+; CODE-NEXT: usubl v0.2d, v0.2s, v1.2s
+define <4 x i64> @usubl2_2d(<4 x i32> %a, <4 x i32> %b) {
+ %tmp0 = zext <4 x i32> %a to <4 x i64>
+ %tmp1 = zext <4 x i32> %b to <4 x i64>
+ %tmp2 = sub <4 x i64> %tmp0, %tmp1
+ ret <4 x i64> %tmp2
+}
+
+; COST-LABEL: ssubl_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i8> %a to <8 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <8 x i8> %b to <8 x i16>
+; CODE-LABEL: ssubl_8h
+; CODE: ssubl v0.8h, v0.8b, v1.8b
+define <8 x i16> @ssubl_8h(<8 x i8> %a, <8 x i8> %b) {
+ %tmp0 = sext <8 x i8> %a to <8 x i16>
+ %tmp1 = sext <8 x i8> %b to <8 x i16>
+ %tmp2 = sub <8 x i16> %tmp0, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+; COST-LABEL: ssubl_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i16> %a to <4 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <4 x i16> %b to <4 x i32>
+; CODE-LABEL: ssubl_4s
+; CODE: ssubl v0.4s, v0.4h, v1.4h
+define <4 x i32> @ssubl_4s(<4 x i16> %a, <4 x i16> %b) {
+ %tmp0 = sext <4 x i16> %a to <4 x i32>
+ %tmp1 = sext <4 x i16> %b to <4 x i32>
+ %tmp2 = sub <4 x i32> %tmp0, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+; COST-LABEL: ssubl_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <2 x i32> %a to <2 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <2 x i32> %b to <2 x i64>
+; CODE-LABEL: ssubl_2d
+; CODE: ssubl v0.2d, v0.2s, v1.2s
+define <2 x i64> @ssubl_2d(<2 x i32> %a, <2 x i32> %b) {
+ %tmp0 = sext <2 x i32> %a to <2 x i64>
+ %tmp1 = sext <2 x i32> %b to <2 x i64>
+ %tmp2 = sub <2 x i64> %tmp0, %tmp1
+ ret <2 x i64> %tmp2
+}
+
+; COST-LABEL: ssubl2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <16 x i8> %a to <16 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <16 x i8> %b to <16 x i16>
+; CODE-LABEL: ssubl2_8h
+; CODE: ssubl2 v2.8h, v0.16b, v1.16b
+; CODE-NEXT: ssubl v0.8h, v0.8b, v1.8b
+define <16 x i16> @ssubl2_8h(<16 x i8> %a, <16 x i8> %b) {
+ %tmp0 = sext <16 x i8> %a to <16 x i16>
+ %tmp1 = sext <16 x i8> %b to <16 x i16>
+ %tmp2 = sub <16 x i16> %tmp0, %tmp1
+ ret <16 x i16> %tmp2
+}
+
+; COST-LABEL: ssubl2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i16> %a to <8 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <8 x i16> %b to <8 x i32>
+; CODE-LABEL: ssubl2_4s
+; CODE: ssubl2 v2.4s, v0.8h, v1.8h
+; CODE-NEXT: ssubl v0.4s, v0.4h, v1.4h
+define <8 x i32> @ssubl2_4s(<8 x i16> %a, <8 x i16> %b) {
+ %tmp0 = sext <8 x i16> %a to <8 x i32>
+ %tmp1 = sext <8 x i16> %b to <8 x i32>
+ %tmp2 = sub <8 x i32> %tmp0, %tmp1
+ ret <8 x i32> %tmp2
+}
+
+; COST-LABEL: ssubl2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i32> %a to <4 x i64>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = sext <4 x i32> %b to <4 x i64>
+; CODE-LABEL: ssubl2_2d
+; CODE: ssubl2 v2.2d, v0.4s, v1.4s
+; CODE-NEXT: ssubl v0.2d, v0.2s, v1.2s
+define <4 x i64> @ssubl2_2d(<4 x i32> %a, <4 x i32> %b) {
+ %tmp0 = sext <4 x i32> %a to <4 x i64>
+ %tmp1 = sext <4 x i32> %b to <4 x i64>
+ %tmp2 = sub <4 x i64> %tmp0, %tmp1
+ ret <4 x i64> %tmp2
+}
+
+; COST-LABEL: uaddw_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16>
+; CODE-LABEL: uaddw_8h
+; CODE: uaddw v0.8h, v1.8h, v0.8b
+define <8 x i16> @uaddw_8h(<8 x i8> %a, <8 x i16> %b) {
+ %tmp0 = zext <8 x i8> %a to <8 x i16>
+ %tmp1 = add <8 x i16> %b, %tmp0
+ ret <8 x i16> %tmp1
+}
+
+; COST-LABEL: uaddw_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i16> %a to <4 x i32>
+; CODE-LABEL: uaddw_4s
+; CODE: uaddw v0.4s, v1.4s, v0.4h
+define <4 x i32> @uaddw_4s(<4 x i16> %a, <4 x i32> %b) {
+ %tmp0 = zext <4 x i16> %a to <4 x i32>
+ %tmp1 = add <4 x i32> %b, %tmp0
+ ret <4 x i32> %tmp1
+}
+
+; COST-LABEL: uaddw_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <2 x i32> %a to <2 x i64>
+; CODE-LABEL: uaddw_2d
+; CODE: uaddw v0.2d, v1.2d, v0.2s
+define <2 x i64> @uaddw_2d(<2 x i32> %a, <2 x i64> %b) {
+ %tmp0 = zext <2 x i32> %a to <2 x i64>
+ %tmp1 = add <2 x i64> %b, %tmp0
+ ret <2 x i64> %tmp1
+}
+
+; COST-LABEL: uaddw2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <16 x i8> %a to <16 x i16>
+; CODE-LABEL: uaddw2_8h
+; CODE: uaddw2 v2.8h, v2.8h, v0.16b
+; CODE-NEXT: uaddw v0.8h, v1.8h, v0.8b
+define <16 x i16> @uaddw2_8h(<16 x i8> %a, <16 x i16> %b) {
+ %tmp0 = zext <16 x i8> %a to <16 x i16>
+ %tmp1 = add <16 x i16> %b, %tmp0
+ ret <16 x i16> %tmp1
+}
+
+; COST-LABEL: uaddw2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i16> %a to <8 x i32>
+; CODE-LABEL: uaddw2_4s
+; CODE: uaddw2 v2.4s, v2.4s, v0.8h
+; CODE-NEXT: uaddw v0.4s, v1.4s, v0.4h
+define <8 x i32> @uaddw2_4s(<8 x i16> %a, <8 x i32> %b) {
+ %tmp0 = zext <8 x i16> %a to <8 x i32>
+ %tmp1 = add <8 x i32> %b, %tmp0
+ ret <8 x i32> %tmp1
+}
+
+; COST-LABEL: uaddw2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i32> %a to <4 x i64>
+; CODE-LABEL: uaddw2_2d
+; CODE: uaddw2 v2.2d, v2.2d, v0.4s
+; CODE-NEXT: uaddw v0.2d, v1.2d, v0.2s
+define <4 x i64> @uaddw2_2d(<4 x i32> %a, <4 x i64> %b) {
+ %tmp0 = zext <4 x i32> %a to <4 x i64>
+ %tmp1 = add <4 x i64> %b, %tmp0
+ ret <4 x i64> %tmp1
+}
+
+; COST-LABEL: saddw_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i8> %a to <8 x i16>
+; CODE-LABEL: saddw_8h
+; CODE: saddw v0.8h, v1.8h, v0.8b
+define <8 x i16> @saddw_8h(<8 x i8> %a, <8 x i16> %b) {
+ %tmp0 = sext <8 x i8> %a to <8 x i16>
+ %tmp1 = add <8 x i16> %b, %tmp0
+ ret <8 x i16> %tmp1
+}
+
+; COST-LABEL: saddw_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i16> %a to <4 x i32>
+; CODE-LABEL: saddw_4s
+; CODE: saddw v0.4s, v1.4s, v0.4h
+define <4 x i32> @saddw_4s(<4 x i16> %a, <4 x i32> %b) {
+ %tmp0 = sext <4 x i16> %a to <4 x i32>
+ %tmp1 = add <4 x i32> %b, %tmp0
+ ret <4 x i32> %tmp1
+}
+
+; COST-LABEL: saddw_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <2 x i32> %a to <2 x i64>
+; CODE-LABEL: saddw_2d
+; CODE: saddw v0.2d, v1.2d, v0.2s
+define <2 x i64> @saddw_2d(<2 x i32> %a, <2 x i64> %b) {
+ %tmp0 = sext <2 x i32> %a to <2 x i64>
+ %tmp1 = add <2 x i64> %b, %tmp0
+ ret <2 x i64> %tmp1
+}
+
+; COST-LABEL: saddw2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <16 x i8> %a to <16 x i16>
+; CODE-LABEL: saddw2_8h
+; CODE: saddw2 v2.8h, v2.8h, v0.16b
+; CODE-NEXT: saddw v0.8h, v1.8h, v0.8b
+define <16 x i16> @saddw2_8h(<16 x i8> %a, <16 x i16> %b) {
+ %tmp0 = sext <16 x i8> %a to <16 x i16>
+ %tmp1 = add <16 x i16> %b, %tmp0
+ ret <16 x i16> %tmp1
+}
+
+; COST-LABEL: saddw2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i16> %a to <8 x i32>
+; CODE-LABEL: saddw2_4s
+; CODE: saddw2 v2.4s, v2.4s, v0.8h
+; CODE-NEXT: saddw v0.4s, v1.4s, v0.4h
+define <8 x i32> @saddw2_4s(<8 x i16> %a, <8 x i32> %b) {
+ %tmp0 = sext <8 x i16> %a to <8 x i32>
+ %tmp1 = add <8 x i32> %b, %tmp0
+ ret <8 x i32> %tmp1
+}
+
+; COST-LABEL: saddw2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i32> %a to <4 x i64>
+; CODE-LABEL: saddw2_2d
+; CODE: saddw2 v2.2d, v2.2d, v0.4s
+; CODE-NEXT: saddw v0.2d, v1.2d, v0.2s
+define <4 x i64> @saddw2_2d(<4 x i32> %a, <4 x i64> %b) {
+ %tmp0 = sext <4 x i32> %a to <4 x i64>
+ %tmp1 = add <4 x i64> %b, %tmp0
+ ret <4 x i64> %tmp1
+}
+
+; COST-LABEL: usubw_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16>
+; CODE-LABEL: usubw_8h
+; CODE: usubw v0.8h, v1.8h, v0.8b
+define <8 x i16> @usubw_8h(<8 x i8> %a, <8 x i16> %b) {
+ %tmp0 = zext <8 x i8> %a to <8 x i16>
+ %tmp1 = sub <8 x i16> %b, %tmp0
+ ret <8 x i16> %tmp1
+}
+
+; COST-LABEL: usubw_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i16> %a to <4 x i32>
+; CODE-LABEL: usubw_4s
+; CODE: usubw v0.4s, v1.4s, v0.4h
+define <4 x i32> @usubw_4s(<4 x i16> %a, <4 x i32> %b) {
+ %tmp0 = zext <4 x i16> %a to <4 x i32>
+ %tmp1 = sub <4 x i32> %b, %tmp0
+ ret <4 x i32> %tmp1
+}
+
+; COST-LABEL: usubw_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <2 x i32> %a to <2 x i64>
+; CODE-LABEL: usubw_2d
+; CODE: usubw v0.2d, v1.2d, v0.2s
+define <2 x i64> @usubw_2d(<2 x i32> %a, <2 x i64> %b) {
+ %tmp0 = zext <2 x i32> %a to <2 x i64>
+ %tmp1 = sub <2 x i64> %b, %tmp0
+ ret <2 x i64> %tmp1
+}
+
+; COST-LABEL: usubw2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <16 x i8> %a to <16 x i16>
+; CODE-LABEL: usubw2_8h
+; CODE: usubw2 v2.8h, v2.8h, v0.16b
+; CODE-NEXT: usubw v0.8h, v1.8h, v0.8b
+define <16 x i16> @usubw2_8h(<16 x i8> %a, <16 x i16> %b) {
+ %tmp0 = zext <16 x i8> %a to <16 x i16>
+ %tmp1 = sub <16 x i16> %b, %tmp0
+ ret <16 x i16> %tmp1
+}
+
+; COST-LABEL: usubw2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <8 x i16> %a to <8 x i32>
+; CODE-LABEL: usubw2_4s
+; CODE: usubw2 v2.4s, v2.4s, v0.8h
+; CODE-NEXT: usubw v0.4s, v1.4s, v0.4h
+define <8 x i32> @usubw2_4s(<8 x i16> %a, <8 x i32> %b) {
+ %tmp0 = zext <8 x i16> %a to <8 x i32>
+ %tmp1 = sub <8 x i32> %b, %tmp0
+ ret <8 x i32> %tmp1
+}
+
+; COST-LABEL: usubw2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = zext <4 x i32> %a to <4 x i64>
+; CODE-LABEL: usubw2_2d
+; CODE: usubw2 v2.2d, v2.2d, v0.4s
+; CODE-NEXT: usubw v0.2d, v1.2d, v0.2s
+define <4 x i64> @usubw2_2d(<4 x i32> %a, <4 x i64> %b) {
+ %tmp0 = zext <4 x i32> %a to <4 x i64>
+ %tmp1 = sub <4 x i64> %b, %tmp0
+ ret <4 x i64> %tmp1
+}
+
+; COST-LABEL: ssubw_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i8> %a to <8 x i16>
+; CODE-LABEL: ssubw_8h
+; CODE: ssubw v0.8h, v1.8h, v0.8b
+define <8 x i16> @ssubw_8h(<8 x i8> %a, <8 x i16> %b) {
+ %tmp0 = sext <8 x i8> %a to <8 x i16>
+ %tmp1 = sub <8 x i16> %b, %tmp0
+ ret <8 x i16> %tmp1
+}
+
+; COST-LABEL: ssubw_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i16> %a to <4 x i32>
+; CODE-LABEL: ssubw_4s
+; CODE: ssubw v0.4s, v1.4s, v0.4h
+define <4 x i32> @ssubw_4s(<4 x i16> %a, <4 x i32> %b) {
+ %tmp0 = sext <4 x i16> %a to <4 x i32>
+ %tmp1 = sub <4 x i32> %b, %tmp0
+ ret <4 x i32> %tmp1
+}
+
+; COST-LABEL: ssubw_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <2 x i32> %a to <2 x i64>
+; CODE-LABEL: ssubw_2d
+; CODE: ssubw v0.2d, v1.2d, v0.2s
+define <2 x i64> @ssubw_2d(<2 x i32> %a, <2 x i64> %b) {
+ %tmp0 = sext <2 x i32> %a to <2 x i64>
+ %tmp1 = sub <2 x i64> %b, %tmp0
+ ret <2 x i64> %tmp1
+}
+
+; COST-LABEL: ssubw2_8h
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <16 x i8> %a to <16 x i16>
+; CODE-LABEL: ssubw2_8h
+; CODE: ssubw2 v2.8h, v2.8h, v0.16b
+; CODE-NEXT: ssubw v0.8h, v1.8h, v0.8b
+define <16 x i16> @ssubw2_8h(<16 x i8> %a, <16 x i16> %b) {
+ %tmp0 = sext <16 x i8> %a to <16 x i16>
+ %tmp1 = sub <16 x i16> %b, %tmp0
+ ret <16 x i16> %tmp1
+}
+
+; COST-LABEL: ssubw2_4s
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <8 x i16> %a to <8 x i32>
+; CODE-LABEL: ssubw2_4s
+; CODE: ssubw2 v2.4s, v2.4s, v0.8h
+; CODE-NEXT: ssubw v0.4s, v1.4s, v0.4h
+define <8 x i32> @ssubw2_4s(<8 x i16> %a, <8 x i32> %b) {
+ %tmp0 = sext <8 x i16> %a to <8 x i32>
+ %tmp1 = sub <8 x i32> %b, %tmp0
+ ret <8 x i32> %tmp1
+}
+
+; COST-LABEL: ssubw2_2d
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp0 = sext <4 x i32> %a to <4 x i64>
+; CODE-LABEL: ssubw2_2d
+; CODE: ssubw2 v2.2d, v2.2d, v0.4s
+; CODE-NEXT: ssubw v0.2d, v1.2d, v0.2s
+define <4 x i64> @ssubw2_2d(<4 x i32> %a, <4 x i64> %b) {
+ %tmp0 = sext <4 x i32> %a to <4 x i64>
+ %tmp1 = sub <4 x i64> %b, %tmp0
+ ret <4 x i64> %tmp1
+}
+
+; COST-LABEL: neg_wrong_operand_order
+; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16>
+define <8 x i16> @neg_wrong_operand_order(<8 x i8> %a, <8 x i16> %b) {
+ %tmp0 = zext <8 x i8> %a to <8 x i16>
+ %tmp1 = sub <8 x i16> %tmp0, %b
+ ret <8 x i16> %tmp1
+}
+
+; COST-LABEL: neg_non_widening_op
+; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16>
+define <8 x i16> @neg_non_widening_op(<8 x i8> %a, <8 x i16> %b) {
+ %tmp0 = zext <8 x i8> %a to <8 x i16>
+ %tmp1 = udiv <8 x i16> %b, %tmp0
+ ret <8 x i16> %tmp1
+}
+
+; COST-LABEL: neg_dissimilar_operand_kind_0
+; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = sext <8 x i8> %a to <8 x i16>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <8 x i8> %b to <8 x i16>
+define <8 x i16> @neg_dissimilar_operand_kind_0(<8 x i8> %a, <8 x i8> %b) {
+ %tmp0 = sext <8 x i8> %a to <8 x i16>
+ %tmp1 = zext <8 x i8> %b to <8 x i16>
+ %tmp2 = add <8 x i16> %tmp0, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+; COST-LABEL: neg_dissimilar_operand_kind_1
+; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = zext <4 x i8> %a to <4 x i32>
+; COST-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %tmp1 = zext <4 x i16> %b to <4 x i32>
+define <4 x i32> @neg_dissimilar_operand_kind_1(<4 x i8> %a, <4 x i16> %b) {
+ %tmp0 = zext <4 x i8> %a to <4 x i32>
+ %tmp1 = zext <4 x i16> %b to <4 x i32>
+ %tmp2 = add <4 x i32> %tmp0, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+; COST-LABEL: neg_illegal_vector_type_0
+; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = zext <16 x i4> %a to <16 x i8>
+define <16 x i8> @neg_illegal_vector_type_0(<16 x i4> %a, <16 x i8> %b) {
+ %tmp0 = zext <16 x i4> %a to <16 x i8>
+ %tmp1 = sub <16 x i8> %b, %tmp0
+ ret <16 x i8> %tmp1
+}
+
+; COST-LABEL: neg_illegal_vector_type_1
+; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = zext <1 x i16> %a to <1 x i32>
+define <1 x i32> @neg_illegal_vector_type_1(<1 x i16> %a, <1 x i32> %b) {
+ %tmp0 = zext <1 x i16> %a to <1 x i32>
+ %tmp1 = add <1 x i32> %b, %tmp0
+ ret <1 x i32> %tmp1
+}
+
+; COST-LABEL: neg_illegal_vector_type_2
+; COST-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %tmp0 = zext <4 x i16> %a to <4 x i64>
+define <4 x i64> @neg_illegal_vector_type_2(<4 x i16> %a, <4 x i64> %b) {
+ %tmp0 = zext <4 x i16> %a to <4 x i64>
+ %tmp1 = add <4 x i64> %b, %tmp0
+ ret <4 x i64> %tmp1
+}
+
+; COST-LABEL: neg_illegal_vector_type_3
+; COST-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %tmp0 = zext <3 x i34> %a to <3 x i68>
+define <3 x i68> @neg_illegal_vector_type_3(<3 x i34> %a, <3 x i68> %b) {
+ %tmp0 = zext <3 x i34> %a to <3 x i68>
+ %tmp1 = add <3 x i68> %b, %tmp0
+ ret <3 x i68> %tmp1
+}
diff --git a/test/Analysis/CostModel/AMDGPU/extractelement.ll b/test/Analysis/CostModel/AMDGPU/extractelement.ll
index 1efbb5873acb..54c8b6c52365 100644
--- a/test/Analysis/CostModel/AMDGPU/extractelement.ll
+++ b/test/Analysis/CostModel/AMDGPU/extractelement.ll
@@ -1,7 +1,9 @@
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa %s | FileCheck -check-prefixes=GCN,CI %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 %s | FileCheck -check-prefixes=GCN,GFX9 %s
-; CHECK: 'extractelement_v2i32'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i32>
+; GCN: 'extractelement_v2i32'
+; GCN: estimated cost of 0 for {{.*}} extractelement <2 x i32>
define amdgpu_kernel void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%elt = extractelement <2 x i32> %vec, i32 1
@@ -9,8 +11,8 @@ define amdgpu_kernel void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32
ret void
}
-; CHECK: 'extractelement_v2f32'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x float>
+; GCN: 'extractelement_v2f32'
+; GCN: estimated cost of 0 for {{.*}} extractelement <2 x float>
define amdgpu_kernel void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%elt = extractelement <2 x float> %vec, i32 1
@@ -18,8 +20,8 @@ define amdgpu_kernel void @extractelement_v2f32(float addrspace(1)* %out, <2 x f
ret void
}
-; CHECK: 'extractelement_v3i32'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <3 x i32>
+; GCN: 'extractelement_v3i32'
+; GCN: estimated cost of 0 for {{.*}} extractelement <3 x i32>
define amdgpu_kernel void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
%elt = extractelement <3 x i32> %vec, i32 1
@@ -27,8 +29,8 @@ define amdgpu_kernel void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32
ret void
}
-; CHECK: 'extractelement_v4i32'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i32>
+; GCN: 'extractelement_v4i32'
+; GCN: estimated cost of 0 for {{.*}} extractelement <4 x i32>
define amdgpu_kernel void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) {
%vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
%elt = extractelement <4 x i32> %vec, i32 1
@@ -36,8 +38,8 @@ define amdgpu_kernel void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32
ret void
}
-; CHECK: 'extractelement_v8i32'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <8 x i32>
+; GCN: 'extractelement_v8i32'
+; GCN: estimated cost of 0 for {{.*}} extractelement <8 x i32>
define amdgpu_kernel void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) {
%vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
%elt = extractelement <8 x i32> %vec, i32 1
@@ -46,8 +48,8 @@ define amdgpu_kernel void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32
}
; FIXME: Should be non-0
-; CHECK: 'extractelement_v8i32_dynindex'
-; CHECK: estimated cost of 2 for {{.*}} extractelement <8 x i32>
+; GCN: 'extractelement_v8i32_dynindex'
+; GCN: estimated cost of 2 for {{.*}} extractelement <8 x i32>
define amdgpu_kernel void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr, i32 %idx) {
%vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
%elt = extractelement <8 x i32> %vec, i32 %idx
@@ -55,8 +57,8 @@ define amdgpu_kernel void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out,
ret void
}
-; CHECK: 'extractelement_v2i64'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i64>
+; GCN: 'extractelement_v2i64'
+; GCN: estimated cost of 0 for {{.*}} extractelement <2 x i64>
define amdgpu_kernel void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%elt = extractelement <2 x i64> %vec, i64 1
@@ -64,8 +66,8 @@ define amdgpu_kernel void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64
ret void
}
-; CHECK: 'extractelement_v3i64'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <3 x i64>
+; GCN: 'extractelement_v3i64'
+; GCN: estimated cost of 0 for {{.*}} extractelement <3 x i64>
define amdgpu_kernel void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr) {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
%elt = extractelement <3 x i64> %vec, i64 1
@@ -73,8 +75,8 @@ define amdgpu_kernel void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64
ret void
}
-; CHECK: 'extractelement_v4i64'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i64>
+; GCN: 'extractelement_v4i64'
+; GCN: estimated cost of 0 for {{.*}} extractelement <4 x i64>
define amdgpu_kernel void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr) {
%vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
%elt = extractelement <4 x i64> %vec, i64 1
@@ -82,8 +84,8 @@ define amdgpu_kernel void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64
ret void
}
-; CHECK: 'extractelement_v8i64'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <8 x i64>
+; GCN: 'extractelement_v8i64'
+; GCN: estimated cost of 0 for {{.*}} extractelement <8 x i64>
define amdgpu_kernel void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr) {
%vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
%elt = extractelement <8 x i64> %vec, i64 1
@@ -91,8 +93,8 @@ define amdgpu_kernel void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64
ret void
}
-; CHECK: 'extractelement_v4i8'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i8>
+; GCN: 'extractelement_v4i8'
+; GCN: estimated cost of 1 for {{.*}} extractelement <4 x i8>
define amdgpu_kernel void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %vaddr) {
%vec = load <4 x i8>, <4 x i8> addrspace(1)* %vaddr
%elt = extractelement <4 x i8> %vec, i8 1
@@ -100,11 +102,31 @@ define amdgpu_kernel void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> a
ret void
}
-; CHECK: 'extractelement_v2i16'
-; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i16>
-define amdgpu_kernel void @extractelement_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+; GCN: 'extractelement_0_v2i16':
+; CI: estimated cost of 1 for {{.*}} extractelement <2 x i16> %vec, i16 0
+; VI: estimated cost of 0 for {{.*}} extractelement <2 x i16>
+; GFX9: estimated cost of 0 for {{.*}} extractelement <2 x i16>
+define amdgpu_kernel void @extractelement_0_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %elt = extractelement <2 x i16> %vec, i16 0
+ store i16 %elt, i16 addrspace(1)* %out
+ ret void
+}
+
+; GCN: 'extractelement_1_v2i16':
+; GCN: estimated cost of 1 for {{.*}} extractelement <2 x i16>
+define amdgpu_kernel void @extractelement_1_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%elt = extractelement <2 x i16> %vec, i16 1
store i16 %elt, i16 addrspace(1)* %out
ret void
}
+
+; GCN: 'extractelement_var_v2i16'
+; GCN: estimated cost of 1 for {{.*}} extractelement <2 x i16>
+define amdgpu_kernel void @extractelement_var_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, i32 %idx) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %elt = extractelement <2 x i16> %vec, i32 %idx
+ store i16 %elt, i16 addrspace(1)* %out
+ ret void
+}
diff --git a/test/Analysis/CostModel/AMDGPU/insertelement.ll b/test/Analysis/CostModel/AMDGPU/insertelement.ll
index 6f296a3e7a34..67ab2607acd5 100644
--- a/test/Analysis/CostModel/AMDGPU/insertelement.ll
+++ b/test/Analysis/CostModel/AMDGPU/insertelement.ll
@@ -1,37 +1,50 @@
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa %s | FileCheck -check-prefixes=GCN,CI %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 %s | FileCheck -check-prefixes=GCN,GFX9 %s
-; CHECK: 'insertelement_v2i32'
-; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i32>
+; GCN-LABEL: 'insertelement_v2i32'
+; GCN: estimated cost of 0 for {{.*}} insertelement <2 x i32>
define amdgpu_kernel void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
- %insert = insertelement <2 x i32> %vec, i32 1, i32 123
+ %insert = insertelement <2 x i32> %vec, i32 123, i32 1
store <2 x i32> %insert, <2 x i32> addrspace(1)* %out
ret void
}
-; CHECK: 'insertelement_v2i64'
-; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i64>
+; GCN-LABEL: 'insertelement_v2i64'
+; GCN: estimated cost of 0 for {{.*}} insertelement <2 x i64>
define amdgpu_kernel void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
- %insert = insertelement <2 x i64> %vec, i64 1, i64 123
+ %insert = insertelement <2 x i64> %vec, i64 123, i64 1
store <2 x i64> %insert, <2 x i64> addrspace(1)* %out
ret void
}
-; CHECK: 'insertelement_v2i16'
-; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i16>
-define amdgpu_kernel void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+; GCN-LABEL: 'insertelement_0_v2i16'
+; CI: estimated cost of 1 for {{.*}} insertelement <2 x i16>
+; VI: estimated cost of 0 for {{.*}} insertelement <2 x i16>
+; GFX9: estimated cost of 0 for {{.*}} insertelement <2 x i16>
+define amdgpu_kernel void @insertelement_0_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
- %insert = insertelement <2 x i16> %vec, i16 1, i16 123
+ %insert = insertelement <2 x i16> %vec, i16 123, i16 0
store <2 x i16> %insert, <2 x i16> addrspace(1)* %out
ret void
}
-; CHECK: 'insertelement_v2i8'
-; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i8>
-define amdgpu_kernel void @insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) {
+; GCN-LABEL: 'insertelement_1_v2i16'
+; GCN: estimated cost of 1 for {{.*}} insertelement <2 x i16>
+define amdgpu_kernel void @insertelement_1_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %insert = insertelement <2 x i16> %vec, i16 123, i16 1
+ store <2 x i16> %insert, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: 'insertelement_1_v2i8'
+; GCN: estimated cost of 1 for {{.*}} insertelement <2 x i8>
+define amdgpu_kernel void @insertelement_1_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) {
%vec = load <2 x i8>, <2 x i8> addrspace(1)* %vaddr
- %insert = insertelement <2 x i8> %vec, i8 1, i8 123
+ %insert = insertelement <2 x i8> %vec, i8 123, i8 1
store <2 x i8> %insert, <2 x i8> addrspace(1)* %out
ret void
}
diff --git a/test/Analysis/CostModel/AMDGPU/shufflevector.ll b/test/Analysis/CostModel/AMDGPU/shufflevector.ll
new file mode 100644
index 000000000000..cc756c82fed3
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/shufflevector.ll
@@ -0,0 +1,43 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 %s | FileCheck -check-prefixes=GFX9,GCN %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji %s | FileCheck -check-prefixes=VI,GCN %s
+
+; GFX9: estimated cost of 0 for {{.*}} shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> zeroinitializer
+define amdgpu_kernel void @shufflevector_00_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> zeroinitializer
+ store <2 x i16> %shuf, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GFX9: estimated cost of 0 for {{.*}} shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
+define amdgpu_kernel void @shufflevector_01_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
+ store <2 x i16> %shuf, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GFX9: estimated cost of 0 for {{.*}} shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
+define amdgpu_kernel void @shufflevector_10_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
+ store <2 x i16> %shuf, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GFX9: estimated cost of 0 for {{.*}} shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
+define amdgpu_kernel void @shufflevector_11_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
+ store <2 x i16> %shuf, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN: estimated cost of 2 for {{.*}} shufflevector <2 x i16> %vec0, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+define amdgpu_kernel void @shufflevector_02_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr0, <2 x i16> addrspace(1)* %vaddr1) {
+ %vec0 = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr0
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr1
+ %shuf = shufflevector <2 x i16> %vec0, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+ store <2 x i16> %shuf, <2 x i16> addrspace(1)* %out
+ ret void
+}
diff --git a/test/Analysis/CostModel/X86/div.ll b/test/Analysis/CostModel/X86/div.ll
index 0ac06ff75ebe..dabaaef3596a 100644
--- a/test/Analysis/CostModel/X86/div.ll
+++ b/test/Analysis/CostModel/X86/div.ll
@@ -139,14 +139,14 @@ define i32 @sdiv_uniformconst() {
; SSE2: cost of 38 {{.*}} %V8i32 = sdiv
; SSSE3: cost of 38 {{.*}} %V8i32 = sdiv
; SSE42: cost of 30 {{.*}} %V8i32 = sdiv
- ; AVX1: cost of 30 {{.*}} %V8i32 = sdiv
+ ; AVX1: cost of 32 {{.*}} %V8i32 = sdiv
; AVX2: cost of 15 {{.*}} %V8i32 = sdiv
; AVX512: cost of 15 {{.*}} %V8i32 = sdiv
%V8i32 = sdiv <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
; SSE2: cost of 76 {{.*}} %V16i32 = sdiv
; SSSE3: cost of 76 {{.*}} %V16i32 = sdiv
; SSE42: cost of 60 {{.*}} %V16i32 = sdiv
- ; AVX1: cost of 60 {{.*}} %V16i32 = sdiv
+ ; AVX1: cost of 64 {{.*}} %V16i32 = sdiv
; AVX2: cost of 30 {{.*}} %V16i32 = sdiv
; AVX512: cost of 15 {{.*}} %V16i32 = sdiv
%V16i32 = sdiv <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
@@ -157,12 +157,12 @@ define i32 @sdiv_uniformconst() {
; AVX: cost of 6 {{.*}} %V8i16 = sdiv
%V8i16 = sdiv <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
; SSE: cost of 12 {{.*}} %V16i16 = sdiv
- ; AVX1: cost of 12 {{.*}} %V16i16 = sdiv
+ ; AVX1: cost of 14 {{.*}} %V16i16 = sdiv
; AVX2: cost of 6 {{.*}} %V16i16 = sdiv
; AVX512: cost of 6 {{.*}} %V16i16 = sdiv
%V16i16 = sdiv <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
; SSE: cost of 24 {{.*}} %V32i16 = sdiv
- ; AVX1: cost of 24 {{.*}} %V32i16 = sdiv
+ ; AVX1: cost of 28 {{.*}} %V32i16 = sdiv
; AVX2: cost of 12 {{.*}} %V32i16 = sdiv
; AVX512F: cost of 12 {{.*}} %V32i16 = sdiv
; AVX512BW: cost of 6 {{.*}} %V32i16 = sdiv
@@ -203,12 +203,12 @@ define i32 @udiv_uniformconst() {
; AVX: cost of 15 {{.*}} %V4i32 = udiv
%V4i32 = udiv <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7>
; SSE: cost of 30 {{.*}} %V8i32 = udiv
- ; AVX1: cost of 30 {{.*}} %V8i32 = udiv
+ ; AVX1: cost of 32 {{.*}} %V8i32 = udiv
; AVX2: cost of 15 {{.*}} %V8i32 = udiv
; AVX512: cost of 15 {{.*}} %V8i32 = udiv
%V8i32 = udiv <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
; SSE: cost of 60 {{.*}} %V16i32 = udiv
- ; AVX1: cost of 60 {{.*}} %V16i32 = udiv
+ ; AVX1: cost of 64 {{.*}} %V16i32 = udiv
; AVX2: cost of 30 {{.*}} %V16i32 = udiv
; AVX512: cost of 15 {{.*}} %V16i32 = udiv
%V16i32 = udiv <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
@@ -219,12 +219,12 @@ define i32 @udiv_uniformconst() {
; AVX: cost of 6 {{.*}} %V8i16 = udiv
%V8i16 = udiv <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
; SSE: cost of 12 {{.*}} %V16i16 = udiv
- ; AVX1: cost of 12 {{.*}} %V16i16 = udiv
+ ; AVX1: cost of 14 {{.*}} %V16i16 = udiv
; AVX2: cost of 6 {{.*}} %V16i16 = udiv
; AVX512: cost of 6 {{.*}} %V16i16 = udiv
%V16i16 = udiv <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
; SSE: cost of 24 {{.*}} %V32i16 = udiv
- ; AVX1: cost of 24 {{.*}} %V32i16 = udiv
+ ; AVX1: cost of 28 {{.*}} %V32i16 = udiv
; AVX2: cost of 12 {{.*}} %V32i16 = udiv
; AVX512F: cost of 12 {{.*}} %V32i16 = udiv
; AVX512BW: cost of 6 {{.*}} %V32i16 = udiv
@@ -269,14 +269,14 @@ define i32 @sdiv_uniformconstpow2() {
; SSE2: cost of 38 {{.*}} %V8i32 = sdiv
; SSSE3: cost of 38 {{.*}} %V8i32 = sdiv
; SSE42: cost of 30 {{.*}} %V8i32 = sdiv
- ; AVX1: cost of 30 {{.*}} %V8i32 = sdiv
+ ; AVX1: cost of 32 {{.*}} %V8i32 = sdiv
; AVX2: cost of 15 {{.*}} %V8i32 = sdiv
; AVX512: cost of 15 {{.*}} %V8i32 = sdiv
%V8i32 = sdiv <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; SSE2: cost of 76 {{.*}} %V16i32 = sdiv
; SSSE3: cost of 76 {{.*}} %V16i32 = sdiv
; SSE42: cost of 60 {{.*}} %V16i32 = sdiv
- ; AVX1: cost of 60 {{.*}} %V16i32 = sdiv
+ ; AVX1: cost of 64 {{.*}} %V16i32 = sdiv
; AVX2: cost of 30 {{.*}} %V16i32 = sdiv
; AVX512: cost of 15 {{.*}} %V16i32 = sdiv
%V16i32 = sdiv <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -287,12 +287,12 @@ define i32 @sdiv_uniformconstpow2() {
; AVX: cost of 6 {{.*}} %V8i16 = sdiv
%V8i16 = sdiv <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
; SSE: cost of 12 {{.*}} %V16i16 = sdiv
- ; AVX1: cost of 12 {{.*}} %V16i16 = sdiv
+ ; AVX1: cost of 14 {{.*}} %V16i16 = sdiv
; AVX2: cost of 6 {{.*}} %V16i16 = sdiv
; AVX512: cost of 6 {{.*}} %V16i16 = sdiv
%V16i16 = sdiv <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
; SSE: cost of 24 {{.*}} %V32i16 = sdiv
- ; AVX1: cost of 24 {{.*}} %V32i16 = sdiv
+ ; AVX1: cost of 28 {{.*}} %V32i16 = sdiv
; AVX2: cost of 12 {{.*}} %V32i16 = sdiv
; AVX512F: cost of 12 {{.*}} %V32i16 = sdiv
; AVX512BW: cost of 6 {{.*}} %V32i16 = sdiv
@@ -333,12 +333,12 @@ define i32 @udiv_uniformconstpow2() {
; AVX: cost of 15 {{.*}} %V4i32 = udiv
%V4i32 = udiv <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16>
; SSE: cost of 30 {{.*}} %V8i32 = udiv
- ; AVX1: cost of 30 {{.*}} %V8i32 = udiv
+ ; AVX1: cost of 32 {{.*}} %V8i32 = udiv
; AVX2: cost of 15 {{.*}} %V8i32 = udiv
; AVX512: cost of 15 {{.*}} %V8i32 = udiv
%V8i32 = udiv <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; SSE: cost of 60 {{.*}} %V16i32 = udiv
- ; AVX1: cost of 60 {{.*}} %V16i32 = udiv
+ ; AVX1: cost of 64 {{.*}} %V16i32 = udiv
; AVX2: cost of 30 {{.*}} %V16i32 = udiv
; AVX512: cost of 15 {{.*}} %V16i32 = udiv
%V16i32 = udiv <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -349,12 +349,12 @@ define i32 @udiv_uniformconstpow2() {
; AVX: cost of 6 {{.*}} %V8i16 = udiv
%V8i16 = udiv <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
; SSE: cost of 12 {{.*}} %V16i16 = udiv
- ; AVX1: cost of 12 {{.*}} %V16i16 = udiv
+ ; AVX1: cost of 14 {{.*}} %V16i16 = udiv
; AVX2: cost of 6 {{.*}} %V16i16 = udiv
; AVX512: cost of 6 {{.*}} %V16i16 = udiv
%V16i16 = udiv <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
; SSE: cost of 24 {{.*}} %V32i16 = udiv
- ; AVX1: cost of 24 {{.*}} %V32i16 = udiv
+ ; AVX1: cost of 28 {{.*}} %V32i16 = udiv
; AVX2: cost of 12 {{.*}} %V32i16 = udiv
; AVX512F: cost of 12 {{.*}} %V32i16 = udiv
; AVX512BW: cost of 6 {{.*}} %V32i16 = udiv
diff --git a/test/Analysis/CostModel/X86/vshift-ashr-cost.ll b/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
index a23b13fb2e25..eabc2330ddc6 100644
--- a/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
@@ -33,10 +33,10 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v4i64':
; SSE2: Found an estimated cost of 24 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = ashr <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -45,10 +45,10 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i64':
; SSE2: Found an estimated cost of 48 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 8 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = ashr <8 x i64> %a, %b
ret <8 x i64> %shift
}
@@ -70,10 +70,10 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i32':
; SSE2: Found an estimated cost of 32 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <8 x i32> %a, %b
ret <8 x i32> %shift
@@ -83,10 +83,10 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i32':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <16 x i32> %a, %b
ret <16 x i32> %shift
@@ -109,11 +109,11 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i16':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
+; AVX: Found an estimated cost of 30 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
; AVX512F: Found an estimated cost of 10 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -122,11 +122,11 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v32i16':
; SSE2: Found an estimated cost of 128 for instruction: %shift
; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
+; AVX: Found an estimated cost of 60 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = ashr <32 x i16> %a, %b
ret <32 x i16> %shift
}
@@ -147,11 +147,11 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v32i8':
; SSE2: Found an estimated cost of 108 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 50 for instruction: %shift
; AVX2: Found an estimated cost of 24 for instruction: %shift
; AVX512F: Found an estimated cost of 24 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = ashr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -160,11 +160,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v64i8':
; SSE2: Found an estimated cost of 216 for instruction: %shift
; SSE41: Found an estimated cost of 96 for instruction: %shift
-; AVX: Found an estimated cost of 96 for instruction: %shift
+; AVX: Found an estimated cost of 100 for instruction: %shift
; AVX2: Found an estimated cost of 48 for instruction: %shift
; AVX512F: Found an estimated cost of 48 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = ashr <64 x i8> %a, %b
ret <64 x i8> %shift
}
@@ -191,11 +191,10 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i64':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
-; AVX2: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
+; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%insert = insertelement <4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = ashr <4 x i64> %a, %splat
@@ -206,11 +205,10 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i64':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
-; AVX2: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
+; AVX2: Found an estimated cost of 8 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%insert = insertelement <8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i64> %a, %splat
@@ -235,10 +233,10 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -250,10 +248,10 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -279,10 +277,10 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i16':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
@@ -294,11 +292,11 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i16':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
@@ -324,10 +322,10 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i8':
; SSE2: Found an estimated cost of 108 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 50 for instruction: %shift
; AVX2: Found an estimated cost of 24 for instruction: %shift
; AVX512: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%insert = insertelement <32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
@@ -338,11 +336,11 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v64i8':
; SSE2: Found an estimated cost of 216 for instruction: %shift
; SSE41: Found an estimated cost of 96 for instruction: %shift
-; AVX: Found an estimated cost of 96 for instruction: %shift
+; AVX: Found an estimated cost of 100 for instruction: %shift
; AVX2: Found an estimated cost of 48 for instruction: %shift
; AVX512F: Found an estimated cost of 48 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%insert = insertelement <64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = ashr <64 x i8> %a, %splat
@@ -369,10 +367,10 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v4i64':
; SSE2: Found an estimated cost of 24 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 15, i64 31>
ret <4 x i64> %shift
}
@@ -381,10 +379,10 @@ define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v8i64':
; SSE2: Found an estimated cost of 48 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 8 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = ashr <8 x i64> %a, <i64 1, i64 7, i64 15, i64 31, i64 1, i64 7, i64 15, i64 31>
ret <8 x i64> %shift
}
@@ -406,10 +404,10 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v8i32':
; SSE2: Found an estimated cost of 32 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <8 x i32> %shift
@@ -419,10 +417,10 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v16i32':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <16 x i32> %shift
@@ -445,11 +443,11 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v16i16':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
+; AVX: Found an estimated cost of 30 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
; AVX512F: Found an estimated cost of 10 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <16 x i16> %shift
}
@@ -458,11 +456,11 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v32i16':
; SSE2: Found an estimated cost of 128 for instruction: %shift
; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
+; AVX: Found an estimated cost of 60 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
}
@@ -483,10 +481,10 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v32i8':
; SSE2: Found an estimated cost of 108 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 50 for instruction: %shift
; AVX2: Found an estimated cost of 24 for instruction: %shift
; AVX512: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -495,11 +493,11 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v64i8':
; SSE2: Found an estimated cost of 216 for instruction: %shift
; SSE41: Found an estimated cost of 96 for instruction: %shift
-; AVX: Found an estimated cost of 96 for instruction: %shift
+; AVX: Found an estimated cost of 100 for instruction: %shift
; AVX2: Found an estimated cost of 48 for instruction: %shift
; AVX512F: Found an estimated cost of 48 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
@@ -524,10 +522,11 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v4i64':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -536,10 +535,11 @@ define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v8i64':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 8 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
ret <8 x i64> %shift
}
@@ -560,10 +560,10 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v8i32':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
@@ -573,10 +573,10 @@ define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i32':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <16 x i32> %shift
@@ -598,10 +598,10 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i16':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
@@ -611,11 +611,11 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i16':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
@@ -628,7 +628,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) {
; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 4 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
}
@@ -637,10 +637,10 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i8':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 4 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
@@ -650,11 +650,11 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v64i8':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 8 for instruction: %shift
; AVX512F: Found an estimated cost of 8 for instruction: %shift
; AVX512BW: Found an estimated cost of 4 for instruction: %shift
-; XOPAVX: Found an estimated cost of 16 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
diff --git a/test/Analysis/CostModel/X86/vshift-lshr-cost.ll b/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
index 546b2bb50f26..6e890369d677 100644
--- a/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
@@ -34,10 +34,10 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v4i64':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <4 x i64> %a, %b
ret <4 x i64> %shift
@@ -47,10 +47,10 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i64':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <8 x i64> %a, %b
ret <8 x i64> %shift
@@ -73,10 +73,10 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i32':
; SSE2: Found an estimated cost of 32 for instruction: %shift
; SSE41: Found an estimated cost of 22 for instruction: %shift
-; AVX: Found an estimated cost of 22 for instruction: %shift
+; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <8 x i32> %a, %b
ret <8 x i32> %shift
@@ -86,10 +86,10 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i32':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 44 for instruction: %shift
-; AVX: Found an estimated cost of 44 for instruction: %shift
+; AVX: Found an estimated cost of 48 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <16 x i32> %a, %b
ret <16 x i32> %shift
@@ -112,11 +112,11 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i16':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
+; AVX: Found an estimated cost of 30 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
; AVX512F: Found an estimated cost of 10 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -125,11 +125,11 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v32i16':
; SSE2: Found an estimated cost of 128 for instruction: %shift
; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
+; AVX: Found an estimated cost of 60 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = lshr <32 x i16> %a, %b
ret <32 x i16> %shift
}
@@ -150,10 +150,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = lshr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -162,11 +162,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = lshr <64 x i8> %a, %b
ret <64 x i8> %shift
}
@@ -193,10 +193,10 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i64':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -208,10 +208,10 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i64':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -237,10 +237,10 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -252,10 +252,10 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -281,10 +281,10 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i16':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
@@ -296,11 +296,11 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i16':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
@@ -326,10 +326,10 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%insert = insertelement <32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
@@ -340,11 +340,11 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%insert = insertelement <64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = lshr <64 x i8> %a, %splat
@@ -372,10 +372,10 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v4i64':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 15, i64 31>
ret <4 x i64> %shift
@@ -385,10 +385,10 @@ define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v8i64':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <8 x i64> %a, <i64 1, i64 7, i64 15, i64 31, i64 1, i64 7, i64 15, i64 31>
ret <8 x i64> %shift
@@ -411,10 +411,10 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v8i32':
; SSE2: Found an estimated cost of 32 for instruction: %shift
; SSE41: Found an estimated cost of 22 for instruction: %shift
-; AVX: Found an estimated cost of 22 for instruction: %shift
+; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <8 x i32> %shift
@@ -424,10 +424,10 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v16i32':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 44 for instruction: %shift
-; AVX: Found an estimated cost of 44 for instruction: %shift
+; AVX: Found an estimated cost of 48 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <16 x i32> %shift
@@ -450,11 +450,11 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v16i16':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
+; AVX: Found an estimated cost of 30 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
; AVX512F: Found an estimated cost of 10 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <16 x i16> %shift
}
@@ -463,11 +463,11 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v32i16':
; SSE2: Found an estimated cost of 128 for instruction: %shift
; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
+; AVX: Found an estimated cost of 60 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
}
@@ -488,10 +488,10 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; AVX: Found an estimated cost of 26 for instruction: %shift
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 6 for instruction: %shift
%shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -500,11 +500,11 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; AVX: Found an estimated cost of 52 for instruction: %shift
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOP: Found an estimated cost of 12 for instruction: %shift
%shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
@@ -529,10 +529,10 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v4i64':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
@@ -542,10 +542,10 @@ define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v8i64':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
ret <8 x i64> %shift
@@ -567,10 +567,10 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v8i32':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
@@ -580,10 +580,10 @@ define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i32':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <16 x i32> %shift
@@ -605,10 +605,10 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i16':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
@@ -618,11 +618,11 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i16':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
@@ -644,10 +644,10 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i8':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 6 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 6 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
@@ -657,11 +657,11 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v64i8':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 12 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512F: Found an estimated cost of 4 for instruction: %shift
; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 12 for instruction: %shift
; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
diff --git a/test/Analysis/CostModel/X86/vshift-shl-cost.ll b/test/Analysis/CostModel/X86/vshift-shl-cost.ll
index 90356f5ce8be..5e604bb7983e 100644
--- a/test/Analysis/CostModel/X86/vshift-shl-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-shl-cost.ll
@@ -35,10 +35,10 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v4i64':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <4 x i64> %a, %b
ret <4 x i64> %shift
@@ -48,10 +48,10 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i64':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <8 x i64> %a, %b
ret <8 x i64> %shift
@@ -74,10 +74,10 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i32':
; SSE2: Found an estimated cost of 20 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <8 x i32> %a, %b
ret <8 x i32> %shift
@@ -87,10 +87,10 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i32':
; SSE2: Found an estimated cost of 40 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <16 x i32> %a, %b
ret <16 x i32> %shift
@@ -113,11 +113,11 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i16':
; SSE2: Found an estimated cost of 64 for instruction: %shift
; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
+; AVX: Found an estimated cost of 30 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
; AVX512F: Found an estimated cost of 10 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -126,11 +126,11 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v32i16':
; SSE2: Found an estimated cost of 128 for instruction: %shift
; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
+; AVX: Found an estimated cost of 60 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = shl <32 x i16> %a, %b
ret <32 x i16> %shift
}
@@ -151,10 +151,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 22 for instruction: %shift
-; AVX: Found an estimated cost of 22 for instruction: %shift
+; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = shl <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -163,11 +163,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 44 for instruction: %shift
-; AVX: Found an estimated cost of 44 for instruction: %shift
+; AVX: Found an estimated cost of 48 for instruction: %shift
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = shl <64 x i8> %a, %b
ret <64 x i8> %shift
}
@@ -194,10 +194,10 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i64':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -209,10 +209,10 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i64':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -238,10 +238,10 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -253,10 +253,10 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -282,10 +282,10 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i16':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%insert = insertelement <16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
@@ -297,11 +297,11 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i16':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%insert = insertelement <32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
@@ -327,10 +327,10 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 22 for instruction: %shift
-; AVX: Found an estimated cost of 22 for instruction: %shift
+; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 4 for instruction: %shift
%insert = insertelement <32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
@@ -341,11 +341,11 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 44 for instruction: %shift
-; AVX: Found an estimated cost of 44 for instruction: %shift
+; AVX: Found an estimated cost of 48 for instruction: %shift
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 8 for instruction: %shift
%insert = insertelement <64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = shl <64 x i8> %a, %splat
@@ -373,10 +373,10 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v4i64':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 10 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 15, i64 31>
ret <4 x i64> %shift
@@ -386,10 +386,10 @@ define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v8i64':
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 20 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <8 x i64> %a, <i64 1, i64 7, i64 15, i64 31, i64 1, i64 7, i64 15, i64 31>
ret <8 x i64> %shift
@@ -415,7 +415,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) {
; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <8 x i32> %shift
@@ -428,7 +428,7 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) {
; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <16 x i32> %shift
@@ -453,7 +453,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) {
; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <16 x i16> %shift
@@ -467,7 +467,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
@@ -489,10 +489,10 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 22 for instruction: %shift
-; AVX: Found an estimated cost of 22 for instruction: %shift
+; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -501,11 +501,11 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'constant_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 44 for instruction: %shift
-; AVX: Found an estimated cost of 44 for instruction: %shift
+; AVX: Found an estimated cost of 48 for instruction: %shift
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
@@ -531,10 +531,10 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v4i64':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
@@ -544,10 +544,10 @@ define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v8i64':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
ret <8 x i64> %shift
@@ -570,10 +570,10 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v8i32':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
@@ -583,10 +583,10 @@ define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i32':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <16 x i32> %shift
@@ -608,10 +608,10 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i16':
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
@@ -621,11 +621,11 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i16':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
@@ -638,7 +638,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) {
; AVX: Found an estimated cost of 2 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 2 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
}
@@ -647,7 +647,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i8':
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 6 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 2 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
@@ -660,7 +660,7 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v64i8':
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 12 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512F: Found an estimated cost of 4 for instruction: %shift
; AVX512BW: Found an estimated cost of 2 for instruction: %shift
@@ -761,7 +761,7 @@ define <16 x i16> @test6(<16 x i16> %a) {
; SSE41: Found an estimated cost of 2 for instruction: %shl
; AVX: Found an estimated cost of 4 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
-; XOPAVX: Found an estimated cost of 2 for instruction: %shl
+; XOPAVX: Found an estimated cost of 4 for instruction: %shl
; XOPAVX2: Found an estimated cost of 1 for instruction: %shl
@@ -778,7 +778,7 @@ define <8 x i32> @test7(<8 x i32> %a) {
; SSE41: Found an estimated cost of 2 for instruction: %shl
; AVX: Found an estimated cost of 4 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
-; XOPAVX: Found an estimated cost of 2 for instruction: %shl
+; XOPAVX: Found an estimated cost of 4 for instruction: %shl
; XOPAVX2: Found an estimated cost of 1 for instruction: %shl
@@ -794,9 +794,9 @@ define <4 x i64> @test8(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'test8':
; SSE2: Found an estimated cost of 8 for instruction: %shl
; SSE41: Found an estimated cost of 8 for instruction: %shl
-; AVX: Found an estimated cost of 8 for instruction: %shl
+; AVX: Found an estimated cost of 10 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
-; XOPAVX: Found an estimated cost of 2 for instruction: %shl
+; XOPAVX: Found an estimated cost of 4 for instruction: %shl
; XOPAVX2: Found an estimated cost of 1 for instruction: %shl
@@ -811,7 +811,7 @@ define <32 x i16> @test9(<32 x i16> %a) {
; SSE41: Found an estimated cost of 4 for instruction: %shl
; AVX: Found an estimated cost of 8 for instruction: %shl
; AVX2: Found an estimated cost of 2 for instruction: %shl
-; XOPAVX: Found an estimated cost of 4 for instruction: %shl
+; XOPAVX: Found an estimated cost of 8 for instruction: %shl
; XOPAVX2: Found an estimated cost of 2 for instruction: %shl
@@ -826,7 +826,7 @@ define <16 x i32> @test10(<16 x i32> %a) {
; SSE41: Found an estimated cost of 4 for instruction: %shl
; AVX: Found an estimated cost of 8 for instruction: %shl
; AVX2: Found an estimated cost of 2 for instruction: %shl
-; XOPAVX: Found an estimated cost of 4 for instruction: %shl
+; XOPAVX: Found an estimated cost of 8 for instruction: %shl
; XOPAVX2: Found an estimated cost of 2 for instruction: %shl
@@ -842,7 +842,7 @@ define <8 x i64> @test11(<8 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'test11':
; SSE2: Found an estimated cost of 16 for instruction: %shl
; SSE41: Found an estimated cost of 16 for instruction: %shl
-; AVX: Found an estimated cost of 16 for instruction: %shl
+; AVX: Found an estimated cost of 20 for instruction: %shl
; AVX2: Found an estimated cost of 2 for instruction: %shl
-; XOPAVX: Found an estimated cost of 4 for instruction: %shl
+; XOPAVX: Found an estimated cost of 8 for instruction: %shl
; XOPAVX2: Found an estimated cost of 2 for instruction: %shl
diff --git a/test/Analysis/ScalarEvolution/different-loops-recs.ll b/test/Analysis/ScalarEvolution/different-loops-recs.ll
new file mode 100644
index 000000000000..ad3d1e0bd110
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/different-loops-recs.ll
@@ -0,0 +1,454 @@
+; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
+
+; This test set ensures that we can correctly operate with recurrences from
+; different loops.
+
+; Check that we can evaluate a sum of phis from two different loops in any
+; order.
+
+define void @test_00() {
+
+; CHECK-LABEL: Classifying expressions for: @test_00
+; CHECK: %sum1 = add i32 %phi1, %phi2
+; CHECK-NEXT: --> {14,+,3}<%loop1>
+; CHECK: %sum2 = add i32 %sum1, %phi3
+; CHECK-NEXT: --> {20,+,6}<%loop1>
+; CHECK: %sum3 = add i32 %phi4, %phi5
+; CHECK-NEXT: --> {116,+,3}<%loop2>
+; CHECK: %sum4 = add i32 %sum3, %phi6
+; CHECK-NEXT: --> {159,+,6}<%loop2>
+; CHECK: %s1 = add i32 %phi1, %phi4
+; CHECK-NEXT: --> {{{{}}73,+,1}<%loop1>,+,1}<%loop2>
+; CHECK: %s2 = add i32 %phi5, %phi2
+; CHECK-NEXT: --> {{{{}}57,+,2}<%loop1>,+,2}<%loop2>
+; CHECK: %s3 = add i32 %sum1, %sum3
+; CHECK-NEXT: --> {{{{}}130,+,3}<%loop1>,+,3}<%loop2>
+; CHECK: %s4 = add i32 %sum4, %sum2
+; CHECK-NEXT: --> {{{{}}179,+,6}<%loop1>,+,6}<%loop2>
+; CHECK: %s5 = add i32 %phi3, %sum3
+; CHECK-NEXT: --> {{{{}}122,+,3}<%loop1>,+,3}<%loop2>
+; CHECK: %s6 = add i32 %sum2, %phi6
+; CHECK-NEXT: --> {{{{}}63,+,6}<%loop1>,+,3}<%loop2>
+
+entry:
+ br label %loop1
+
+loop1:
+ %phi1 = phi i32 [ 10, %entry ], [ %phi1.inc, %loop1 ]
+ %phi2 = phi i32 [ 4, %entry ], [ %phi2.inc, %loop1 ]
+ %phi3 = phi i32 [ 6, %entry ], [ %phi3.inc, %loop1 ]
+ %phi1.inc = add i32 %phi1, 1
+ %phi2.inc = add i32 %phi2, 2
+ %phi3.inc = add i32 %phi3, 3
+ %sum1 = add i32 %phi1, %phi2
+ %sum2 = add i32 %sum1, %phi3
+ %cond1 = icmp ult i32 %sum2, 1000
+ br i1 %cond1, label %loop1, label %loop2
+
+loop2:
+ %phi4 = phi i32 [ 63, %loop1 ], [ %phi4.inc, %loop2 ]
+ %phi5 = phi i32 [ 53, %loop1 ], [ %phi5.inc, %loop2 ]
+ %phi6 = phi i32 [ 43, %loop1 ], [ %phi6.inc, %loop2 ]
+ %phi4.inc = add i32 %phi4, 1
+ %phi5.inc = add i32 %phi5, 2
+ %phi6.inc = add i32 %phi6, 3
+ %sum3 = add i32 %phi4, %phi5
+ %sum4 = add i32 %sum3, %phi6
+ %cond2 = icmp ult i32 %sum4, 1000
+ br i1 %cond2, label %loop2, label %exit
+
+exit:
+ %s1 = add i32 %phi1, %phi4
+ %s2 = add i32 %phi5, %phi2
+ %s3 = add i32 %sum1, %sum3
+ %s4 = add i32 %sum4, %sum2
+ %s5 = add i32 %phi3, %sum3
+ %s6 = add i32 %sum2, %phi6
+ ret void
+}
+
+; Check that we can evaluate a sum of phis+invariants from two different loops
+; in any order.
+
+define void @test_01(i32 %a, i32 %b) {
+
+; CHECK-LABEL: Classifying expressions for: @test_01
+; CHECK: %sum1 = add i32 %phi1, %phi2
+; CHECK-NEXT: --> {(%a + %b),+,3}<%loop1>
+; CHECK: %sum2 = add i32 %sum1, %phi3
+; CHECK-NEXT: --> {(6 + %a + %b),+,6}<%loop1>
+; CHECK: %is1 = add i32 %sum2, %a
+; CHECK-NEXT: --> {(6 + (2 * %a) + %b),+,6}<%loop1>
+; CHECK: %sum3 = add i32 %phi4, %phi5
+; CHECK-NEXT: --> {116,+,3}<%loop2>
+; CHECK: %sum4 = add i32 %sum3, %phi6
+; CHECK-NEXT: --> {159,+,6}<%loop2>
+; CHECK: %is2 = add i32 %sum4, %b
+; CHECK-NEXT: --> {(159 + %b),+,6}<%loop2>
+; CHECK: %ec2 = add i32 %is1, %is2
+; CHECK-NEXT: --> {{{{}}(165 + (2 * %a) + (2 * %b)),+,6}<%loop1>,+,6}<%loop2>
+; CHECK: %s1 = add i32 %phi1, %is1
+; CHECK-NEXT: --> {(6 + (3 * %a) + %b),+,7}<%loop1>
+; CHECK: %s2 = add i32 %is2, %phi4
+; CHECK-NEXT: --> {(222 + %b),+,7}<%loop2>
+; CHECK: %s3 = add i32 %is1, %phi5
+; CHECK-NEXT: --> {{{{}}(59 + (2 * %a) + %b),+,6}<%loop1>,+,2}<%loop2>
+; CHECK: %s4 = add i32 %phi2, %is2
+; CHECK-NEXT: --> {{{{}}(159 + (2 * %b)),+,2}<%loop1>,+,6}<%loop2>
+; CHECK: %s5 = add i32 %is1, %is2
+; CHECK-NEXT: --> {{{{}}(165 + (2 * %a) + (2 * %b)),+,6}<%loop1>,+,6}<%loop2>
+; CHECK: %s6 = add i32 %is2, %is1
+; CHECK-NEXT: --> {{{{}}(165 + (2 * %a) + (2 * %b)),+,6}<%loop1>,+,6}<%loop2>
+
+entry:
+ br label %loop1
+
+loop1:
+ %phi1 = phi i32 [ %a, %entry ], [ %phi1.inc, %loop1 ]
+ %phi2 = phi i32 [ %b, %entry ], [ %phi2.inc, %loop1 ]
+ %phi3 = phi i32 [ 6, %entry ], [ %phi3.inc, %loop1 ]
+ %phi1.inc = add i32 %phi1, 1
+ %phi2.inc = add i32 %phi2, 2
+ %phi3.inc = add i32 %phi3, 3
+ %sum1 = add i32 %phi1, %phi2
+ %sum2 = add i32 %sum1, %phi3
+ %is1 = add i32 %sum2, %a
+ %cond1 = icmp ult i32 %is1, 1000
+ br i1 %cond1, label %loop1, label %loop2
+
+loop2:
+ %phi4 = phi i32 [ 63, %loop1 ], [ %phi4.inc, %loop2 ]
+ %phi5 = phi i32 [ 53, %loop1 ], [ %phi5.inc, %loop2 ]
+ %phi6 = phi i32 [ 43, %loop1 ], [ %phi6.inc, %loop2 ]
+ %phi4.inc = add i32 %phi4, 1
+ %phi5.inc = add i32 %phi5, 2
+ %phi6.inc = add i32 %phi6, 3
+ %sum3 = add i32 %phi4, %phi5
+ %sum4 = add i32 %sum3, %phi6
+ %is2 = add i32 %sum4, %b
+ %ec2 = add i32 %is1, %is2
+ %cond2 = icmp ult i32 %ec2, 1000
+ br i1 %cond2, label %loop2, label %exit
+
+exit:
+ %s1 = add i32 %phi1, %is1
+ %s2 = add i32 %is2, %phi4
+ %s3 = add i32 %is1, %phi5
+ %s4 = add i32 %phi2, %is2
+ %s5 = add i32 %is1, %is2
+ %s6 = add i32 %is2, %is1
+ ret void
+}
+
+; Check that we can correctly evaluate a sum of phis+variants from two different
+; loops in any order.
+
+define void @test_02(i32 %a, i32 %b, i32* %p) {
+
+; CHECK-LABEL: Classifying expressions for: @test_02
+; CHECK: %sum1 = add i32 %phi1, %phi2
+; CHECK-NEXT: --> {(%a + %b),+,3}<%loop1>
+; CHECK: %sum2 = add i32 %sum1, %phi3
+; CHECK-NEXT: --> {(6 + %a + %b),+,6}<%loop1>
+; CHECK: %is1 = add i32 %sum2, %v1
+; CHECK-NEXT: --> ({(6 + %a + %b),+,6}<%loop1> + %v1)
+; CHECK: %sum3 = add i32 %phi4, %phi5
+; CHECK-NEXT: --> {(%a + %b),+,3}<%loop2>
+; CHECK: %sum4 = add i32 %sum3, %phi6
+; CHECK-NEXT: --> {(43 + %a + %b),+,6}<%loop2>
+; CHECK: %is2 = add i32 %sum4, %v2
+; CHECK-NEXT: --> ({(43 + %a + %b),+,6}<%loop2> + %v2)
+; CHECK: %is3 = add i32 %v1, %sum2
+; CHECK-NEXT: --> ({(6 + %a + %b),+,6}<%loop1> + %v1)
+; CHECK: %ec2 = add i32 %is1, %is3
+; CHECK-NEXT: --> (2 * ({(6 + %a + %b),+,6}<%loop1> + %v1))
+; CHECK: %s1 = add i32 %phi1, %is1
+; CHECK-NEXT: --> ({(6 + (2 * %a) + %b),+,7}<%loop1> + %v1)
+; CHECK: %s2 = add i32 %is2, %phi4
+; CHECK-NEXT: --> ({(43 + (2 * %a) + %b),+,7}<%loop2> + %v2)
+; CHECK: %s3 = add i32 %is1, %phi5
+; CHECK-NEXT: --> {({(6 + (2 * %b) + %a),+,6}<%loop1> + %v1),+,2}<%loop2>
+; CHECK: %s4 = add i32 %phi2, %is2
+; CHECK-NEXT: --> ({{{{}}(43 + (2 * %b) + %a),+,2}<%loop1>,+,6}<%loop2> + %v2)
+; CHECK: %s5 = add i32 %is1, %is2
+; CHECK-NEXT: --> ({({(49 + (2 * %a) + (2 * %b)),+,6}<%loop1> + %v1),+,6}<%loop2> + %v2)
+; CHECK: %s6 = add i32 %is2, %is1
+; CHECK-NEXT: --> ({({(49 + (2 * %a) + (2 * %b)),+,6}<%loop1> + %v1),+,6}<%loop2> + %v2)
+
+entry:
+ br label %loop1
+
+loop1:
+ %phi1 = phi i32 [ %a, %entry ], [ %phi1.inc, %loop1 ]
+ %phi2 = phi i32 [ %b, %entry ], [ %phi2.inc, %loop1 ]
+ %phi3 = phi i32 [ 6, %entry ], [ %phi3.inc, %loop1 ]
+ %phi1.inc = add i32 %phi1, 1
+ %phi2.inc = add i32 %phi2, 2
+ %phi3.inc = add i32 %phi3, 3
+ %v1 = load i32, i32* %p
+ %sum1 = add i32 %phi1, %phi2
+ %sum2 = add i32 %sum1, %phi3
+ %is1 = add i32 %sum2, %v1
+ %cond1 = icmp ult i32 %is1, 1000
+ br i1 %cond1, label %loop1, label %loop2
+
+loop2:
+ %phi4 = phi i32 [ %a, %loop1 ], [ %phi4.inc, %loop2 ]
+ %phi5 = phi i32 [ %b, %loop1 ], [ %phi5.inc, %loop2 ]
+ %phi6 = phi i32 [ 43, %loop1 ], [ %phi6.inc, %loop2 ]
+ %phi4.inc = add i32 %phi4, 1
+ %phi5.inc = add i32 %phi5, 2
+ %phi6.inc = add i32 %phi6, 3
+ %v2 = load i32, i32* %p
+ %sum3 = add i32 %phi4, %phi5
+ %sum4 = add i32 %sum3, %phi6
+ %is2 = add i32 %sum4, %v2
+ %is3 = add i32 %v1, %sum2
+ %ec2 = add i32 %is1, %is3
+ %cond2 = icmp ult i32 %ec2, 1000
+ br i1 %cond2, label %loop2, label %exit
+
+exit:
+ %s1 = add i32 %phi1, %is1
+ %s2 = add i32 %is2, %phi4
+ %s3 = add i32 %is1, %phi5
+ %s4 = add i32 %phi2, %is2
+ %s5 = add i32 %is1, %is2
+ %s6 = add i32 %is2, %is1
+ ret void
+}
+
+; Mix of the previous use cases that demonstrates %s3 can be incorrectly treated
+; as a recurrence of loop1 because of operand order if we pick recurrences in an
+; incorrect order.
+
+define void @test_03(i32 %a, i32 %b, i32 %c, i32* %p) {
+
+; CHECK-LABEL: Classifying expressions for: @test_03
+; CHECK: %v1 = load i32, i32* %p
+; CHECK-NEXT: --> %v1
+; CHECK: %s1 = add i32 %phi1, %v1
+; CHECK-NEXT: --> {(%a + %v1),+,1}<%loop1>
+; CHECK: %s2 = add i32 %s1, %b
+; CHECK-NEXT: --> {(%a + %b + %v1),+,1}<%loop1>
+; CHECK: %s3 = add i32 %s2, %phi2
+; CHECK-NEXT: --> ({{{{}}((2 * %a) + %b),+,1}<%loop1>,+,2}<%loop2> + %v1)
+
+entry:
+ br label %loop1
+
+loop1:
+ %phi1 = phi i32 [ %a, %entry ], [ %phi1.inc, %loop1 ]
+ %phi1.inc = add i32 %phi1, 1
+ %cond1 = icmp ult i32 %phi1, %c
+ br i1 %cond1, label %loop1, label %loop2
+
+loop2:
+ %phi2 = phi i32 [ %a, %loop1 ], [ %phi2.inc, %loop2 ]
+ %phi2.inc = add i32 %phi2, 2
+ %v1 = load i32, i32* %p
+ %s1 = add i32 %phi1, %v1
+ %s2 = add i32 %s1, %b
+ %s3 = add i32 %s2, %phi2
+ %cond2 = icmp ult i32 %s3, %c
+ br i1 %cond2, label %loop2, label %exit
+
+exit:
+
+ ret void
+}
+
+; Another mix of the previous use cases that demonstrates that picking the wrong
+; loop for a recurrence may cause a crash in SCEV analysis.
+define void @test_04() {
+
+; CHECK-LABEL: Classifying expressions for: @test_04
+; CHECK: %tmp = phi i64 [ 2, %bb ], [ %tmp4, %bb3 ]
+; CHECK-NEXT: --> {2,+,1}<nuw><nsw><%loop1>
+; CHECK: %tmp2 = trunc i64 %tmp to i32
+; CHECK-NEXT: --> {2,+,1}<%loop1>
+; CHECK: %tmp4 = add nuw nsw i64 %tmp, 1
+; CHECK-NEXT: --> {3,+,1}<nuw><%loop1>
+; CHECK: %tmp7 = phi i64 [ %tmp15, %loop2 ], [ 2, %loop1 ]
+; CHECK-NEXT: --> {2,+,1}<nuw><nsw><%loop2>
+; CHECK: %tmp10 = sub i64 %tmp9, %tmp7
+; CHECK-NEXT: --> ((sext i8 %tmp8 to i64) + {-2,+,-1}<nw><%loop2>)
+; CHECK: %tmp11 = add i64 %tmp10, undef
+; CHECK-NEXT: --> ((sext i8 %tmp8 to i64) + {(-2 + undef),+,-1}<nw><%loop2>)
+; CHECK: %tmp13 = trunc i64 %tmp11 to i32
+; CHECK-NEXT: --> ((sext i8 %tmp8 to i32) + {(trunc i64 (-2 + undef) to i32),+,-1}<%loop2>)
+; CHECK: %tmp14 = sub i32 %tmp13, %tmp2
+; CHECK-NEXT: --> ((sext i8 %tmp8 to i32) + {{{{}}(-2 + (trunc i64 (-2 + undef) to i32)),+,-1}<%loop1>,+,-1}<%loop2>)
+; CHECK: %tmp15 = add nuw nsw i64 %tmp7, 1
+; CHECK-NEXT: --> {3,+,1}<nuw><nsw><%loop2>
+
+bb:
+ br label %loop1
+
+loop1:
+ %tmp = phi i64 [ 2, %bb ], [ %tmp4, %bb3 ]
+ %tmp2 = trunc i64 %tmp to i32
+ br i1 undef, label %loop2, label %bb3
+
+bb3:
+ %tmp4 = add nuw nsw i64 %tmp, 1
+ br label %loop1
+
+bb5:
+ ret void
+
+loop2:
+ %tmp7 = phi i64 [ %tmp15, %loop2 ], [ 2, %loop1 ]
+ %tmp8 = load i8, i8 addrspace(1)* undef, align 1
+ %tmp9 = sext i8 %tmp8 to i64
+ %tmp10 = sub i64 %tmp9, %tmp7
+ %tmp11 = add i64 %tmp10, undef
+ %tmp13 = trunc i64 %tmp11 to i32
+ %tmp14 = sub i32 %tmp13, %tmp2
+ %tmp15 = add nuw nsw i64 %tmp7, 1
+ %tmp16 = icmp slt i64 %tmp15, %tmp
+ br i1 %tmp16, label %loop2, label %bb5
+}
+
+@A = weak global [1000 x i32] zeroinitializer, align 32
+
+; Demonstrate a situation where we can add two recurrences of different degrees
+; from the same loop.
+define void @test_05(i32 %N) {
+
+; CHECK-LABEL: Classifying expressions for: @test_05
+; CHECK: %SQ = mul i32 %i.0, %i.0
+; CHECK-NEXT: --> {4,+,5,+,2}<%bb3>
+; CHECK: %tmp4 = mul i32 %i.0, 2
+; CHECK-NEXT: --> {4,+,2}<%bb3>
+; CHECK: %tmp5 = sub i32 %SQ, %tmp4
+; CHECK-NEXT: --> {0,+,3,+,2}<%bb3>
+
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ br label %bb3
+
+bb: ; preds = %bb3
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ store i32 123, i32* %tmp
+ %tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
+ br label %bb3
+
+bb3: ; preds = %bb, %entry
+ %i.0 = phi i32 [ 2, %entry ], [ %tmp2, %bb ] ; <i32> [#uses=3]
+ %SQ = mul i32 %i.0, %i.0
+ %tmp4 = mul i32 %i.0, 2
+ %tmp5 = sub i32 %SQ, %tmp4
+ %tmp3 = icmp sle i32 %tmp5, 9999 ; <i1> [#uses=1]
+ br i1 %tmp3, label %bb, label %bb5
+
+bb5: ; preds = %bb3
+ br label %return
+
+return: ; preds = %bb5
+ ret void
+}
+
+; Check that we can add Phis from different loops with different nesting when the
+; nested loop comes first.
+define void @test_06() {
+
+; CHECK-LABEL: Classifying expressions for: @test_06
+; CHECK: %s1 = add i32 %phi1, %phi2
+; CHECK-NEXT: --> {{{{}}30,+,1}<%loop1>,+,2}<%loop2>
+; CHECK: %s2 = add i32 %phi2, %phi1
+; CHECK-NEXT: --> {{{{}}30,+,1}<%loop1>,+,2}<%loop2>
+; CHECK: %s3 = add i32 %phi1, %phi3
+; CHECK-NEXT: --> {{{{}}40,+,1}<%loop1>,+,3}<%loop3>
+; CHECK: %s4 = add i32 %phi3, %phi1
+; CHECK-NEXT: --> {{{{}}40,+,1}<%loop1>,+,3}<%loop3>
+; CHECK: %s5 = add i32 %phi2, %phi3
+; CHECK-NEXT: --> {{{{}}50,+,2}<%loop2>,+,3}<%loop3>
+; CHECK: %s6 = add i32 %phi3, %phi2
+; CHECK-NEXT: --> {{{{}}50,+,2}<%loop2>,+,3}<%loop3>
+
+entry:
+ br label %loop1
+
+loop1:
+ %phi1 = phi i32 [ 10, %entry ], [ %phi1.inc, %loop1.exit ]
+ br label %loop2
+
+loop2:
+ %phi2 = phi i32 [ 20, %loop1 ], [ %phi2.inc, %loop2 ]
+ %phi2.inc = add i32 %phi2, 2
+ %cond2 = icmp ult i32 %phi2.inc, 1000
+ br i1 %cond2, label %loop2, label %loop1.exit
+
+loop1.exit:
+ %phi1.inc = add i32 %phi1, 1
+ %cond1 = icmp ult i32 %phi1.inc, 1000
+ br i1 %cond1, label %loop1, label %loop3
+
+loop3:
+ %phi3 = phi i32 [ 30, %loop1.exit ], [ %phi3.inc, %loop3 ]
+ %phi3.inc = add i32 %phi3, 3
+ %cond3 = icmp ult i32 %phi3.inc, 1000
+ br i1 %cond3, label %loop3, label %exit
+
+exit:
+ %s1 = add i32 %phi1, %phi2
+ %s2 = add i32 %phi2, %phi1
+ %s3 = add i32 %phi1, %phi3
+ %s4 = add i32 %phi3, %phi1
+ %s5 = add i32 %phi2, %phi3
+ %s6 = add i32 %phi3, %phi2
+ ret void
+}
+
+; Check that we can add Phis from different loops with different nesting when the
+; nested loop comes second.
+define void @test_07() {
+
+; CHECK-LABEL: Classifying expressions for: @test_07
+; CHECK: %s1 = add i32 %phi1, %phi2
+; CHECK-NEXT: --> {{{{}}30,+,1}<%loop1>,+,2}<%loop2>
+; CHECK: %s2 = add i32 %phi2, %phi1
+; CHECK-NEXT: --> {{{{}}30,+,1}<%loop1>,+,2}<%loop2>
+; CHECK: %s3 = add i32 %phi1, %phi3
+; CHECK-NEXT: --> {{{{}}40,+,3}<%loop3>,+,1}<%loop1>
+; CHECK: %s4 = add i32 %phi3, %phi1
+; CHECK-NEXT: --> {{{{}}40,+,3}<%loop3>,+,1}<%loop1>
+; CHECK: %s5 = add i32 %phi2, %phi3
+; CHECK-NEXT: --> {{{{}}50,+,3}<%loop3>,+,2}<%loop2>
+; CHECK: %s6 = add i32 %phi3, %phi2
+; CHECK-NEXT: --> {{{{}}50,+,3}<%loop3>,+,2}<%loop2>
+
+entry:
+ br label %loop3
+
+loop3:
+ %phi3 = phi i32 [ 30, %entry ], [ %phi3.inc, %loop3 ]
+ %phi3.inc = add i32 %phi3, 3
+ %cond3 = icmp ult i32 %phi3.inc, 1000
+ br i1 %cond3, label %loop3, label %loop1
+
+loop1:
+ %phi1 = phi i32 [ 10, %loop3 ], [ %phi1.inc, %loop1.exit ]
+ br label %loop2
+
+loop2:
+ %phi2 = phi i32 [ 20, %loop1 ], [ %phi2.inc, %loop2 ]
+ %phi2.inc = add i32 %phi2, 2
+ %cond2 = icmp ult i32 %phi2.inc, 1000
+ br i1 %cond2, label %loop2, label %loop1.exit
+
+loop1.exit:
+ %phi1.inc = add i32 %phi1, 1
+ %cond1 = icmp ult i32 %phi1.inc, 1000
+ br i1 %cond1, label %exit, label %loop1
+
+exit:
+ %s1 = add i32 %phi1, %phi2
+ %s2 = add i32 %phi2, %phi1
+ %s3 = add i32 %phi1, %phi3
+ %s4 = add i32 %phi3, %phi1
+ %s5 = add i32 %phi2, %phi3
+ %s6 = add i32 %phi3, %phi2
+ ret void
+}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/intrinsics.ll b/test/Analysis/TypeBasedAliasAnalysis/intrinsics.ll
index eab314eaa9c2..655d4558a5e1 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/intrinsics.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/intrinsics.ll
@@ -5,22 +5,22 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
; TBAA should prove that these calls don't interfere, since they are
; IntrArgReadMem and have TBAA metadata.
-; CHECK: define <8 x i16> @test0(i8* %p, i8* %q, <8 x i16> %y) {
+; CHECK: define <8 x i16> @test0(<8 x i16>* %p, <8 x i16>* %q, <8 x i16> %y, <8 x i1> %m, <8 x i16> %pt) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[NUW:#[0-9]+]]
-; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK-NEXT: %a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %p, i32 16, <8 x i1> %m, <8 x i16> %pt) [[NUW:#[0-9]+]]
+; CHECK-NEXT: call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %y, <8 x i16>* %q, i32 16, <8 x i1> %m)
; CHECK-NEXT: %c = add <8 x i16> %a, %a
-define <8 x i16> @test0(i8* %p, i8* %q, <8 x i16> %y) {
+define <8 x i16> @test0(<8 x i16>* %p, <8 x i16>* %q, <8 x i16> %y, <8 x i1> %m, <8 x i16> %pt) {
entry:
- %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind, !tbaa !2
- call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16), !tbaa !1
- %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind, !tbaa !2
+ %a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %p, i32 16, <8 x i1> %m, <8 x i16> %pt) nounwind, !tbaa !2
+ call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %y, <8 x i16>* %q, i32 16, <8 x i1> %m), !tbaa !1
+ %b = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %p, i32 16, <8 x i1> %m, <8 x i16> %pt) nounwind, !tbaa !2
%c = add <8 x i16> %a, %b
ret <8 x i16> %c
}
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
-declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
+declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>) nounwind readonly
+declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>) nounwind
; CHECK: attributes #0 = { argmemonly nounwind readonly }
; CHECK: attributes #1 = { argmemonly nounwind }
diff --git a/test/Assembler/globalvariable-attributes.ll b/test/Assembler/globalvariable-attributes.ll
new file mode 100644
index 000000000000..64227a451c25
--- /dev/null
+++ b/test/Assembler/globalvariable-attributes.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
+
+@g1 = global i32 7 "key" = "value" "key2" = "value2"
+@g2 = global i32 2, align 4 "key3" = "value3"
+@g3 = global i32 2 #0
+@g4 = global i32 2, align 4 "key5" = "value5" #0
+
+attributes #0 = { "string" = "value" nobuiltin norecurse }
+
+; CHECK: @g1 = global i32 7 #0
+; CHECK: @g2 = global i32 2, align 4 #1
+; CHECK: @g3 = global i32 2 #2
+; CHECK: @g4 = global i32 2, align 4 #3
+
+; CHECK: attributes #0 = { "key"="value" "key2"="value2" }
+; CHECK: attributes #1 = { "key3"="value3" }
+; CHECK: attributes #2 = { nobuiltin norecurse "string"="value" }
+; CHECK: attributes #3 = { nobuiltin norecurse "key5"="value5" "string"="value" }
+
diff --git a/test/Bitcode/globalvariable-attributes.ll b/test/Bitcode/globalvariable-attributes.ll
new file mode 100644
index 000000000000..cbab3b71e58a
--- /dev/null
+++ b/test/Bitcode/globalvariable-attributes.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+@g1 = global i32 7 "key" = "value" "key2" = "value2"
+@g2 = global i32 2, align 4 "key3" = "value3"
+@g3 = global i32 2 #0
+@g4 = global i32 2, align 4 "key5" = "value5" #0
+
+attributes #0 = { "string" = "value" nobuiltin norecurse }
+
+; CHECK: @g1 = global i32 7 #0
+; CHECK: @g2 = global i32 2, align 4 #1
+; CHECK: @g3 = global i32 2 #2
+; CHECK: @g4 = global i32 2, align 4 #3
+
+; CHECK: attributes #0 = { "key"="value" "key2"="value2" }
+; CHECK: attributes #1 = { "key3"="value3" }
+; CHECK: attributes #2 = { nobuiltin norecurse "string"="value" }
+; CHECK: attributes #3 = { nobuiltin norecurse "key5"="value5" "string"="value" }
+
diff --git a/test/Bitcode/ptest-old.ll b/test/Bitcode/ptest-old.ll
index c1e1cae37368..53ffef900b57 100644
--- a/test/Bitcode/ptest-old.ll
+++ b/test/Bitcode/ptest-old.ll
@@ -1,5 +1,6 @@
; RUN: llvm-as < %s | llvm-dis | FileCheck %s
; RUN: verify-uselistorder < %s
+; REQUIRES: x86
define i32 @foo(<4 x float> %bar) nounwind {
entry:
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
index 982bb5cb7e53..b64d5bd52bfc 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
@@ -29,7 +29,7 @@
; CHECK-NEXT: <VERSION
; CHECK-NEXT: <VALUE_GUID op0=25 op1=123/>
; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123
-; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=3 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=3/>
+; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=1 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=3/>
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
new file mode 100644
index 000000000000..875f397646a6
--- /dev/null
+++ b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
@@ -0,0 +1,121 @@
+; Test to check the call graph in the summary when there is PGO.
+; RUN: opt -module-summary %s -o %t.o
+; RUN: llvm-bcanalyzer -dump %t.o | FileCheck %s
+; RUN: opt -module-summary %p/Inputs/thinlto-function-summary-callgraph-profile-summary.ll -o %t2.o
+; RUN: llvm-lto -thinlto -o %t3 %t.o %t2.o
+; RUN: llvm-bcanalyzer -dump %t3.thinlto.bc | FileCheck %s --check-prefix=COMBINED
+
+
+; CHECK: <SOURCE_FILENAME
+; "hot_function"
+; CHECK-NEXT: <FUNCTION op0=0 op1=12
+; "hot1"
+; CHECK-NEXT: <FUNCTION op0=12 op1=4
+; "hot2"
+; CHECK-NEXT: <FUNCTION op0=16 op1=4
+; "hot3"
+; CHECK-NEXT: <FUNCTION op0=20 op1=4
+; "hot4"
+; CHECK-NEXT: <FUNCTION op0=24 op1=4
+; "cold"
+; CHECK-NEXT: <FUNCTION op0=28 op1=4
+; "none1"
+; CHECK-NEXT: <FUNCTION op0=32 op1=5
+; "none2"
+; CHECK-NEXT: <FUNCTION op0=37 op1=5
+; "none3"
+; CHECK-NEXT: <FUNCTION op0=42 op1=5
+; CHECK-LABEL: <GLOBALVAL_SUMMARY_BLOCK
+; CHECK-NEXT: <VERSION
+; CHECK-NEXT: <VALUE_GUID op0=25 op1=123/>
+; op4=hot1 op6=cold op8=hot2 op10=hot4 op12=none1 op14=hot3 op16=none2 op18=none3 op20=123
+; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=3 op6=5 op7=1 op8=2 op9=3 op10=4 op11=3 op12=6 op13=2 op14=3 op15=3 op16=7 op17=2 op18=8 op19=2 op20=25 op21=3/>
+; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
+
+; CHECK: <STRTAB_BLOCK
+; CHECK-NEXT: blob data = 'hot_functionhot1hot2hot3hot4coldnone1none2none3'
+
+; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
+; COMBINED-NEXT: <VERSION
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <VALUE_GUID
+; COMBINED-NEXT: <COMBINED abbrevid=
+; COMBINED-NEXT: <COMBINED abbrevid=
+; COMBINED-NEXT: <COMBINED abbrevid=
+; COMBINED-NEXT: <COMBINED abbrevid=
+; COMBINED-NEXT: <COMBINED abbrevid=
+; COMBINED-NEXT: <COMBINED abbrevid=
+; COMBINED-NEXT: <COMBINED_PROFILE {{.*}} op5=[[HOT1:.*]] op6=3 op7=[[COLD:.*]] op8=1 op9=[[HOT2:.*]] op10=3 op11=[[NONE1:.*]] op12=2 op13=[[HOT3:.*]] op14=3 op15=[[NONE2:.*]] op16=2 op17=[[NONE3:.*]] op18=2/>
+; COMBINED_NEXT: <COMBINED abbrevid=
+; COMBINED_NEXT: </GLOBALVAL_SUMMARY_BLOCK>
+
+
+; ModuleID = 'thinlto-function-summary-callgraph.ll'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; This function has a high profile count, so its entry block is hot.
+define void @hot_function(i1 %a, i1 %a2) !prof !20 {
+entry:
+ call void @hot1()
+ br i1 %a, label %Cold, label %Hot, !prof !41
+Cold: ; 1/1000 goes here
+ call void @cold()
+ call void @hot2()
+ call void @hot4(), !prof !15
+ call void @none1()
+ br label %exit
+Hot: ; 999/1000 goes here
+ call void @hot2()
+ call void @hot3()
+ br i1 %a2, label %None1, label %None2, !prof !42
+None1: ; half goes here
+ call void @none1()
+ call void @none2()
+ br label %exit
+None2: ; half goes here
+ call void @none3()
+ br label %exit
+exit:
+ ret void
+}
+
+declare void @hot1() #1
+declare void @hot2() #1
+declare void @hot3() #1
+declare void @hot4() #1
+declare void @cold() #1
+declare void @none1() #1
+declare void @none2() #1
+declare void @none3() #1
+
+
+!41 = !{!"branch_weights", i32 1, i32 1000}
+!42 = !{!"branch_weights", i32 1, i32 1}
+
+
+
+!llvm.module.flags = !{!1}
+!20 = !{!"function_entry_count", i64 110, i64 123}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"SampleProfile"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 10}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!15 = !{!"branch_weights", i32 100}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir b/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
index 739fdd5cb4c5..0f054f1d940c 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
@@ -74,6 +74,21 @@
%res = bitcast <2 x i32> %vres to i64
ret i64 %res
}
+
+ define i64 @floatingPointLoad(i64 %arg1, double* %addr) {
+ %varg1 = bitcast i64 %arg1 to double
+ %varg2 = load double, double* %addr
+ %vres = fadd double %varg1, %varg2
+ %res = bitcast double %vres to i64
+ ret i64 %res
+ }
+
+ define void @floatingPointStore(i64 %arg1, double* %addr) {
+ %varg1 = bitcast i64 %arg1 to double
+ %vres = fadd double %varg1, %varg1
+ store double %vres, double* %addr
+ ret void
+ }
...
---
@@ -650,3 +665,84 @@ body: |
RET_ReallyLR implicit %x0
...
+
+---
+# Make sure we map what looks like floating point
+# loads to the floating point register bank.
+# CHECK-LABEL: name: floatingPointLoad
+name: floatingPointLoad
+legalized: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: fpr }
+# CHECK-NEXT: - { id: 3, class: fpr }
+# CHECK-NEXT: - { id: 4, class: fpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+
+# No repairing should be necessary in either mode.
+# CHECK: %0(s64) = COPY %x0
+# CHECK-NEXT: %1(p0) = COPY %x1
+# CHECK-NEXT: %2(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
+# %0 has been mapped to GPR; we need to repair it to match FPR.
+# CHECK-NEXT: %4(s64) = COPY %0
+# CHECK-NEXT: %3(s64) = G_FADD %4, %2
+# CHECK-NEXT: %x0 = COPY %3(s64)
+# CHECK-NEXT: RET_ReallyLR implicit %x0
+
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(p0) = COPY %x1
+ %2(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
+ %3(s64) = G_FADD %0, %2
+ %x0 = COPY %3(s64)
+ RET_ReallyLR implicit %x0
+
+...
+
+---
+# Make sure we map what looks like floating point
+# stores to the floating point register bank.
+# CHECK-LABEL: name: floatingPointStore
+name: floatingPointStore
+legalized: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: fpr }
+# CHECK-NEXT: - { id: 3, class: fpr }
+# CHECK-NEXT: - { id: 4, class: fpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+
+# CHECK: %0(s64) = COPY %x0
+# CHECK-NEXT: %1(p0) = COPY %x1
+# %0 has been mapped to GPR; we need to repair it to match FPR.
+# CHECK-NEXT: %3(s64) = COPY %0
+# CHECK-NEXT: %4(s64) = COPY %0
+# CHECK-NEXT: %2(s64) = G_FADD %3, %4
+# CHECK-NEXT: G_STORE %2(s64), %1(p0) :: (store 8 into %ir.addr)
+# CHECK-NEXT: RET_ReallyLR
+
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(p0) = COPY %x1
+ %2(s64) = G_FADD %0, %0
+ G_STORE %2(s64), %1(p0) :: (store 8 into %ir.addr)
+ RET_ReallyLR
+
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index f8d95c88cc8f..44705a9c9f65 100644
--- a/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
; CHECK-LABEL: name: test_trivial_call
-; CHECK: ADJCALLSTACKDOWN 0, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp
; CHECK: BL @trivial_callee, csr_aarch64_aapcs, implicit-def %lr
; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp
declare void @trivial_callee()
@@ -186,7 +186,7 @@ define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
; CHECK: [[C42:%[0-9]+]](s64) = G_CONSTANT i64 42
; CHECK: [[C12:%[0-9]+]](s64) = G_CONSTANT i64 12
; CHECK: [[PTR:%[0-9]+]](p0) = G_CONSTANT i64 0
-; CHECK: ADJCALLSTACKDOWN 24, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 24, 0, implicit-def %sp, implicit %sp
; CHECK: [[SP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[C42_OFFS:%[0-9]+]](s64) = G_CONSTANT i64 0
; CHECK: [[C42_LOC:%[0-9]+]](p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index 2682fa7dcce1..fc1aeb7b37d9 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -378,11 +378,11 @@ define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
; CHECK-NEXT: cmp x0, #13
; CHECK-NOT: ccmp
; CHECK-NEXT: cset [[REG1:w[0-9]+]], gt
+; CHECK-NEXT: and [[REG4:w[0-9]+]], [[REG0]], [[REG1]]
; CHECK-NEXT: cmp x2, #2
; CHECK-NEXT: cset [[REG2:w[0-9]+]], lt
; CHECK-NEXT: cmp x2, #4
; CHECK-NEXT: cset [[REG3:w[0-9]+]], gt
-; CHECK-NEXT: and [[REG4:w[0-9]+]], [[REG0]], [[REG1]]
; CHECK-NEXT: and [[REG5:w[0-9]+]], [[REG2]], [[REG3]]
; CHECK-NEXT: orr [[REG6:w[0-9]+]], [[REG4]], [[REG5]]
; CHECK-NEXT: cmp [[REG6]], #0
diff --git a/test/CodeGen/AArch64/arm64-fml-combines.ll b/test/CodeGen/AArch64/arm64-fml-combines.ll
index 840d1dcbf060..f97498825279 100644
--- a/test/CodeGen/AArch64/arm64-fml-combines.ll
+++ b/test/CodeGen/AArch64/arm64-fml-combines.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -O=3 -mtriple=arm64-apple-ios -mcpu=cyclone -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=arm64-apple-ios -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=arm64-apple-ios -fp-contract=fast | FileCheck %s
+
define void @foo_2d(double* %src) {
entry:
%arrayidx1 = getelementptr inbounds double, double* %src, i64 5
@@ -126,3 +128,23 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
ret void
}
+
+; CHECK-LABEL: test1:
+; CHECK: fnmadd s0, s0, s1, s2
+define float @test1(float %a, float %b, float %c) {
+entry:
+ %0 = fmul float %a, %b
+ %mul = fsub float -0.000000e+00, %0
+ %sub1 = fsub float %mul, %c
+ ret float %sub1
+}
+
+; CHECK-LABEL: test2:
+; CHECK: fnmadd d0, d0, d1, d2
+define double @test2(double %a, double %b, double %c) {
+entry:
+ %0 = fmul double %a, %b
+ %mul = fsub double -0.000000e+00, %0
+ %sub1 = fsub double %mul, %c
+ ret double %sub1
+}
diff --git a/test/CodeGen/AArch64/arm64-hello.ll b/test/CodeGen/AArch64/arm64-hello.ll
index caaf8615cd4a..a8d1c2482520 100644
--- a/test/CodeGen/AArch64/arm64-hello.ll
+++ b/test/CodeGen/AArch64/arm64-hello.ll
@@ -6,8 +6,8 @@
; CHECK-NEXT: stp x29, x30, [sp, #16]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: stur wzr, [x29, #-4]
-; CHECK: adrp x0, L_.str@PAGE
-; CHECK: add x0, x0, L_.str@PAGEOFF
+; CHECK: adrp x0, l_.str@PAGE
+; CHECK: add x0, x0, l_.str@PAGEOFF
; CHECK-NEXT: bl _puts
; CHECK-NEXT: ldp x29, x30, [sp, #16]
; CHECK-NEXT: add sp, sp, #32
diff --git a/test/CodeGen/AArch64/arm64-misched-multimmo.ll b/test/CodeGen/AArch64/arm64-misched-multimmo.ll
index 3593668e0156..4c0195b93a44 100644
--- a/test/CodeGen/AArch64/arm64-misched-multimmo.ll
+++ b/test/CodeGen/AArch64/arm64-misched-multimmo.ll
@@ -12,7 +12,7 @@
; CHECK: Successors:
; CHECK-NOT: ch SU(4)
; CHECK: SU(3)
-; CHECK: SU(4): STRWui %WZR, %X{{[0-9]+}}
+; CHECK: SU(5): STRWui %WZR, %X{{[0-9]+}}
define i32 @foo() {
entry:
%0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4
diff --git a/test/CodeGen/AArch64/macho-global-symbols.ll b/test/CodeGen/AArch64/macho-global-symbols.ll
new file mode 100644
index 000000000000..d68abad57ccd
--- /dev/null
+++ b/test/CodeGen/AArch64/macho-global-symbols.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=arm64-apple-ios %s -o - | FileCheck %s
+
+; All global symbols must be at most linker-private for AArch64 because we don't
+; use section-relative relocations in MachO.
+
+define i8* @private_sym() {
+; CHECK-LABEL: private_sym:
+; CHECK: adrp [[HIBITS:x[0-9]+]], l_var@PAGE
+; CHECK: add x0, [[HIBITS]], l_var@PAGEOFF
+
+ ret i8* getelementptr([2 x i8], [2 x i8]* @var, i32 0, i32 0)
+}
+
+; CHECK: .section __TEXT,__cstring
+; CHECK: l_var:
+; CHECK: .asciz "\002"
+@var = private unnamed_addr constant [2 x i8] [i8 2, i8 0]
diff --git a/test/CodeGen/AArch64/misched-fusion-aes.ll b/test/CodeGen/AArch64/misched-fusion-aes.ll
index f29dfb3a9802..4c682e594e66 100644
--- a/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -1,4 +1,5 @@
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA72
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1
declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
@@ -87,6 +88,22 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
; CHECKA57: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
+; CHECKA72: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECKA72: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECKA72: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
+; CHECKA72: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
+; CHECKA72: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
+; CHECKA72: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
+; CHECKA72: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECKA72: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1: aesmc {{v[0-7].16b}}, [[VA]]
; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
@@ -187,6 +204,22 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
; CHECKA57: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
+; CHECKA72: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
+; CHECKA72: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
+; CHECKA72: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
+; CHECKA72: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
+; CHECKA72: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
+; CHECKA72: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
+; CHECKA72: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECKA72: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1: aesimc {{v[0-7].16b}}, [[VA]]
; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
diff --git a/test/CodeGen/AArch64/stackmap-frame-setup.ll b/test/CodeGen/AArch64/stackmap-frame-setup.ll
index 5646703fa403..677ff8dc2530 100644
--- a/test/CodeGen/AArch64/stackmap-frame-setup.ll
+++ b/test/CodeGen/AArch64/stackmap-frame-setup.ll
@@ -7,11 +7,11 @@ entry:
store i64 11, i64* %metadata
store i64 12, i64* %metadata
store i64 13, i64* %metadata
-; ISEL: ADJCALLSTACKDOWN 0, implicit-def
+; ISEL: ADJCALLSTACKDOWN 0, 0, implicit-def
; ISEL-NEXT: STACKMAP
; ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
-; FAST-ISEL: ADJCALLSTACKDOWN 0, implicit-def
+; FAST-ISEL: ADJCALLSTACKDOWN 0, 0, implicit-def
; FAST-ISEL-NEXT: STACKMAP
; FAST-ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
ret void
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
index 56a9e7022db9..2a3d3887ed69 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
@@ -14,7 +14,7 @@ regBankSelected: true
# GCN: global_addrspace
# GCN: [[PTR:%[0-9]+]] = COPY %vgpr0_vgpr1
-# GCN: FLAT_LOAD_DWORD [[PTR]], 0, 0, 0
+# GCN: FLAT_LOAD_DWORD [[PTR]], 0, 0
body: |
bb.0:
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
index ea435725bf25..89be3bde94a8 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
@@ -15,7 +15,7 @@ regBankSelected: true
# GCN: global_addrspace
# GCN: [[PTR:%[0-9]+]] = COPY %vgpr0_vgpr1
# GCN: [[VAL:%[0-9]+]] = COPY %vgpr2
-# GCN: FLAT_STORE_DWORD [[PTR]], [[VAL]], 0, 0, 0
+# GCN: FLAT_STORE_DWORD [[PTR]], [[VAL]], 0, 0
body: |
bb.0:
diff --git a/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir b/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
new file mode 100644
index 000000000000..8839ba8e0ab2
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
@@ -0,0 +1,20 @@
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ define void @test_constant() {
+ entry:
+ ret void
+ }
+...
+
+---
+name: test_constant
+registers:
+ - { id: 0, class: _ }
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_constant
+ ; CHECK: %0(s32) = G_CONSTANT i32 5
+
+ %0(s32) = G_CONSTANT i32 5
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/lit.local.cfg b/test/CodeGen/AMDGPU/GlobalISel/lit.local.cfg
new file mode 100644
index 000000000000..e99d1bb8446c
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'global-isel' not in config.root.available_features:
+ config.unsupported = True
diff --git a/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index 62b47beb1251..bc992ed77ffd 100644
--- a/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -219,19 +219,19 @@ body: |
%34 = V_MOV_B32_e32 63, implicit %exec
%27 = V_AND_B32_e64 %26, %24, implicit %exec
- FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %27, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%28 = V_AND_B32_e64 %24, %26, implicit %exec
- FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %28, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%29 = V_AND_B32_e32 %26, %24, implicit %exec
- FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %29, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%30 = V_AND_B32_e64 %26, %26, implicit %exec
- FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %30, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%31 = V_AND_B32_e64 %34, %34, implicit %exec
- FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %37, %31, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
S_ENDPGM
@@ -407,34 +407,34 @@ body: |
%27 = S_MOV_B32 -4
%11 = V_LSHLREV_B32_e64 12, %10, implicit %exec
- FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %11, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%12 = V_LSHLREV_B32_e64 %7, 12, implicit %exec
- FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %12, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%13 = V_LSHL_B32_e64 %7, 12, implicit %exec
- FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %13, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%14 = V_LSHL_B32_e64 12, %7, implicit %exec
- FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %14, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%15 = V_LSHL_B32_e64 12, %24, implicit %exec
- FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %15, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%22 = V_LSHL_B32_e64 %6, 12, implicit %exec
- FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %22, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%23 = V_LSHL_B32_e64 %6, 32, implicit %exec
- FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %23, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%25 = V_LSHL_B32_e32 %6, %6, implicit %exec
- FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %25, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%26 = V_LSHLREV_B32_e32 11, %24, implicit %exec
- FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %26, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%28 = V_LSHL_B32_e32 %27, %6, implicit %exec
- FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %28, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
S_ENDPGM
@@ -615,34 +615,34 @@ body: |
%35 = V_MOV_B32_e32 2, implicit %exec
%11 = V_ASHRREV_I32_e64 8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %11, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%12 = V_ASHRREV_I32_e64 %8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %12, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%13 = V_ASHR_I32_e64 %7, 3, implicit %exec
- FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %13, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%14 = V_ASHR_I32_e64 7, %32, implicit %exec
- FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %14, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%15 = V_ASHR_I32_e64 %27, %24, implicit %exec
- FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %15, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%22 = V_ASHR_I32_e64 %6, 4, implicit %exec
- FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %22, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%23 = V_ASHR_I32_e64 %6, %33, implicit %exec
- FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %23, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%25 = V_ASHR_I32_e32 %34, %34, implicit %exec
- FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %25, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%26 = V_ASHRREV_I32_e32 11, %10, implicit %exec
- FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %26, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%28 = V_ASHR_I32_e32 %27, %35, implicit %exec
- FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %28, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
S_ENDPGM
@@ -824,34 +824,34 @@ body: |
%35 = V_MOV_B32_e32 2, implicit %exec
%11 = V_LSHRREV_B32_e64 8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %11, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%12 = V_LSHRREV_B32_e64 %8, %10, implicit %exec
- FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %12, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%13 = V_LSHR_B32_e64 %7, 3, implicit %exec
- FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %13, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%14 = V_LSHR_B32_e64 7, %32, implicit %exec
- FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %14, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%15 = V_LSHR_B32_e64 %27, %24, implicit %exec
- FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %15, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%22 = V_LSHR_B32_e64 %6, 4, implicit %exec
- FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %22, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%23 = V_LSHR_B32_e64 %6, %33, implicit %exec
- FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %23, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%25 = V_LSHR_B32_e32 %34, %34, implicit %exec
- FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %25, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%26 = V_LSHRREV_B32_e32 11, %10, implicit %exec
- FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %26, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
%28 = V_LSHR_B32_e32 %27, %35, implicit %exec
- FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
+ FLAT_STORE_DWORD %20, %28, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll b/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
index 0831d250b9e7..8611cd080e15 100644
--- a/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
+++ b/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
@@ -25,7 +25,7 @@ define amdgpu_kernel void @fold_mi_s_and_0(i32 addrspace(1)* %out, i32 %x) #0 {
}
; GCN-LABEL: {{^}}fold_mi_v_or_0:
-; GCN: v_mbcnt_lo_u32_b32_e64 [[RESULT:v[0-9]+]]
+; GCN: v_mbcnt_lo_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @fold_mi_v_or_0(i32 addrspace(1)* %out) {
@@ -50,7 +50,7 @@ define amdgpu_kernel void @fold_mi_s_or_0(i32 addrspace(1)* %out, i32 %x) #0 {
}
; GCN-LABEL: {{^}}fold_mi_v_xor_0:
-; GCN: v_mbcnt_lo_u32_b32_e64 [[RESULT:v[0-9]+]]
+; GCN: v_mbcnt_lo_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @fold_mi_v_xor_0(i32 addrspace(1)* %out) {
@@ -86,8 +86,8 @@ define amdgpu_kernel void @fold_mi_s_not_0(i32 addrspace(1)* %out, i32 %x) #0 {
}
; GCN-LABEL: {{^}}fold_mi_v_not_0:
-; GCN: v_bcnt_u32_b32_e64 v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, 0{{$}}
-; GCN: v_bcnt_u32_b32_e{{[0-9]+}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, v[[RESULT_LO]]{{$}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, 0{{$}}
+; GCN: v_bcnt_u32_b32{{(_e32)*(_e64)*}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, v[[RESULT_LO]]{{$}}
; GCN-NEXT: v_not_b32_e32 v[[RESULT_LO]]
; GCN-NEXT: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], -1{{$}}
; GCN-NEXT: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
@@ -104,8 +104,8 @@ define amdgpu_kernel void @fold_mi_v_not_0(i64 addrspace(1)* %out) {
; GCN: buffer_load_dwordx2
; GCN: buffer_load_dwordx2 v{{\[}}[[VREG1_LO:[0-9]+]]:[[VREG1_HI:[0-9]+]]{{\]}}
-; GCN: v_bcnt_u32_b32_e64 v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, 0{{$}}
-; GCN: v_bcnt_u32_b32_e{{[0-9]+}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, v[[RESULT_LO]]{{$}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, 0{{$}}
+; GCN: v_bcnt_u32_b32{{(_e32)*(_e64)*}} v[[RESULT_LO:[0-9]+]], v{{[0-9]+}}, v[[RESULT_LO]]{{$}}
; GCN-DAG: v_not_b32_e32 v[[RESULT_LO]], v[[RESULT_LO]]
; GCN-DAG: v_or_b32_e32 v[[RESULT_LO]], v[[VREG1_LO]], v[[RESULT_LO]]
; GCN-DAG: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], v[[VREG1_HI]]
diff --git a/test/CodeGen/AMDGPU/ctpop.ll b/test/CodeGen/AMDGPU/ctpop.ll
index a29e72ea57cb..aa913ad406d2 100644
--- a/test/CodeGen/AMDGPU/ctpop.ll
+++ b/test/CodeGen/AMDGPU/ctpop.ll
@@ -25,7 +25,7 @@ define amdgpu_kernel void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val)
; XXX - Why 0 in register?
; FUNC-LABEL: {{^}}v_ctpop_i32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 0
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 0
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -40,9 +40,9 @@ define amdgpu_kernel void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrs
; FUNC-LABEL: {{^}}v_ctpop_add_chain_i32:
; GCN: buffer_load_dword [[VAL1:v[0-9]+]],
; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
-; GCN: v_bcnt_u32_b32_e64 [[MIDRESULT:v[0-9]+]], [[VAL1]], 0
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], [[VAL1]], 0
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
-; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -61,7 +61,7 @@ define amdgpu_kernel void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out,
; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
; GCN: s_waitcnt
-; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
+; GCN-NEXT: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1, i32 %sval) nounwind {
@@ -73,8 +73,8 @@ define amdgpu_kernel void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out,
}
; FUNC-LABEL: {{^}}v_ctpop_v2i32:
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
; GCN: s_endpgm
; EG: BCNT_INT
@@ -87,10 +87,10 @@ define amdgpu_kernel void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <
}
; FUNC-LABEL: {{^}}v_ctpop_v4i32:
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
; GCN: s_endpgm
; EG: BCNT_INT
@@ -105,14 +105,14 @@ define amdgpu_kernel void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <
}
; FUNC-LABEL: {{^}}v_ctpop_v8i32:
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
; GCN: s_endpgm
; EG: BCNT_INT
@@ -131,22 +131,22 @@ define amdgpu_kernel void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <
}
; FUNC-LABEL: {{^}}v_ctpop_v16i32:
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
-; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
; GCN: s_endpgm
; EG: BCNT_INT
@@ -174,7 +174,7 @@ define amdgpu_kernel void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out,
; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant:
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 4
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -189,7 +189,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noa
; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant_inv:
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 4
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -206,7 +206,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)*
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: v_mov_b32_e32 [[LIT:v[0-9]+]], 0x1869f
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
-; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
+; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
@@ -220,7 +220,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %ou
; FUNC-LABEL: {{^}}v_ctpop_i32_add_var:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
-; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -236,7 +236,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i
; FUNC-LABEL: {{^}}v_ctpop_i32_add_var_inv:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
-; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
@@ -253,7 +253,7 @@ define amdgpu_kernel void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %ou
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], {{0$}}
; GCN-DAG: buffer_load_dword [[VAR:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:16
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
-; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/ctpop64.ll b/test/CodeGen/AMDGPU/ctpop64.ll
index 2610684ad9ee..f18bd9fd8174 100644
--- a/test/CodeGen/AMDGPU/ctpop64.ll
+++ b/test/CodeGen/AMDGPU/ctpop64.ll
@@ -26,9 +26,9 @@ define amdgpu_kernel void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val)
; FUNC-LABEL: {{^}}v_ctpop_i64:
; GCN: buffer_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
-; GCN: v_bcnt_u32_b32_e64 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
-; VI-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
+; VI-NEXT: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
@@ -41,9 +41,9 @@ define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrs
; FUNC-LABEL: {{^}}v_ctpop_i64_user:
; GCN: buffer_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
-; GCN: v_bcnt_u32_b32_e64 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
-; VI-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
+; VI-NEXT: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; GCN-DAG: v_or_b32_e32 v[[RESULT_LO:[0-9]+]], s{{[0-9]+}}, [[RESULT]]
; GCN-DAG: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
@@ -171,11 +171,11 @@ define amdgpu_kernel void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val)
; FUNC-LABEL: {{^}}v_ctpop_i128:
; GCN: buffer_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: v_bcnt_u32_b32_e64 [[MIDRESULT0:v[0-9]+]], v{{[0-9]+}}, 0
-; GCN-DAG: v_bcnt_u32_b32{{_e32|_e64}} [[MIDRESULT1:v[0-9]+]], v[[VAL3]], [[MIDRESULT0]]
+; GCN-DAG: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT0:v[0-9]+]], v{{[0-9]+}}, 0
+; GCN-DAG: v_bcnt_u32_b32{{(_e32)*(_e64)*}} [[MIDRESULT1:v[0-9]+]], v[[VAL3]], [[MIDRESULT0]]
-; GCN-DAG: v_bcnt_u32_b32_e64 [[MIDRESULT2:v[0-9]+]], v[[VAL0]], 0
-; GCN-DAG: v_bcnt_u32_b32{{_e32|_e64}} [[MIDRESULT3:v[0-9]+]], v{{[0-9]+}}, [[MIDRESULT2]]
+; GCN-DAG: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT2:v[0-9]+]], v[[VAL0]], 0
+; GCN-DAG: v_bcnt_u32_b32{{(_e32)*(_e64)*}} [[MIDRESULT3:v[0-9]+]], v{{[0-9]+}}, [[MIDRESULT2]]
; GCN: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, [[MIDRESULT1]], [[MIDRESULT2]]
diff --git a/test/CodeGen/AMDGPU/fneg-combines.ll b/test/CodeGen/AMDGPU/fneg-combines.ll
index 1c0e9a2f13ce..66bf9d0ffb00 100644
--- a/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -1471,11 +1471,10 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(float addr
; GCN-LABEL: {{^}}v_fneg_mul_legacy_multi_use_mul_legacy_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN: v_mul_legacy_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
-; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
-; GCN: buffer_store_dword [[MUL]]
+; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
+; GCN-NEXT: v_mul_legacy_f32_e64 [[MUL:v[0-9]+]], -[[ADD]], 4.0
+; GCN-NEXT: buffer_store_dword [[ADD]]
+; GCN-NEXT: buffer_store_dword [[MUL]]
define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/test/CodeGen/AMDGPU/fneg.f16.ll b/test/CodeGen/AMDGPU/fneg.f16.ll
index 626a0b50cce8..ed36666db807 100644
--- a/test/CodeGen/AMDGPU/fneg.f16.ll
+++ b/test/CodeGen/AMDGPU/fneg.f16.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=gfx901 -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN -check-prefix=GFX89 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN -check-prefix=GFX89 %s
; FIXME: Should be able to do scalar op
; GCN-LABEL: {{^}}s_fneg_f16:
@@ -129,6 +129,41 @@ define amdgpu_kernel void @v_fneg_fold_v2f16(<2 x half> addrspace(1)* %out, <2 x
ret void
}
+; GCN-LABEL: {{^}}v_extract_fneg_fold_v2f16:
+; GCN: flat_load_dword [[VAL:v[0-9]+]]
+; CI-DAG: v_mul_f32_e32 v{{[0-9]+}}, -4.0, v{{[0-9]+}}
+; CI-DAG: v_sub_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
+
+; GFX89: v_lshrrev_b32_e32 [[ELT1:v[0-9]+]], 16, [[VAL]]
+; GFX89-DAG: v_mul_f16_e32 v{{[0-9]+}}, -4.0, [[VAL]]
+; GFX89-DAG: v_sub_f16_e32 v{{[0-9]+}}, 2.0, [[ELT1]]
+define amdgpu_kernel void @v_extract_fneg_fold_v2f16(<2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %fneg = fsub <2 x half> <half -0.0, half -0.0>, %val
+ %elt0 = extractelement <2 x half> %fneg, i32 0
+ %elt1 = extractelement <2 x half> %fneg, i32 1
+
+ %fmul0 = fmul half %elt0, 4.0
+ %fadd1 = fadd half %elt1, 2.0
+ store volatile half %fmul0, half addrspace(1)* undef
+ store volatile half %fadd1, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_extract_fneg_no_fold_v2f16:
+; GCN: flat_load_dword [[VAL:v[0-9]+]]
+; GCN: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80008000, [[VAL]]
+; GCN: v_lshrrev_b32_e32 [[ELT1:v[0-9]+]], 16, [[NEG]]
+define amdgpu_kernel void @v_extract_fneg_no_fold_v2f16(<2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %fneg = fsub <2 x half> <half -0.0, half -0.0>, %val
+ %elt0 = extractelement <2 x half> %fneg, i32 0
+ %elt1 = extractelement <2 x half> %fneg, i32 1
+ store volatile half %elt0, half addrspace(1)* undef
+ store volatile half %elt1, half addrspace(1)* undef
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/inserted-wait-states.mir b/test/CodeGen/AMDGPU/inserted-wait-states.mir
index c6fe6debd225..ff9fcd1c693f 100644
--- a/test/CodeGen/AMDGPU/inserted-wait-states.mir
+++ b/test/CodeGen/AMDGPU/inserted-wait-states.mir
@@ -246,15 +246,15 @@ body: |
S_BRANCH %bb.1
bb.1:
- FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %vgpr2_vgpr3, 0, 0, implicit %exec, implicit %flat_scr
%vgpr3 = V_MOV_B32_e32 0, implicit %exec
- FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4, 0, 0, implicit %exec, implicit %flat_scr
%vgpr3 = V_MOV_B32_e32 0, implicit %exec
- FLAT_STORE_DWORDX4 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORDX4 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
%vgpr3 = V_MOV_B32_e32 0, implicit %exec
- FLAT_ATOMIC_CMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_ATOMIC_CMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, implicit %exec, implicit %flat_scr
%vgpr3 = V_MOV_B32_e32 0, implicit %exec
- FLAT_ATOMIC_FCMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_ATOMIC_FCMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, implicit %exec, implicit %flat_scr
%vgpr3 = V_MOV_B32_e32 0, implicit %exec
S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/limit-coalesce.mir b/test/CodeGen/AMDGPU/limit-coalesce.mir
index 106a96e32dc3..a0d2d6c097a2 100644
--- a/test/CodeGen/AMDGPU/limit-coalesce.mir
+++ b/test/CodeGen/AMDGPU/limit-coalesce.mir
@@ -57,15 +57,15 @@ body: |
%4.sub1 = COPY %3.sub0
undef %5.sub0 = COPY %4.sub1
%5.sub1 = COPY %4.sub0
- FLAT_STORE_DWORDX2 %vgpr0_vgpr1, killed %5, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORDX2 %vgpr0_vgpr1, killed %5, 0, 0, implicit %exec, implicit %flat_scr
%6 = IMPLICIT_DEF
undef %7.sub0_sub1 = COPY %6
%7.sub2 = COPY %3.sub0
- FLAT_STORE_DWORDX3 %vgpr0_vgpr1, killed %7, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORDX3 %vgpr0_vgpr1, killed %7, 0, 0, implicit %exec, implicit %flat_scr
%8 = IMPLICIT_DEF
undef %9.sub0_sub1_sub2 = COPY %8
%9.sub3 = COPY %3.sub0
- FLAT_STORE_DWORDX4 %vgpr0_vgpr1, killed %9, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORDX4 %vgpr0_vgpr1, killed %9, 0, 0, implicit %exec, implicit %flat_scr
...
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
index b92eb34750d9..7179d02fc6dd 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
@@ -7,7 +7,7 @@
; GCN-DAG: s_load_dword [[SY:s[0-9]+]], s[0:1], 0x{{c|30}}
; GCN: v_mov_b32_e32 [[VY:v[0-9]+]], [[SY]]
; SI: v_cvt_pkrtz_f16_f32_e32 v{{[0-9]+}}, [[X]], [[VY]]
-; GFX89: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[X]], [[VY]]
+; GFX89: v_cvt_pkrtz_f16_f32 v{{[0-9]+}}, [[X]], [[VY]]
define amdgpu_kernel void @s_cvt_pkrtz_v2f16_f32(<2 x half> addrspace(1)* %out, float %x, float %y) #0 {
%result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
store <2 x half> %result, <2 x half> addrspace(1)* %out
@@ -16,7 +16,7 @@ define amdgpu_kernel void @s_cvt_pkrtz_v2f16_f32(<2 x half> addrspace(1)* %out,
; GCN-LABEL: {{^}}s_cvt_pkrtz_samereg_v2f16_f32:
; GCN: s_load_dword [[X:s[0-9]+]]
-; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[X]], [[X]]
+; GCN: v_cvt_pkrtz_f16_f32{{(_e64)*}} v{{[0-9]+}}, [[X]], [[X]]
define amdgpu_kernel void @s_cvt_pkrtz_samereg_v2f16_f32(<2 x half> addrspace(1)* %out, float %x) #0 {
%result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %x)
store <2 x half> %result, <2 x half> addrspace(1)* %out
@@ -39,7 +39,7 @@ define amdgpu_kernel void @s_cvt_pkrtz_undef_undef(<2 x half> addrspace(1)* %out
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; SI: v_cvt_pkrtz_f16_f32_e32 v{{[0-9]+}}, [[A]], [[B]]
-; GFX89: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[A]], [[B]]
+; GFX89: v_cvt_pkrtz_f16_f32 v{{[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -55,7 +55,7 @@ define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32(<2 x half> addrspace(1)* %out,
; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_reg_imm:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[A]], 1.0
+; GCN: v_cvt_pkrtz_f16_f32{{(_e64)*}} v{{[0-9]+}}, [[A]], 1.0
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_reg_imm(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -70,7 +70,7 @@ define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_reg_imm(<2 x half> addrspace(1)
; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_imm_reg:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; SI: v_cvt_pkrtz_f16_f32_e32 v{{[0-9]+}}, 1.0, [[A]]
-; GFX89: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, 1.0, [[A]]
+; GFX89: v_cvt_pkrtz_f16_f32 v{{[0-9]+}}, 1.0, [[A]]
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_imm_reg(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -85,7 +85,7 @@ define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_imm_reg(<2 x half> addrspace(1)
; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_lo:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, -[[A]], [[B]]
+; GCN: v_cvt_pkrtz_f16_f32{{(_e64)*}} v{{[0-9]+}}, -[[A]], [[B]]
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_lo(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -103,7 +103,7 @@ define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_lo(<2 x half> addrspace(1)
; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_hi:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[A]], -[[B]]
+; GCN: v_cvt_pkrtz_f16_f32{{(_e64)*}} v{{[0-9]+}}, [[A]], -[[B]]
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_hi(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -121,7 +121,7 @@ define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_hi(<2 x half> addrspace(1)
; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_lo_hi:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, -[[A]], -[[B]]
+; GCN: v_cvt_pkrtz_f16_f32{{(_e64)*}} v{{[0-9]+}}, -[[A]], -[[B]]
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_lo_hi(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -140,7 +140,7 @@ define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_lo_hi(<2 x half> addrspace
; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_fabs_lo_fneg_hi:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, -|[[A]]|, -[[B]]
+; GCN: v_cvt_pkrtz_f16_f32{{(_e64)*}} v{{[0-9]+}}, -|[[A]]|, -[[B]]
define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_fabs_lo_fneg_hi(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll
index ab76c870796b..144c8f428ab0 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll
@@ -2,9 +2,9 @@
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}mbcnt_intrinsics:
-; GCN: v_mbcnt_lo_u32_b32_e64 [[LO:v[0-9]+]], -1, 0
+; GCN: v_mbcnt_lo_u32_b32{{(_e64)*}} [[LO:v[0-9]+]], -1, 0
; SI: v_mbcnt_hi_u32_b32_e32 {{v[0-9]+}}, -1, [[LO]]
-; VI: v_mbcnt_hi_u32_b32_e64 {{v[0-9]+}}, -1, [[LO]]
+; VI: v_mbcnt_hi_u32_b32 {{v[0-9]+}}, -1, [[LO]]
define amdgpu_ps void @mbcnt_intrinsics(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3) {
main_body:
%lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
diff --git a/test/CodeGen/AMDGPU/madak.ll b/test/CodeGen/AMDGPU/madak.ll
index eb4066a2a0a8..5f1fb0e2d732 100644
--- a/test/CodeGen/AMDGPU/madak.ll
+++ b/test/CodeGen/AMDGPU/madak.ll
@@ -9,7 +9,7 @@ declare float @llvm.fabs.f32(float) nounwind readnone
; GCN-LABEL: {{^}}madak_f32:
; GCN: buffer_load_dword [[VA:v[0-9]+]]
; GCN: buffer_load_dword [[VB:v[0-9]+]]
-; GCN: v_madak_f32_e32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
+; GCN: v_madak_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
define amdgpu_kernel void @madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
@@ -63,7 +63,7 @@ define amdgpu_kernel void @madak_2_use_f32(float addrspace(1)* noalias %out, flo
; GCN-LABEL: {{^}}madak_m_inline_imm_f32:
; GCN: buffer_load_dword [[VA:v[0-9]+]]
-; GCN: v_madak_f32_e32 {{v[0-9]+}}, 4.0, [[VA]], 0x41200000
+; GCN: v_madak_f32 {{v[0-9]+}}, 4.0, [[VA]], 0x41200000
define amdgpu_kernel void @madak_m_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
@@ -198,7 +198,7 @@ define amdgpu_kernel void @no_madak_src1_modifier_f32(float addrspace(1)* noalia
; GCN: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xa|0x28}}
; GCN: v_mov_b32_e32 [[SGPR0_VCOPY:v[0-9]+]], [[SGPR0]]
; GCN: buffer_load_dword [[VGPR:v[0-9]+]]
-; GCN: v_madak_f32_e32 [[MADAK:v[0-9]+]], 0.5, [[SGPR0_VCOPY]], 0x42280000
+; GCN: v_madak_f32 [[MADAK:v[0-9]+]], 0.5, [[SGPR0_VCOPY]], 0x42280000
; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VGPR]], [[MADAK]]
; GCN: buffer_store_dword [[MUL]]
define amdgpu_kernel void @madak_constant_bus_violation(i32 %arg1, float %sgpr0, float %sgpr1) #0 {
diff --git a/test/CodeGen/AMDGPU/promote-alloca-volatile.ll b/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
index 9c43a6dc60f4..d7655993a2d9 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
@@ -1,26 +1,26 @@
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-promote-alloca < %s | FileCheck %s
; CHECK-LABEL: @volatile_load(
-; CHECK: alloca [5 x i32]
+; CHECK: alloca [4 x i32]
; CHECK: load volatile i32, i32*
define amdgpu_kernel void @volatile_load(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
- %stack = alloca [5 x i32], align 4
+ %stack = alloca [4 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
- %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %tmp
+ %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32]* %stack, i32 0, i32 %tmp
%load = load volatile i32, i32* %arrayidx1
store i32 %load, i32 addrspace(1)* %out
ret void
}
; CHECK-LABEL: @volatile_store(
-; CHECK: alloca [5 x i32]
+; CHECK: alloca [4 x i32]
; CHECK: store volatile i32 %tmp, i32*
define amdgpu_kernel void @volatile_store(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
- %stack = alloca [5 x i32], align 4
+ %stack = alloca [4 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
- %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %tmp
+ %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32]* %stack, i32 0, i32 %tmp
store volatile i32 %tmp, i32* %arrayidx1
ret void
}
diff --git a/test/CodeGen/AMDGPU/v_madak_f16.ll b/test/CodeGen/AMDGPU/v_madak_f16.ll
index bfb10503aaea..0148ff470b78 100644
--- a/test/CodeGen/AMDGPU/v_madak_f16.ll
+++ b/test/CodeGen/AMDGPU/v_madak_f16.ll
@@ -4,7 +4,7 @@
; GCN-LABEL: {{^}}madak_f16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; VI: v_madak_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], 0x4900{{$}}
+; VI: v_madak_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], 0x4900{{$}}
; VI: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @madak_f16(
diff --git a/test/CodeGen/AMDGPU/waitcnt.mir b/test/CodeGen/AMDGPU/waitcnt.mir
index 38662e83b359..f754415dccb4 100644
--- a/test/CodeGen/AMDGPU/waitcnt.mir
+++ b/test/CodeGen/AMDGPU/waitcnt.mir
@@ -51,21 +51,21 @@ name: flat_zero_waitcnt
body: |
bb.0:
successors: %bb.1
- %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4)
- %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4)
+ %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
%vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
S_BRANCH %bb.1
bb.1:
successors: %bb.2
- %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
- %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, implicit %exec, implicit %flat_scr
+ %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
%vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
S_BRANCH %bb.2
bb.2:
- %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4)
- %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16)
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4)
+ %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16)
%vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
S_ENDPGM
...
@@ -86,11 +86,11 @@ name: single_fallthrough_successor_no_end_block_wait
body: |
bb.0:
successors: %bb.1
- %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, implicit %exec, implicit %flat_scr
bb.1:
%vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec
- FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
...
---
@@ -114,15 +114,15 @@ name: single_branch_successor_not_next_block
body: |
bb.0:
successors: %bb.2
- %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, implicit %exec, implicit %flat_scr
S_BRANCH %bb.2
bb.1:
- FLAT_STORE_DWORD %vgpr8_vgpr9, %vgpr10, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORD %vgpr8_vgpr9, %vgpr10, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
bb.2:
%vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec
- FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
+ FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 83ab2659ef4a..72c3b715d36e 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -4,6 +4,8 @@
define void @test_sext_s1() { ret void }
define void @test_sext_s8() { ret void }
define void @test_zext_s16() { ret void }
+ define void @test_anyext_s8() { ret void }
+ define void @test_anyext_s16() { ret void }
define void @test_trunc_s32_16() { ret void }
@@ -149,6 +151,58 @@ body: |
; CHECK: BX_RET 14, _, implicit %r0
...
---
+name: test_anyext_s8
+# CHECK-LABEL: name: test_anyext_s8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s8) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = G_ANYEXT %0(s8)
+ ; CHECK: [[VREGEXT:%[0-9]+]] = COPY [[VREGX]]
+
+ %r0 = COPY %1(s32)
+ ; CHECK: %r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_anyext_s16
+# CHECK-LABEL: name: test_anyext_s16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s16) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = G_ANYEXT %0(s16)
+ ; CHECK: [[VREGEXT:%[0-9]+]] = COPY [[VREGX]]
+
+ %r0 = COPY %1(s32)
+ ; CHECK: %r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
name: test_trunc_s32_16
# CHECK-LABEL: name: test_trunc_s32_16
legalized: true
@@ -187,9 +241,15 @@ registers:
- { id: 0, class: gprb }
- { id: 1, class: gprb }
- { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+ - { id: 5, class: gprb }
# CHECK-DAG: id: 0, class: gpr
# CHECK-DAG: id: 1, class: gpr
# CHECK-DAG: id: 2, class: gpr
+# CHECK-DAG: id: 3, class: gpr
+# CHECK-DAG: id: 4, class: gpr
+# CHECK-DAG: id: 5, class: gpr
body: |
bb.0:
liveins: %r0, %r1
@@ -200,11 +260,20 @@ body: |
%1(s8) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
- %2(s8) = G_ADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]] = ADDrr [[VREGX]], [[VREGY]], 14, _, _
+ %2(s32) = G_ANYEXT %0(s8)
+ ; CHECK: [[VREGXEXT:%[0-9]+]] = COPY [[VREGX]]
- %r0 = COPY %2(s8)
- ; CHECK: %r0 = COPY [[VREGSUM]]
+ %3(s32) = G_ANYEXT %1(s8)
+ ; CHECK: [[VREGYEXT:%[0-9]+]] = COPY [[VREGY]]
+
+ %4(s32) = G_ADD %2, %3
+ ; CHECK: [[VREGSUM:%[0-9]+]] = ADDrr [[VREGXEXT]], [[VREGYEXT]], 14, _, _
+
+ %5(s8) = G_TRUNC %4(s32)
+ ; CHECK: [[VREGSUMTR:%[0-9]+]] = COPY [[VREGSUM]]
+
+ %r0 = COPY %5(s8)
+ ; CHECK: %r0 = COPY [[VREGSUMTR]]
BX_RET 14, _, implicit %r0
; CHECK: BX_RET 14, _, implicit %r0
@@ -220,9 +289,15 @@ registers:
- { id: 0, class: gprb }
- { id: 1, class: gprb }
- { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+ - { id: 5, class: gprb }
# CHECK-DAG: id: 0, class: gpr
# CHECK-DAG: id: 1, class: gpr
# CHECK-DAG: id: 2, class: gpr
+# CHECK-DAG: id: 3, class: gpr
+# CHECK-DAG: id: 4, class: gpr
+# CHECK-DAG: id: 5, class: gpr
body: |
bb.0:
liveins: %r0, %r1
@@ -233,11 +308,20 @@ body: |
%1(s16) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
- %2(s16) = G_ADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]] = ADDrr [[VREGX]], [[VREGY]], 14, _, _
+ %2(s32) = G_ANYEXT %0(s16)
+ ; CHECK: [[VREGXEXT:%[0-9]+]] = COPY [[VREGX]]
- %r0 = COPY %2(s16)
- ; CHECK: %r0 = COPY [[VREGSUM]]
+ %3(s32) = G_ANYEXT %1(s16)
+ ; CHECK: [[VREGYEXT:%[0-9]+]] = COPY [[VREGY]]
+
+ %4(s32) = G_ADD %2, %3
+ ; CHECK: [[VREGSUM:%[0-9]+]] = ADDrr [[VREGXEXT]], [[VREGYEXT]], 14, _, _
+
+ %5(s16) = G_TRUNC %4(s32)
+ ; CHECK: [[VREGSUMTR:%[0-9]+]] = COPY [[VREGSUM]]
+
+ %r0 = COPY %5(s16)
+ ; CHECK: %r0 = COPY [[VREGSUMTR]]
BX_RET 14, _, implicit %r0
; CHECK: BX_RET 14, _, implicit %r0
@@ -352,9 +436,15 @@ registers:
- { id: 0, class: gprb }
- { id: 1, class: gprb }
- { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+ - { id: 5, class: gprb }
# CHECK-DAG: id: 0, class: gpr
# CHECK-DAG: id: 1, class: gpr
# CHECK-DAG: id: 2, class: gpr
+# CHECK-DAG: id: 3, class: gpr
+# CHECK-DAG: id: 4, class: gpr
+# CHECK-DAG: id: 5, class: gpr
body: |
bb.0:
liveins: %r0, %r1
@@ -365,11 +455,20 @@ body: |
%1(s8) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
- %2(s8) = G_SUB %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGX]], [[VREGY]], 14, _, _
+ %2(s32) = G_ANYEXT %0(s8)
+ ; CHECK: [[VREGXEXT:%[0-9]+]] = COPY [[VREGX]]
- %r0 = COPY %2(s8)
- ; CHECK: %r0 = COPY [[VREGRES]]
+ %3(s32) = G_ANYEXT %1(s8)
+ ; CHECK: [[VREGYEXT:%[0-9]+]] = COPY [[VREGY]]
+
+ %4(s32) = G_SUB %2, %3
+ ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGXEXT]], [[VREGYEXT]], 14, _, _
+
+ %5(s8) = G_TRUNC %4(s32)
+ ; CHECK: [[VREGRESTR:%[0-9]+]] = COPY [[VREGRES]]
+
+ %r0 = COPY %5(s8)
+ ; CHECK: %r0 = COPY [[VREGRESTR]]
BX_RET 14, _, implicit %r0
; CHECK: BX_RET 14, _, implicit %r0
@@ -385,9 +484,15 @@ registers:
- { id: 0, class: gprb }
- { id: 1, class: gprb }
- { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+ - { id: 5, class: gprb }
# CHECK-DAG: id: 0, class: gpr
# CHECK-DAG: id: 1, class: gpr
# CHECK-DAG: id: 2, class: gpr
+# CHECK-DAG: id: 3, class: gpr
+# CHECK-DAG: id: 4, class: gpr
+# CHECK-DAG: id: 5, class: gpr
body: |
bb.0:
liveins: %r0, %r1
@@ -398,11 +503,20 @@ body: |
%1(s16) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
- %2(s16) = G_SUB %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGX]], [[VREGY]], 14, _, _
+ %2(s32) = G_ANYEXT %0(s16)
+ ; CHECK: [[VREGXEXT:%[0-9]+]] = COPY [[VREGX]]
- %r0 = COPY %2(s16)
- ; CHECK: %r0 = COPY [[VREGRES]]
+ %3(s32) = G_ANYEXT %1(s16)
+ ; CHECK: [[VREGYEXT:%[0-9]+]] = COPY [[VREGY]]
+
+ %4(s32) = G_SUB %2, %3
+ ; CHECK: [[VREGRES:%[0-9]+]] = SUBrr [[VREGXEXT]], [[VREGYEXT]], 14, _, _
+
+ %5(s16) = G_TRUNC %4(s32)
+ ; CHECK: [[VREGRESTR:%[0-9]+]] = COPY [[VREGRES]]
+
+ %r0 = COPY %5(s16)
+ ; CHECK: %r0 = COPY [[VREGRESTR]]
BX_RET 14, _, implicit %r0
; CHECK: BX_RET 14, _, implicit %r0
@@ -451,9 +565,15 @@ registers:
- { id: 0, class: gprb }
- { id: 1, class: gprb }
- { id: 2, class: gprb }
-# CHECK-DAG: id: 0, class: gprnopc
-# CHECK-DAG: id: 1, class: gprnopc
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+ - { id: 5, class: gprb }
+# CHECK-DAG: id: 0, class: gpr
+# CHECK-DAG: id: 1, class: gpr
# CHECK-DAG: id: 2, class: gprnopc
+# CHECK-DAG: id: 3, class: gprnopc
+# CHECK-DAG: id: 4, class: gprnopc
+# CHECK-DAG: id: 5, class: gpr
body: |
bb.0:
liveins: %r0, %r1
@@ -464,11 +584,20 @@ body: |
%1(s8) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
- %2(s8) = G_MUL %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGX]], [[VREGY]], 14, _, _
+ %2(s32) = G_ANYEXT %0(s8)
+ ; CHECK: [[VREGXEXT:%[0-9]+]] = COPY [[VREGX]]
- %r0 = COPY %2(s8)
- ; CHECK: %r0 = COPY [[VREGRES]]
+ %3(s32) = G_ANYEXT %1(s8)
+ ; CHECK: [[VREGYEXT:%[0-9]+]] = COPY [[VREGY]]
+
+ %4(s32) = G_MUL %2, %3
+ ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGXEXT]], [[VREGYEXT]], 14, _, _
+
+ %5(s8) = G_TRUNC %4(s32)
+ ; CHECK: [[VREGRESTR:%[0-9]+]] = COPY [[VREGRES]]
+
+ %r0 = COPY %5(s8)
+ ; CHECK: %r0 = COPY [[VREGRESTR]]
BX_RET 14, _, implicit %r0
; CHECK: BX_RET 14, _, implicit %r0
@@ -484,9 +613,15 @@ registers:
- { id: 0, class: gprb }
- { id: 1, class: gprb }
- { id: 2, class: gprb }
-# CHECK-DAG: id: 0, class: gprnopc
-# CHECK-DAG: id: 1, class: gprnopc
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+ - { id: 5, class: gprb }
+# CHECK-DAG: id: 0, class: gpr
+# CHECK-DAG: id: 1, class: gpr
# CHECK-DAG: id: 2, class: gprnopc
+# CHECK-DAG: id: 3, class: gprnopc
+# CHECK-DAG: id: 4, class: gprnopc
+# CHECK-DAG: id: 5, class: gpr
body: |
bb.0:
liveins: %r0, %r1
@@ -497,11 +632,20 @@ body: |
%1(s16) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
- %2(s16) = G_MUL %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGX]], [[VREGY]], 14, _, _
+ %2(s32) = G_ANYEXT %0(s16)
+ ; CHECK: [[VREGXEXT:%[0-9]+]] = COPY [[VREGX]]
- %r0 = COPY %2(s16)
- ; CHECK: %r0 = COPY [[VREGRES]]
+ %3(s32) = G_ANYEXT %1(s16)
+ ; CHECK: [[VREGYEXT:%[0-9]+]] = COPY [[VREGY]]
+
+ %4(s32) = G_MUL %2, %3
+ ; CHECK: [[VREGRES:%[0-9]+]] = MUL [[VREGXEXT]], [[VREGYEXT]], 14, _, _
+
+ %5(s16) = G_TRUNC %4(s32)
+ ; CHECK: [[VREGRESTR:%[0-9]+]] = COPY [[VREGRES]]
+
+ %r0 = COPY %5(s16)
+ ; CHECK: %r0 = COPY [[VREGRESTR]]
BX_RET 14, _, implicit %r0
; CHECK: BX_RET 14, _, implicit %r0
diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index 44fe7410b42c..53577dbd76f6 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -421,7 +421,7 @@ entry:
define arm_aapcscc void @test_indirect_call(void() *%fptr) {
; CHECK-LABEL: name: test_indirect_call
; CHECK: [[FPTR:%[0-9]+]](p0) = COPY %r0
-; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK: BLX [[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp
; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
entry:
@@ -433,7 +433,7 @@ declare arm_aapcscc void @call_target()
define arm_aapcscc void @test_direct_call() {
; CHECK-LABEL: name: test_direct_call
-; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK: BLX @call_target, csr_aapcs, implicit-def %lr, implicit %sp
; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
entry:
@@ -447,7 +447,7 @@ define arm_aapcscc i32* @test_call_simple_reg_params(i32 *%a, i32 %b) {
; CHECK-LABEL: name: test_call_simple_reg_params
; CHECK-DAG: [[AVREG:%[0-9]+]](p0) = COPY %r0
; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r1
-; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK-DAG: %r0 = COPY [[BVREG]]
; CHECK-DAG: %r1 = COPY [[AVREG]]
; CHECK: BLX @simple_reg_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit-def %r0
@@ -466,7 +466,7 @@ define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
; CHECK-LABEL: name: test_call_simple_stack_params
; CHECK-DAG: [[AVREG:%[0-9]+]](p0) = COPY %r0
; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r1
-; CHECK: ADJCALLSTACKDOWN 8, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 8, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK-DAG: %r0 = COPY [[BVREG]]
; CHECK-DAG: %r1 = COPY [[AVREG]]
; CHECK-DAG: %r2 = COPY [[BVREG]]
@@ -496,7 +496,7 @@ define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
; CHECK-DAG: [[AVREG:%[0-9]+]](s8) = COPY %r0
; CHECK-DAG: [[BVREG:%[0-9]+]](s16) = COPY %r1
; CHECK-DAG: [[CVREG:%[0-9]+]](s1) = COPY %r2
-; CHECK: ADJCALLSTACKDOWN 20, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 20, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK: [[SEXTA:%[0-9]+]](s32) = G_SEXT [[AVREG]](s8)
; CHECK: %r0 = COPY [[SEXTA]]
; CHECK: [[ZEXTA:%[0-9]+]](s32) = G_ZEXT [[AVREG]](s8)
@@ -547,7 +547,7 @@ define arm_aapcs_vfpcc double @test_call_vfpcc_fp_params(double %a, float %b) {
; CHECK-LABEL: name: test_call_vfpcc_fp_params
; CHECK-DAG: [[AVREG:%[0-9]+]](s64) = COPY %d0
; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %s2
-; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK-DAG: %s0 = COPY [[BVREG]]
; CHECK-DAG: %d1 = COPY [[AVREG]]
; CHECK: BLX @vfpcc_fp_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %d1, implicit-def %d0
@@ -569,7 +569,7 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
; LITTLE-DAG: [[AVREG:%[0-9]+]](s64) = G_SEQUENCE [[A1]](s32), 0, [[A2]](s32), 32
; BIG-DAG: [[AVREG:%[0-9]+]](s64) = G_SEQUENCE [[A2]](s32), 0, [[A1]](s32), 32
; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r2
-; CHECK: ADJCALLSTACKDOWN 16, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 16, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK-DAG: %r0 = COPY [[BVREG]]
; CHECK-DAG: [[A1:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 0
; CHECK-DAG: [[A2:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 32
@@ -608,7 +608,7 @@ declare arm_aapcscc float @different_call_conv_target(float)
define arm_aapcs_vfpcc float @test_call_different_call_conv(float %x) {
; CHECK-LABEL: name: test_call_different_call_conv
; CHECK: [[X:%[0-9]+]](s32) = COPY %s0
-; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
; CHECK: %r0 = COPY [[X]]
; CHECK: BLX @different_call_conv_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit-def %r0
; CHECK: [[R:%[0-9]+]](s32) = COPY %r0
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 625d35acf17b..f6ac92597cb2 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -91,8 +91,9 @@ body: |
%0(s8) = COPY %r0
%1(s8) = COPY %r1
%2(s8) = G_ADD %0, %1
- ; G_ADD with s8 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; G_ADD with s8 should widen
+ ; CHECK: {{%[0-9]+}}(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
BX_RET 14, _, implicit %r0
...
@@ -115,8 +116,9 @@ body: |
%0(s16) = COPY %r0
%1(s16) = COPY %r1
%2(s16) = G_ADD %0, %1
- ; G_ADD with s16 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; G_ADD with s16 should widen
+ ; CHECK: {{%[0-9]+}}(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
BX_RET 14, _, implicit %r0
@@ -165,8 +167,9 @@ body: |
%0(s8) = COPY %r0
%1(s8) = COPY %r1
%2(s8) = G_SUB %0, %1
- ; G_SUB with s8 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; G_SUB with s8 should widen
+ ; CHECK: {{%[0-9]+}}(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
BX_RET 14, _, implicit %r0
...
@@ -189,8 +192,9 @@ body: |
%0(s16) = COPY %r0
%1(s16) = COPY %r1
%2(s16) = G_SUB %0, %1
- ; G_SUB with s16 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; G_SUB with s16 should widen
+ ; CHECK: {{%[0-9]+}}(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
BX_RET 14, _, implicit %r0
@@ -239,8 +243,9 @@ body: |
%0(s8) = COPY %r0
%1(s8) = COPY %r1
%2(s8) = G_MUL %0, %1
- ; G_MUL with s8 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; G_MUL with s8 should widen
+ ; CHECK: {{%[0-9]+}}(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s8)
BX_RET 14, _, implicit %r0
...
@@ -263,8 +268,9 @@ body: |
%0(s16) = COPY %r0
%1(s16) = COPY %r1
%2(s16) = G_MUL %0, %1
- ; G_MUL with s16 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; G_MUL with s16 should widen
+ ; CHECK: {{%[0-9]+}}(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s16)
BX_RET 14, _, implicit %r0
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index 4e94fb4e3481..dfccc47c277c 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -25,6 +25,9 @@
define void @test_constants() { ret void }
+ define void @test_anyext_s8_32() { ret void }
+ define void @test_anyext_s16_32() { ret void }
+
define void @test_trunc_s32_16() { ret void }
define void @test_fadd_s32() #0 { ret void }
@@ -71,19 +74,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s16) = COPY %r0
%1(s16) = COPY %r1
- %2(s16) = G_ADD %0, %1
- %r0 = COPY %2(s16)
+ %2(s32) = G_ANYEXT %0(s16)
+ %3(s32) = G_ANYEXT %1(s16)
+ %4(s32) = G_ADD %2, %3
+ %5(s16) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s16)
BX_RET 14, _, implicit %r0
...
@@ -97,19 +109,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s8) = COPY %r0
%1(s8) = COPY %r1
- %2(s8) = G_ADD %0, %1
- %r0 = COPY %2(s8)
+ %2(s32) = G_ANYEXT %0(s8)
+ %3(s32) = G_ANYEXT %1(s8)
+ %4(s32) = G_ADD %2, %3
+ %5(s8) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s8)
BX_RET 14, _, implicit %r0
...
@@ -123,19 +144,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s1) = COPY %r0
%1(s1) = COPY %r1
- %2(s1) = G_ADD %0, %1
- %r0 = COPY %2(s1)
+ %2(s32) = G_ANYEXT %0(s1)
+ %3(s32) = G_ANYEXT %1(s1)
+ %4(s32) = G_ADD %2, %3
+ %5(s1) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s1)
BX_RET 14, _, implicit %r0
...
@@ -175,19 +205,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s16) = COPY %r0
%1(s16) = COPY %r1
- %2(s16) = G_SUB %0, %1
- %r0 = COPY %2(s16)
+ %2(s32) = G_ANYEXT %0(s16)
+ %3(s32) = G_ANYEXT %1(s16)
+ %4(s32) = G_SUB %2, %3
+ %5(s16) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s16)
BX_RET 14, _, implicit %r0
...
@@ -201,19 +240,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s8) = COPY %r0
%1(s8) = COPY %r1
- %2(s8) = G_SUB %0, %1
- %r0 = COPY %2(s8)
+ %2(s32) = G_ANYEXT %0(s8)
+ %3(s32) = G_ANYEXT %1(s8)
+ %4(s32) = G_SUB %2, %3
+ %5(s8) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s8)
BX_RET 14, _, implicit %r0
...
@@ -253,19 +301,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s16) = COPY %r0
%1(s16) = COPY %r1
- %2(s16) = G_MUL %0, %1
- %r0 = COPY %2(s16)
+ %2(s32) = G_ANYEXT %0(s16)
+ %3(s32) = G_ANYEXT %1(s16)
+ %4(s32) = G_MUL %2, %3
+ %5(s16) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s16)
BX_RET 14, _, implicit %r0
...
@@ -279,19 +336,28 @@ selected: false
# CHECK: - { id: 0, class: gprb }
# CHECK: - { id: 1, class: gprb }
# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0:
liveins: %r0, %r1
%0(s8) = COPY %r0
%1(s8) = COPY %r1
- %2(s8) = G_MUL %0, %1
- %r0 = COPY %2(s8)
+ %2(s32) = G_ANYEXT %0(s8)
+ %3(s32) = G_ANYEXT %1(s8)
+ %4(s32) = G_MUL %2, %3
+ %5(s8) = G_TRUNC %4(s32)
+ %r0 = COPY %5(s8)
BX_RET 14, _, implicit %r0
...
@@ -500,6 +566,48 @@ body: |
BX_RET 14, _, implicit %r0
...
---
+name: test_anyext_s8_32
+# CHECK-LABEL: name: test_anyext_s8_32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s8) = COPY %r0
+ %1(s32) = G_ANYEXT %0(s8)
+ %r0 = COPY %1(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_anyext_s16_32
+# CHECK-LABEL: name: test_anyext_s16_32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s16) = COPY %r0
+ %1(s32) = G_ANYEXT %0(s16)
+ %r0 = COPY %1(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
name: test_trunc_s32_16
# CHECK-LABEL: name: test_trunc_s32_16
legalized: true
diff --git a/test/CodeGen/ARM/divmod-eabi.ll b/test/CodeGen/ARM/divmod-eabi.ll
index ce5a1df05e3f..77ffc46e6a69 100644
--- a/test/CodeGen/ARM/divmod-eabi.ll
+++ b/test/CodeGen/ARM/divmod-eabi.ll
@@ -16,17 +16,15 @@
; RUN: llc -mtriple armv7-linux-gnueabi %s -o - -O0 -optimize-regalloc | FileCheck %s --check-prefix=EABI
; RUN: llc -mtriple armv7-linux-musleabi %s -o - | FileCheck %s --check-prefix=EABI
; RUN: llc -mtriple armv7-linux-musleabi %s -o - -O0 -optimize-regalloc | FileCheck %s --check-prefix=EABI
-; RUN: llc -mtriple armv7-apple-darwin %s -o - | FileCheck %s --check-prefixes=DARWIN,DARWIN-DEFAULT
-; RUN: llc -mtriple armv7-apple-darwin %s -o - -O0 -optimize-regalloc | FileCheck %s --check-prefixes=DARWIN,DARWIN-O0
-; FIXME: long-term, we will use "-apple-macho" and won't need this exception:
-; RUN: llc -mtriple armv7-apple-darwin-eabi %s -o - | FileCheck %s --check-prefixes=DARWIN,DARWIN-DEFAULT
-; RUN: llc -mtriple armv7-apple-darwin-eabi %s -o - -O0 -optimize-regalloc | FileCheck %s --check-prefixes=DARWIN,DARWIN-O0
+; RUN: llc -mtriple armv7-apple-darwin %s -o - | FileCheck %s --check-prefixes=DARWIN
+; RUN: llc -mtriple armv7-apple-darwin %s -o - -O0 -optimize-regalloc | FileCheck %s --check-prefix=DARWIN-O0
; RUN: llc -mtriple thumbv7-windows %s -o - | FileCheck %s --check-prefixes=WINDOWS,WINDOWS-DEFAULT
; RUN: llc -mtriple thumbv7-windows %s -o - -O0 -optimize-regalloc | FileCheck %s --check-prefixes=WINDOWS,WINDOWS-O0
define signext i16 @f16(i16 signext %a, i16 signext %b) {
; EABI-LABEL: f16:
; DARWIN-LABEL: f16:
+; DARWIN-O0-LABEL: f16:
; WINDOWS-LABEL: f16:
entry:
%conv = sext i16 %a to i32
@@ -36,11 +34,9 @@ entry:
; EABI: __aeabi_idivmod
; EABI: mov [[div:r[0-9]+]], r0
; EABI: mov [[rem:r[0-9]+]], r1
-; DARWIN: ___divsi3
-; DARWIN: mov [[div:r[0-9]+]], r0
-; DARWIN: __modsi3
-; DARWIN-DEFAULT: add [[sum:r[0-9]+]], r0, [[div]]
-; DARWIN-O0: mov [[rem:r[0-9]+]], r0
+; DARWIN: __divmodsi4
+; DARWIN-O0: __divsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
; WINDOWS: __rt_sdiv
; WINDOWS-DEFAULT: add [[sum:r[0-9]+]], r1
@@ -48,16 +44,13 @@ entry:
%rem8 = srem i32 %conv1, %conv
; EABI: __aeabi_idivmod
; DARWIN: __modsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
%add = add nsw i32 %rem, %div
%add13 = add nsw i32 %add, %rem8
%conv14 = trunc i32 %add13 to i16
; EABI: add r0{{.*}}r1
; EABI: sxth r0, r0
-; DARWIN-DEFAULT: add [[res:r[0-9]+]], [[sum]], r0
-; DARWIN-O0: add [[sum:r[0-9]+]], [[rem]], [[div]]
-; DARWIN-O0: add [[res:r[0-9]+]], [[sum]], r0
-; DARWIN: sxth r0, [[res]]
; WINDOWS-DEFAULT: adds [[sum1:r[0-9]+]], [[sum]], r1
; WINDOWS-O0: adds [[sum:r[0-9]+]], [[rem]],
; WINDOWS-O0: add [[sum1:r[0-9]+]], r1
@@ -68,6 +61,7 @@ entry:
define i32 @f32(i32 %a, i32 %b) {
; EABI-LABEL: f32:
; DARWIN-LABEL: f32:
+; DARWIN-O0-LABEL: f32:
; WINDOWS-LABEL: f32:
entry:
%div = sdiv i32 %a, %b
@@ -75,11 +69,9 @@ entry:
; EABI: __aeabi_idivmod
; EABI: mov [[div:r[0-9]+]], r0
; EABI: mov [[rem:r[0-9]+]], r1
-; DARWIN: ___divsi3
-; DARWIN: mov [[div:r[0-9]+]], r0
-; DARWIN: __modsi3
-; DARWIN-DEFAULT: add [[sum:r[0-9]+]], r0, [[div]]
-; DARWIN-O0: mov [[rem:r[0-9]+]], r0
+; DARWIN: ___divmodsi4
+; DARWIN-O0: __divsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
; WINDOWS: mov [[div:r[0-9]+]], r0
; WINDOWS: __rt_sdiv
@@ -87,13 +79,11 @@ entry:
%rem1 = srem i32 %b, %a
; EABI: __aeabi_idivmod
; DARWIN: __modsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
%add = add nsw i32 %rem, %div
%add2 = add nsw i32 %add, %rem1
; EABI: add r0{{.*}}r1
-; DARWIN-DEFAULT: add r0, [[sum]], r0
-; DARWIN-O0: add [[sum:r[0-9]+]], [[rem]], [[div]]
-; DARWIN-O0: add [[res:r[0-9]+]], [[sum]], r0
; WINDOWS-DEFAULT: adds r0, [[div]], r1
; WINDOWS-O0: adds [[sum:r[0-9]+]], [[rem]], [[div]]
; WINDOWS-O0: add [[sum]], r1
@@ -103,16 +93,15 @@ entry:
define i32 @uf(i32 %a, i32 %b) {
; EABI-LABEL: uf:
; DARWIN-LABEL: uf:
+; DARWIN-O0-LABEL: uf:
; WINDOWS-LABEL: uf:
entry:
%div = udiv i32 %a, %b
%rem = urem i32 %a, %b
; EABI: __aeabi_uidivmod
-; DARWIN: ___udivsi3
-; DARWIN: mov [[div:r[0-9]+]], r0
-; DARWIN: __umodsi3
-; DARWIN-DEFAULT: add [[sum:r[0-9]+]], r0, [[div]]
-; DARWIN-O0: mov [[rem:r[0-9]+]], r0
+; DARWIN: __udivmodsi4
+; DARWIN-O0: __udivsi3
+; DARWIN-O0: __umodsi3
; WINDOWS: __rt_udiv
; WINDOWS: mov [[div:r[0-9]+]], r0
; WINDOWS: __rt_udiv
@@ -120,13 +109,11 @@ entry:
%rem1 = urem i32 %b, %a
; EABI: __aeabi_uidivmod
; DARWIN: __umodsi3
+; DARWIN-O0: __umodsi3
; WINDOWS: __rt_udiv
%add = add nuw i32 %rem, %div
%add2 = add nuw i32 %add, %rem1
; EABI: add r0{{.*}}r1
-; DARWIN-DEFAULT: add r0, [[sum]], r0
-; DARWIN-O0: add [[sum:r[0-9]+]], [[rem]], [[div]]
-; DARWIN-O0: add [[res:r[0-9]+]], [[sum]], r0
; WINDOWS-DEFAULT: adds [[sum:r[0-9]+]], [[div]], r1
; WINDOWS-O0: adds [[sum:r[0-9]+]],
; WINDOWS-O0: add [[sum]], r1
@@ -136,6 +123,7 @@ entry:
define i64 @longf(i64 %a, i64 %b) {
; EABI-LABEL: longf:
; DARWIN-LABEL: longf:
+; DARWIN-O0-LABEL: longf:
; WINDOWS-LABEL: longf:
entry:
%div = sdiv i64 %a, %b
@@ -148,6 +136,8 @@ entry:
; DARWIN: mov [[div1:r[0-9]+]], r0
; DARWIN: mov [[div2:r[0-9]+]], r1
; DARWIN: __moddi3
+; DARWIN-O0: __divdi3
+; DARWIN-O0: __moddi3
; WINDOWS: __rt_sdiv64
%add = add nsw i64 %rem, %div
; DARWIN: adds r0{{.*}}[[div1]]
@@ -160,20 +150,19 @@ entry:
define i16 @shortf(i16 %a, i16 %b) {
; EABI-LABEL: shortf:
; DARWIN-LABEL: shortf:
+; DARWIN-O0-LABEL: shortf:
; WINDOWS-LABEL: shortf:
entry:
%div = sdiv i16 %a, %b
%rem = srem i16 %a, %b
; EABI: __aeabi_idivmod
-; DARWIN: ___divsi3
-; DARWIN: mov [[div1:r[0-9]+]], r0
-; DARWIN: __modsi3
+; DARWIN: ___divmodsi4
+; DARWIN-O0: __divmodsi4
; WINDOWS: __rt_sdiv
; WINDOWS: mov [[div:r[0-9]+]], r0
; WINDOWS: __rt_sdiv
%add = add nsw i16 %rem, %div
; EABI: add r0, r1
-; DARWIN: add r0{{.*}}[[div1]]
; WINDOWS: adds r0, r1, [[div]]
ret i16 %add
}
@@ -181,20 +170,20 @@ entry:
define i32 @g1(i32 %a, i32 %b) {
; EABI-LABEL: g1:
; DARWIN-LABEL: g1:
+; DARWIN-O0-LABEL: g1:
; WINDOWS-LABEL: g1:
entry:
%div = sdiv i32 %a, %b
%rem = srem i32 %a, %b
; EABI: __aeabi_idivmod
-; DARWIN: ___divsi3
-; DARWIN: mov [[sum:r[0-9]+]], r0
-; DARWIN: __modsi3
+; DARWIN: ___divmodsi4
+; DARWIN-O0: __divsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
; WINDOWS: mov [[div:r[0-9]+]], r0
; WINDOWS: __rt_sdiv
%add = add nsw i32 %rem, %div
; EABI: add r0{{.*}}r1
-; DARWIN: add r0{{.*}}[[sum]]
; WINDOWS: adds r0, r1, [[div]]
ret i32 %add
}
@@ -203,11 +192,13 @@ entry:
define i32 @g2(i32 %a, i32 %b) {
; EABI-LABEL: g2:
; DARWIN-LABEL: g2:
+; DARWIN-O0-LABEL: g2:
; WINDOWS-LABEL: g2:
entry:
%rem = srem i32 %a, %b
; EABI: __aeabi_idivmod
; DARWIN: __modsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
ret i32 %rem
; EABI: mov r0, r1
@@ -217,6 +208,7 @@ entry:
define i32 @g3(i32 %a, i32 %b) {
; EABI-LABEL: g3:
; DARWIN-LABEL: g3:
+; DARWIN-O0-LABEL: g3:
; WINDOWS-LABEL: g3:
entry:
%rem = srem i32 %a, %b
@@ -224,11 +216,13 @@ entry:
; EABI: mov [[mod:r[0-9]+]], r1
; DARWIN: __modsi3
; DARWIN: mov [[sum:r[0-9]+]], r0
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
; WINDOWS: mov [[rem:r[0-9]+]], r1
%rem1 = srem i32 %b, %rem
; EABI: __aeabi_idivmod
; DARWIN: __modsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
%add = add nsw i32 %rem1, %rem
; EABI: add r0, r1, [[mod]]
@@ -240,6 +234,7 @@ entry:
define i32 @g4(i32 %a, i32 %b) {
; EABI-LABEL: g4:
; DARWIN-LABEL: g4:
+; DARWIN-O0-LABEL: g4:
; WINDOWS-LABEL: g4:
entry:
%div = sdiv i32 %a, %b
@@ -247,11 +242,13 @@ entry:
; EABI: mov [[div:r[0-9]+]], r0
; DARWIN: ___divsi3
; DARWIN: mov [[sum:r[0-9]+]], r0
+; DARWIN-O0: __divsi3
; WINDOWS: __rt_sdiv
; WINDOWS: mov [[div:r[0-9]+]], r0
%rem = srem i32 %b, %div
; EABI: __aeabi_idivmod
; DARWIN: __modsi3
+; DARWIN-O0: __modsi3
; WINDOWS: __rt_sdiv
%add = add nsw i32 %rem, %div
; EABI: add r0, r1, [[div]]
diff --git a/test/CodeGen/ARM/divmod.ll b/test/CodeGen/ARM/divmod.ll
index 9336d0c477d1..ffc1ed09cbf0 100644
--- a/test/CodeGen/ARM/divmod.ll
+++ b/test/CodeGen/ARM/divmod.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mtriple=arm-apple-ios5.0 -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; RUN: llc < %s -mtriple=arm-apple-ios5.0 -mcpu=swift | FileCheck %s -check-prefix=SWIFT
+; RUN: llc < %s -mtriple=thumbv7-apple-macho -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; rdar://12481395
diff --git a/test/CodeGen/AVR/select-mbb-placement-bug.ll b/test/CodeGen/AVR/select-mbb-placement-bug.ll
new file mode 100644
index 000000000000..ca7ec1ab831c
--- /dev/null
+++ b/test/CodeGen/AVR/select-mbb-placement-bug.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mcpu=atmega328p < %s -march=avr | FileCheck %s
+
+; CHECK-LABEL: loopy
+define internal fastcc void @loopy() {
+
+; In this case, when we expand `Select8`/`Select16`, we should be
+; replacing the existing MBB instead of adding a new one.
+;
+; https://github.com/avr-rust/rust/issues/49
+
+; CHECK: LBB0_1:
+; CHECK: LBB0_2:
+; CHECK-NOT: LBB0_3:
+start:
+ br label %bb7.preheader
+
+bb7.preheader: ; preds = %bb10, %start
+ %i = phi i8 [ 0, %start ], [ %j, %bb10 ]
+ %j = phi i8 [ 1, %start ], [ %next, %bb10 ]
+ br label %bb10
+
+bb4: ; preds = %bb10
+ ret void
+
+bb10: ; preds = %bb7.preheader
+ tail call fastcc void @observe(i8 %i, i8 1)
+ %0 = icmp ult i8 %j, 20
+ %1 = zext i1 %0 to i8
+ %next = add i8 %j, %1
+ br i1 %0, label %bb7.preheader, label %bb4
+
+}
+
+declare void @observe(i8, i8);
+
diff --git a/test/CodeGen/Generic/expand-experimental-reductions.ll b/test/CodeGen/Generic/expand-experimental-reductions.ll
new file mode 100644
index 000000000000..ef813fa7205b
--- /dev/null
+++ b/test/CodeGen/Generic/expand-experimental-reductions.ll
@@ -0,0 +1,210 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -expand-reductions -S | FileCheck %s
+; Tests without a target which should expand all reductions
+declare i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.or.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.i64.v2i64(<2 x i64>)
+
+declare float @llvm.experimental.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
+declare float @llvm.experimental.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
+
+declare i64 @llvm.experimental.vector.reduce.smax.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smin.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.umax.i64.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.umin.i64.v2i64(<2 x i64>)
+
+declare double @llvm.experimental.vector.reduce.fmax.f64.v2f64(<2 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.f64.v2f64(<2 x double>)
+
+
+define i64 @add_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @add_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[BIN_RDX]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @mul_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @mul_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[BIN_RDX]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @and_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @and_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = and <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[BIN_RDX]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.and.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @or_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @or_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = or <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[BIN_RDX]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.or.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @xor_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @xor_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = xor <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[BIN_RDX]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.xor.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define float @fadd_f32(<4 x float> %vec) {
+; CHECK-LABEL: @fadd_f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[VEC:%.*]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: ret float [[TMP0]]
+;
+entry:
+ %r = call fast float @llvm.experimental.vector.reduce.fadd.f32.v4f32(float undef, <4 x float> %vec)
+ ret float %r
+}
+
+define float @fadd_f32_strict(<4 x float> %vec) {
+; CHECK-LABEL: @fadd_f32_strict(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[R:%.*]] = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v4f32(float undef, <4 x float> [[VEC:%.*]])
+; CHECK-NEXT: ret float [[R]]
+;
+entry:
+ %r = call float @llvm.experimental.vector.reduce.fadd.f32.v4f32(float undef, <4 x float> %vec)
+ ret float %r
+}
+
+define float @fmul_f32(<4 x float> %vec) {
+; CHECK-LABEL: @fmul_f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[VEC:%.*]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fmul fast <4 x float> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fmul fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: ret float [[TMP0]]
+;
+entry:
+ %r = call fast float @llvm.experimental.vector.reduce.fmul.f32.v4f32(float undef, <4 x float> %vec)
+ ret float %r
+}
+
+define i64 @smax_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @smax_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp sgt <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <2 x i1> [[RDX_MINMAX_CMP]], <2 x i64> [[VEC]], <2 x i64> [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[RDX_MINMAX_SELECT]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.smax.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @smin_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @smin_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp slt <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <2 x i1> [[RDX_MINMAX_CMP]], <2 x i64> [[VEC]], <2 x i64> [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[RDX_MINMAX_SELECT]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.smin.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @umax_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @umax_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp ugt <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <2 x i1> [[RDX_MINMAX_CMP]], <2 x i64> [[VEC]], <2 x i64> [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[RDX_MINMAX_SELECT]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.umax.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define i64 @umin_i64(<2 x i64> %vec) {
+; CHECK-LABEL: @umin_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x i64> [[VEC:%.*]], <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp ult <2 x i64> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <2 x i1> [[RDX_MINMAX_CMP]], <2 x i64> [[VEC]], <2 x i64> [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x i64> [[RDX_MINMAX_SELECT]], i32 0
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ %r = call i64 @llvm.experimental.vector.reduce.umin.i64.v2i64(<2 x i64> %vec)
+ ret i64 %r
+}
+
+define double @fmax_f64(<2 x double> %vec) {
+; CHECK-LABEL: @fmax_f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x double> [[VEC:%.*]], <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast ogt <2 x double> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <2 x i1> [[RDX_MINMAX_CMP]], <2 x double> [[VEC]], <2 x double> [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x double> [[RDX_MINMAX_SELECT]], i32 0
+; CHECK-NEXT: ret double [[TMP0]]
+;
+entry:
+ %r = call double @llvm.experimental.vector.reduce.fmax.f64.v2f64(<2 x double> %vec)
+ ret double %r
+}
+
+define double @fmin_f64(<2 x double> %vec) {
+; CHECK-LABEL: @fmin_f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <2 x double> [[VEC:%.*]], <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt <2 x double> [[VEC]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <2 x i1> [[RDX_MINMAX_CMP]], <2 x double> [[VEC]], <2 x double> [[RDX_SHUF]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <2 x double> [[RDX_MINMAX_SELECT]], i32 0
+; CHECK-NEXT: ret double [[TMP0]]
+;
+entry:
+ %r = call double @llvm.experimental.vector.reduce.fmin.f64.v2f64(<2 x double> %vec)
+ ret double %r
+}
diff --git a/test/CodeGen/Hexagon/regalloc-bad-undef.mir b/test/CodeGen/Hexagon/regalloc-bad-undef.mir
index d8fbb92b0d50..a541e766f593 100644
--- a/test/CodeGen/Hexagon/regalloc-bad-undef.mir
+++ b/test/CodeGen/Hexagon/regalloc-bad-undef.mir
@@ -161,17 +161,17 @@ body: |
bb.1.for.body:
successors: %bb.3.for.end, %bb.2.if.end82
- ADJCALLSTACKDOWN 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3, implicit-def %r0
ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29
undef %29.isub_lo = COPY killed %r0
%29.isub_hi = S2_asr_i_r %29.isub_lo, 31
- ADJCALLSTACKDOWN 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3, implicit-def %r0
ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29
%32.isub_lo = COPY killed %r0
%7 = S2_extractup %32, 22, 9
- ADJCALLSTACKDOWN 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3, implicit-def %r0
ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29
undef %43.isub_lo = COPY killed %r0
@@ -179,7 +179,7 @@ body: |
%16 = S2_extractup %43, 6, 25
%18 = A2_tfrpi -1
%18 = S2_asl_r_p_acc %18, %47, %16.isub_lo
- ADJCALLSTACKDOWN 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29
J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3
ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29
%22 = S2_asl_r_p %18, %8.isub_lo
diff --git a/test/CodeGen/Lanai/masking_setccs.ll b/test/CodeGen/Lanai/masking_setccs.ll
new file mode 100644
index 000000000000..48136fd42574
--- /dev/null
+++ b/test/CodeGen/Lanai/masking_setccs.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s | FileCheck %s
+
+; Test that unnecessary masking with 0x1 is not inserted.
+
+target datalayout = "E-m:e-p:32:32-i64:64-a:0:32-n32-S64"
+target triple = "lanai"
+
+; CHECK-LABEL: masking:
+; CHECK-NOT: mov 1
+define i32 @masking(i32 inreg %a, i32 inreg %b, i32 inreg %c, i32 inreg %d) {
+entry:
+ %cmp = icmp ne i32 %a, 0
+ %cmp1 = icmp ult i32 %a, %b
+ %or.cond = and i1 %cmp, %cmp1
+ br i1 %or.cond, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %cmp2 = icmp ne i32 %b, 0
+ %cmp4 = icmp ult i32 %b, %c
+ %or.cond29 = and i1 %cmp2, %cmp4
+ br i1 %or.cond29, label %return, label %if.end6
+
+if.end6: ; preds = %if.end
+ %cmp7 = icmp ne i32 %c, 0
+ %cmp9 = icmp ult i32 %c, %d
+ %or.cond30 = and i1 %cmp7, %cmp9
+ br i1 %or.cond30, label %return, label %if.end11
+
+if.end11: ; preds = %if.end6
+ %cmp12 = icmp ne i32 %d, 0
+ %cmp14 = icmp ult i32 %d, %a
+ %or.cond31 = and i1 %cmp12, %cmp14
+ %b. = select i1 %or.cond31, i32 %b, i32 21
+ ret i32 %b.
+
+return: ; preds = %if.end6, %if.end, %entry
+ %retval.0 = phi i32 [ %c, %entry ], [ %d, %if.end ], [ %a, %if.end6 ]
+ ret i32 %retval.0
+}
+
+; CHECK-LABEL: notnot:
+; CHECK-NOT: mov 1
+define i32 @notnot(i32 %x) {
+entry:
+ %tobool = icmp ne i32 %x, 0
+ %lnot.ext = zext i1 %tobool to i32
+ ret i32 %lnot.ext
+}
diff --git a/test/CodeGen/Lanai/peephole-compare.mir b/test/CodeGen/Lanai/peephole-compare.mir
index 5056a05ed1f6..51133b5e58e3 100644
--- a/test/CodeGen/Lanai/peephole-compare.mir
+++ b/test/CodeGen/Lanai/peephole-compare.mir
@@ -644,7 +644,7 @@ body: |
bb.1.if.then:
successors: %bb.2.while.body
- ADJCALLSTACKDOWN 0, implicit-def dead %sp, implicit %sp
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp
CALL @g, csr, implicit-def dead %rca, implicit %sp, implicit-def %sp, implicit-def %rv
ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp
@@ -663,7 +663,7 @@ body: |
bb.4.if.then4:
successors: %bb.5.while.body6
- ADJCALLSTACKDOWN 0, implicit-def dead %sp, implicit %sp
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp
CALL @g, csr, implicit-def dead %rca, implicit %sp, implicit-def %sp, implicit-def %rv
ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp
diff --git a/test/CodeGen/MIR/ARM/PR32721_ifcvt_triangle_unanalyzable.mir b/test/CodeGen/MIR/ARM/PR32721_ifcvt_triangle_unanalyzable.mir
new file mode 100644
index 000000000000..96801f5b0a37
--- /dev/null
+++ b/test/CodeGen/MIR/ARM/PR32721_ifcvt_triangle_unanalyzable.mir
@@ -0,0 +1,24 @@
+# RUN: llc -mtriple=arm-apple-ios -run-pass=if-converter %s -o - | FileCheck %s
+---
+name: foo
+body: |
+ bb.0:
+ B %bb.2
+
+ bb.1:
+ BX_RET 14, 0
+
+ bb.2:
+ Bcc %bb.1, 1, %cpsr
+
+ bb.3:
+ B %bb.1
+
+...
+
+# We should get a single block containing the BX_RET, with no successors at all
+
+# CHECK: body:
+# CHECK-NEXT: bb.0:
+# CHECK-NEXT: BX_RET
+
diff --git a/test/CodeGen/MIR/ARM/ifcvt_canFallThroughTo.mir b/test/CodeGen/MIR/ARM/ifcvt_canFallThroughTo.mir
new file mode 100644
index 000000000000..5a1583f7a9be
--- /dev/null
+++ b/test/CodeGen/MIR/ARM/ifcvt_canFallThroughTo.mir
@@ -0,0 +1,64 @@
+# RUN: llc -mtriple=arm-apple-ios -o - %s -run-pass if-converter | FileCheck %s
+---
+name: f1
+body: |
+ bb.0:
+ successors: %bb.1
+
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2, %bb.4
+
+ Bcc %bb.4, 1, %cpsr
+
+ bb.2:
+ successors: %bb.3, %bb.5
+
+ Bcc %bb.5, 1, %cpsr
+
+ bb.3:
+ successors: %bb.5
+
+ B %bb.5
+
+ bb.4:
+ successors:
+
+ bb.5:
+ successors: %bb.1, %bb.6
+
+ Bcc %bb.1, 1, %cpsr
+
+ bb.6:
+ BX_RET 14, _
+
+...
+
+# IfConversion.cpp/canFallThroughTo thought there was a fallthrough from
+# bb.4 to bb.5 even though the successor list was empty.
+# bb.4 is empty, so it certainly looks like it can fall through, but this is
+# what happens for a bb that just contains an "unreachable".
+
+#CHECK: body: |
+#CHECK: bb.0:
+#CHECK: successors: %bb.1
+
+#CHECK: bb.1:
+#CHECK: successors: %bb.3({{.*}}), %bb.2
+
+# The original conditional branch from bb.1, jumping to the empty bb
+#CHECK: Bcc %bb.2
+#CHECK: B %bb.3
+
+# Empty bb.2, which originally contained "unreachable" and thus has no successors
+#CHECK: bb.2:
+#CHECK-NOT: successors
+
+#CHECK: bb.3:
+#CHECK: successors: %bb.1
+
+# Conditional BX_RET and then loop back to bb.1
+#CHECK: BX_RET 0
+#CHECK: B %bb.1
+
diff --git a/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir b/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
index 2d5347e5d30d..14bb5db5a51d 100644
--- a/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
+++ b/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
@@ -60,7 +60,7 @@ body: |
liveins: %eax
MOV32mr %stack.0.tmp, 1, _, 0, _, killed %eax
- ADJCALLSTACKDOWN64 0, 0, implicit-def %rsp, implicit-def dead %eflags, implicit %rsp
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def dead %eflags, implicit %rsp
%rsi = LEA64r %stack.0.tmp, 1, _, 0, _
%edi = MOV32r0 implicit-def dead %eflags
CALL64pcrel32 @doSomething, csr_64, implicit %rsp, implicit %edi, implicit %rsi, implicit-def %rsp, implicit-def %eax
diff --git a/test/CodeGen/MSP430/hwmult16.ll b/test/CodeGen/MSP430/hwmult16.ll
new file mode 100644
index 000000000000..b23f1ad37d81
--- /dev/null
+++ b/test/CodeGen/MSP430/hwmult16.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -mhwmult=16bit < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi_hw
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl_hw
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll_hw
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/MSP430/hwmult32.ll b/test/CodeGen/MSP430/hwmult32.ll
new file mode 100644
index 000000000000..6ffeb9698862
--- /dev/null
+++ b/test/CodeGen/MSP430/hwmult32.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -mhwmult=32bit < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi_hw
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl_hw32
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll_hw32
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/MSP430/hwmultf5.ll b/test/CodeGen/MSP430/hwmultf5.ll
new file mode 100644
index 000000000000..51ca4be4a654
--- /dev/null
+++ b/test/CodeGen/MSP430/hwmultf5.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -mhwmult=f5series < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi_f5hw
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl_f5hw
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll_f5hw
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/MSP430/jumptable.ll b/test/CodeGen/MSP430/jumptable.ll
index 5ccdbb701db1..b4366251698b 100644
--- a/test/CodeGen/MSP430/jumptable.ll
+++ b/test/CodeGen/MSP430/jumptable.ll
@@ -12,7 +12,7 @@ entry:
store i16 %i, i16* %i.addr, align 2
%0 = load i16, i16* %i.addr, align 2
; CHECK: mov.w #2, r13
-; CHECK: call #__mulhi3hw_noint
+; CHECK: call #__mspabi_mpyi
; CHECK: br .LJTI0_0(r12)
switch i16 %0, label %sw.default [
i16 0, label %sw.bb
diff --git a/test/CodeGen/MSP430/libcalls.ll b/test/CodeGen/MSP430/libcalls.ll
new file mode 100644
index 000000000000..950ed6c17e2c
--- /dev/null
+++ b/test/CodeGen/MSP430/libcalls.ll
@@ -0,0 +1,595 @@
+; RUN: llc -O0 < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+@g_double = global double 123.0, align 8
+@g_float = global float 123.0, align 8
+@g_i32 = global i32 123, align 8
+@g_i64 = global i64 456, align 8
+@g_i16 = global i16 789, align 8
+
+define float @d2f() #0 {
+entry:
+; CHECK: d2f:
+
+; CHECK: call #__mspabi_cvtdf
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptrunc double %0 to float
+
+ ret float %1
+}
+
+define double @f2d() #0 {
+entry:
+; CHECK: f2d:
+
+; CHECK: call #__mspabi_cvtfd
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fpext float %0 to double
+
+ ret double %1
+}
+
+define i32 @d2l() #0 {
+entry:
+; CHECK: d2l:
+
+; CHECK: call #__mspabi_fixdli
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptosi double %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @d2ll() #0 {
+entry:
+; CHECK: d2ll:
+
+; CHECK: call #__mspabi_fixdlli
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptosi double %0 to i64
+
+ ret i64 %1
+}
+
+define i32 @d2ul() #0 {
+entry:
+; CHECK: d2ul:
+
+; CHECK: call #__mspabi_fixdul
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptoui double %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @d2ull() #0 {
+entry:
+; CHECK: d2ull:
+
+; CHECK: call #__mspabi_fixdull
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fptoui double %0 to i64
+
+ ret i64 %1
+}
+
+define i32 @f2l() #0 {
+entry:
+; CHECK: f2l:
+
+; CHECK: call #__mspabi_fixfli
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptosi float %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @f2ll() #0 {
+entry:
+; CHECK: f2ll:
+
+; CHECK: call #__mspabi_fixflli
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptosi float %0 to i64
+
+ ret i64 %1
+}
+
+define i32 @f2ul() #0 {
+entry:
+; CHECK: f2ul:
+
+; CHECK: call #__mspabi_fixful
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptoui float %0 to i32
+
+ ret i32 %1
+}
+
+define i64 @f2ull() #0 {
+entry:
+; CHECK: f2ull:
+
+; CHECK: call #__mspabi_fixfull
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fptoui float %0 to i64
+
+ ret i64 %1
+}
+
+define double @l2d() #0 {
+entry:
+; CHECK: l2d:
+
+; CHECK: call #__mspabi_fltlid
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = sitofp i32 %0 to double
+
+ ret double %1
+}
+
+define double @ll2d() #0 {
+entry:
+; CHECK: ll2d:
+
+; CHECK: call #__mspabi_fltllid
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = sitofp i64 %0 to double
+
+ ret double %1
+}
+
+define double @ul2d() #0 {
+entry:
+; CHECK: ul2d:
+
+; CHECK: call #__mspabi_fltuld
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = uitofp i32 %0 to double
+
+ ret double %1
+}
+
+define double @ull2d() #0 {
+entry:
+; CHECK: ull2d:
+
+; CHECK: call #__mspabi_fltulld
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = uitofp i64 %0 to double
+
+ ret double %1
+}
+
+define float @l2f() #0 {
+entry:
+; CHECK: l2f:
+
+; CHECK: call #__mspabi_fltlif
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = sitofp i32 %0 to float
+
+ ret float %1
+}
+
+define float @ll2f() #0 {
+entry:
+; CHECK: ll2f:
+
+; CHECK: call #__mspabi_fltllif
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = sitofp i64 %0 to float
+
+ ret float %1
+}
+
+define float @ul2f() #0 {
+entry:
+; CHECK: ul2f:
+
+; CHECK: call #__mspabi_fltulf
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = uitofp i32 %0 to float
+
+ ret float %1
+}
+
+define float @ull2f() #0 {
+entry:
+; CHECK: ull2f:
+
+; CHECK: call #__mspabi_fltullf
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = uitofp i64 %0 to float
+
+ ret float %1
+}
+
+define i1 @cmpd_oeq() #0 {
+entry:
+; CHECK: cmpd_oeq:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp oeq double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_une() #0 {
+entry:
+; CHECK: cmpd_une:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp une double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_oge() #0 {
+entry:
+; CHECK: cmpd_oge:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp oge double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_olt() #0 {
+entry:
+; CHECK: cmpd_olt:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp olt double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_ole() #0 {
+entry:
+; CHECK: cmpd_ole:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp ole double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpd_ogt() #0 {
+entry:
+; CHECK: cmpd_ogt:
+
+; CHECK: call #__mspabi_cmpd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fcmp ogt double %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_oeq() #0 {
+entry:
+; CHECK: cmpf_oeq:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp oeq float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_une() #0 {
+entry:
+; CHECK: cmpf_une:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp une float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_oge() #0 {
+entry:
+; CHECK: cmpf_oge:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp oge float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_olt() #0 {
+entry:
+; CHECK: cmpf_olt:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp olt float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_ole() #0 {
+entry:
+; CHECK: cmpf_ole:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp ole float %0, 123.0
+
+ ret i1 %1
+}
+
+define i1 @cmpf_ogt() #0 {
+entry:
+; CHECK: cmpf_ogt:
+
+; CHECK: call #__mspabi_cmpf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fcmp ogt float %0, 123.0
+
+ ret i1 %1
+}
+
+define double @addd() #0 {
+entry:
+; CHECK: addd:
+
+; CHECK: call #__mspabi_addd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fadd double %0, 123.0
+
+ ret double %1
+}
+
+define float @addf() #0 {
+entry:
+; CHECK: addf:
+
+; CHECK: call #__mspabi_addf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fadd float %0, 123.0
+
+ ret float %1
+}
+
+define double @divd() #0 {
+entry:
+; CHECK: divd:
+
+; CHECK: call #__mspabi_divd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fdiv double %0, 123.0
+
+ ret double %1
+}
+
+define float @divf() #0 {
+entry:
+; CHECK: divf:
+
+; CHECK: call #__mspabi_divf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fdiv float %0, 123.0
+
+ ret float %1
+}
+
+define double @mpyd() #0 {
+entry:
+; CHECK: mpyd:
+
+; CHECK: call #__mspabi_mpyd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fmul double %0, 123.0
+
+ ret double %1
+}
+
+define float @mpyf() #0 {
+entry:
+; CHECK: mpyf:
+
+; CHECK: call #__mspabi_mpyf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fmul float %0, 123.0
+
+ ret float %1
+}
+
+define double @subd() #0 {
+entry:
+; CHECK: subd:
+
+; CHECK: call #__mspabi_subd
+ %0 = load volatile double, double* @g_double, align 8
+ %1 = fsub double %0, %0
+
+ ret double %1
+}
+
+define float @subf() #0 {
+entry:
+; CHECK: subf:
+
+; CHECK: call #__mspabi_subf
+ %0 = load volatile float, float* @g_float, align 8
+ %1 = fsub float %0, %0
+
+ ret float %1
+}
+
+define i16 @divi() #0 {
+entry:
+; CHECK: divi:
+
+; CHECK: call #__mspabi_divi
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = sdiv i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @divli() #0 {
+entry:
+; CHECK: divli:
+
+; CHECK: call #__mspabi_divli
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = sdiv i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @divlli() #0 {
+entry:
+; CHECK: divlli:
+
+; CHECK: call #__mspabi_divlli
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = sdiv i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @divu() #0 {
+entry:
+; CHECK: divu:
+
+; CHECK: call #__mspabi_divu
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = udiv i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @divul() #0 {
+entry:
+; CHECK: divul:
+
+; CHECK: call #__mspabi_divul
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = udiv i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @divull() #0 {
+entry:
+; CHECK: divull:
+
+; CHECK: call #__mspabi_divull
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = udiv i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @remi() #0 {
+entry:
+; CHECK: remi:
+
+; CHECK: call #__mspabi_remi
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = srem i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @remli() #0 {
+entry:
+; CHECK: remli:
+
+; CHECK: call #__mspabi_remli
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = srem i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @remlli() #0 {
+entry:
+; CHECK: remlli:
+
+; CHECK: call #__mspabi_remlli
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = srem i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @remu() #0 {
+entry:
+; CHECK: remu:
+
+; CHECK: call #__mspabi_remu
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = urem i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @remul() #0 {
+entry:
+; CHECK: remul:
+
+; CHECK: call #__mspabi_remul
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = urem i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @remull() #0 {
+entry:
+; CHECK: remull:
+
+; CHECK: call #__mspabi_remull
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = urem i64 %0, %0
+
+ ret i64 %1
+}
+
+define i16 @mpyi() #0 {
+entry:
+; CHECK: mpyi:
+
+; CHECK: call #__mspabi_mpyi
+ %0 = load volatile i16, i16* @g_i16, align 8
+ %1 = mul i16 %0, %0
+
+ ret i16 %1
+}
+
+define i32 @mpyli() #0 {
+entry:
+; CHECK: mpyli:
+
+; CHECK: call #__mspabi_mpyl
+ %0 = load volatile i32, i32* @g_i32, align 8
+ %1 = mul i32 %0, %0
+
+ ret i32 %1
+}
+
+define i64 @mpylli() #0 {
+entry:
+; CHECK: mpylli:
+
+; CHECK: call #__mspabi_mpyll
+ %0 = load volatile i64, i64* @g_i64, align 8
+ %1 = mul i64 %0, %0
+
+ ret i64 %1
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll b/test/CodeGen/MSP430/promote-i8-mul.ll
index dce9d25ca87a..0e05e3978b1e 100644
--- a/test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll
+++ b/test/CodeGen/MSP430/promote-i8-mul.ll
@@ -8,7 +8,7 @@ target triple = "msp430-elf"
define signext i8 @foo(i8 signext %_si1, i8 signext %_si2) nounwind readnone {
entry:
; CHECK-LABEL: foo:
-; CHECK: call #__mulqi3
+; CHECK: call #__mspabi_mpyi
%mul = mul i8 %_si2, %_si1 ; <i8> [#uses=1]
ret i8 %mul
}
diff --git a/test/CodeGen/NVPTX/bug17709.ll b/test/CodeGen/NVPTX/bug17709.ll
index 076c44684579..6d747f09d8a7 100644
--- a/test/CodeGen/NVPTX/bug17709.ll
+++ b/test/CodeGen/NVPTX/bug17709.ll
@@ -1,26 +1,26 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-
-; ModuleID = '__kernelgen_main_module'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-target triple = "nvptx64-nvidia-cuda"
-
-define private ptx_device { double, double } @__utils1_MOD_trace(%"struct.array2_complex(kind=8).43.5.57"* noalias %m) {
-entry:
- ;unreachable
- %t0 = insertvalue {double, double} undef, double 1.0, 0
- %t1 = insertvalue {double, double} %t0, double 1.0, 1
- ret { double, double } %t1
-}
-
-%struct.descriptor_dimension.0.52 = type { i64, i64, i64 }
-%"struct.array2_complex(kind=8).37.18.70" = type { i8*, i64, i64, [2 x %struct.descriptor_dimension.0.52] }
-%"struct.array2_complex(kind=8).43.5.57" = type { i8*, i64, i64, [2 x %struct.descriptor_dimension.0.52] }
-@replacementOfAlloca8 = private global %"struct.array2_complex(kind=8).37.18.70" zeroinitializer, align 4096
-
-; CHECK: .visible .entry __kernelgen_main
-define ptx_kernel void @__kernelgen_main(i32* nocapture %args, i32*) {
-entry:
- %1 = tail call ptx_device { double, double } bitcast ({ double, double } (%"struct.array2_complex(kind=8).43.5.57"*)* @__utils1_MOD_trace to { double, double } (%"struct.array2_complex(kind=8).37.18.70"*)*)(%"struct.array2_complex(kind=8).37.18.70"* noalias @replacementOfAlloca8)
- ret void
-}
-
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
+
+; ModuleID = '__kernelgen_main_module'
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+define private ptx_device { double, double } @__utils1_MOD_trace(%"struct.array2_complex(kind=8).43.5.57"* noalias %m) {
+entry:
+ ;unreachable
+ %t0 = insertvalue {double, double} undef, double 1.0, 0
+ %t1 = insertvalue {double, double} %t0, double 1.0, 1
+ ret { double, double } %t1
+}
+
+%struct.descriptor_dimension.0.52 = type { i64, i64, i64 }
+%"struct.array2_complex(kind=8).37.18.70" = type { i8*, i64, i64, [2 x %struct.descriptor_dimension.0.52] }
+%"struct.array2_complex(kind=8).43.5.57" = type { i8*, i64, i64, [2 x %struct.descriptor_dimension.0.52] }
+@replacementOfAlloca8 = private global %"struct.array2_complex(kind=8).37.18.70" zeroinitializer, align 4096
+
+; CHECK: .visible .entry __kernelgen_main
+define ptx_kernel void @__kernelgen_main(i32* nocapture %args, i32*) {
+entry:
+ %1 = tail call ptx_device { double, double } bitcast ({ double, double } (%"struct.array2_complex(kind=8).43.5.57"*)* @__utils1_MOD_trace to { double, double } (%"struct.array2_complex(kind=8).37.18.70"*)*)(%"struct.array2_complex(kind=8).37.18.70"* noalias @replacementOfAlloca8)
+ ret void
+}
+
diff --git a/test/CodeGen/NVPTX/ctlz.ll b/test/CodeGen/NVPTX/ctlz.ll
index 005958bd938a..7aa29fe811dd 100644
--- a/test/CodeGen/NVPTX/ctlz.ll
+++ b/test/CodeGen/NVPTX/ctlz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
diff --git a/test/CodeGen/NVPTX/ctpop.ll b/test/CodeGen/NVPTX/ctpop.ll
index b961d4d27bdd..69a4f879a8d8 100644
--- a/test/CodeGen/NVPTX/ctpop.ll
+++ b/test/CodeGen/NVPTX/ctpop.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
diff --git a/test/CodeGen/NVPTX/cttz.ll b/test/CodeGen/NVPTX/cttz.ll
index 124ba9d1e9a7..0bfe0139bcdf 100644
--- a/test/CodeGen/NVPTX/cttz.ll
+++ b/test/CodeGen/NVPTX/cttz.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
diff --git a/test/CodeGen/NVPTX/f16-instructions.ll b/test/CodeGen/NVPTX/f16-instructions.ll
index 3d4140820794..08a2ee14e8bd 100644
--- a/test/CodeGen/NVPTX/f16-instructions.ll
+++ b/test/CodeGen/NVPTX/f16-instructions.ll
@@ -1,1078 +1,1079 @@
-; ## Full FP16 support enabled by default.
-; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN: -O0 -disable-post-ra -disable-fp-elim \
-; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16 %s
-; ## FP16 support explicitly disabled.
-; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN: -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
-; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
-; ## FP16 is not supported by hardware.
-; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
-; RUN: -disable-post-ra -disable-fp-elim \
-; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
-
-target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-
-; CHECK-LABEL: test_ret_const(
-; CHECK: mov.b16 [[R:%h[0-9]+]], 0x3C00;
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_ret_const() #0 {
- ret half 1.0
-}
-
-; CHECK-LABEL: test_fadd(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fadd_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_param_1];
-; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fadd(half %a, half %b) #0 {
- %r = fadd half %a, %b
- ret half %r
-}
-
-; CHECK-LABEL: test_fadd_v1f16(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fadd_v1f16_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_v1f16_param_1];
-; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <1 x half> @test_fadd_v1f16(<1 x half> %a, <1 x half> %b) #0 {
- %r = fadd <1 x half> %a, %b
- ret <1 x half> %r
-}
-
-; Check that we can lower fadd with immediate arguments.
-; CHECK-LABEL: test_fadd_imm_0(
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_imm_0_param_0];
-; CHECK-F16-DAG: mov.b16 [[A:%h[0-9]+]], 0x3C00;
-; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000;
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fadd_imm_0(half %b) #0 {
- %r = fadd half 1.0, %b
- ret half %r
-}
-
-; CHECK-LABEL: test_fadd_imm_1(
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_imm_1_param_0];
-; CHECK-F16-DAG: mov.b16 [[A:%h[0-9]+]], 0x3C00;
-; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000;
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fadd_imm_1(half %a) #0 {
- %r = fadd half %a, 1.0
- ret half %r
-}
-
-; CHECK-LABEL: test_fsub(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fsub_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fsub_param_1];
-; CHECK-F16-NEXT: sub.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fsub(half %a, half %b) #0 {
- %r = fsub half %a, %b
- ret half %r
-}
-
-; CHECK-LABEL: test_fneg(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fneg_param_0];
-; CHECK-F16-NEXT: mov.b16 [[Z:%h[0-9]+]], 0x0000
-; CHECK-F16-NEXT: sub.rn.f16 [[R:%h[0-9]+]], [[Z]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000;
-; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[Z]], [[A32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fneg(half %a) #0 {
- %r = fsub half 0.0, %a
- ret half %r
-}
-
-; CHECK-LABEL: test_fmul(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fmul_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fmul_param_1];
-; CHECK-F16-NEXT: mul.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-NEXT: mul.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fmul(half %a, half %b) #0 {
- %r = fmul half %a, %b
- ret half %r
-}
-
-; CHECK-LABEL: test_fdiv(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fdiv_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fdiv_param_1];
-; CHECK-DAG: cvt.f32.f16 [[F0:%f[0-9]+]], [[A]];
-; CHECK-DAG: cvt.f32.f16 [[F1:%f[0-9]+]], [[B]];
-; CHECK-NEXT: div.rn.f32 [[FR:%f[0-9]+]], [[F0]], [[F1]];
-; CHECK-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[FR]];
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_fdiv(half %a, half %b) #0 {
- %r = fdiv half %a, %b
- ret half %r
-}
-
-; CHECK-LABEL: test_frem(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_frem_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_frem_param_1];
-; CHECK-DAG: cvt.f32.f16 [[FA:%f[0-9]+]], [[A]];
-; CHECK-DAG: cvt.f32.f16 [[FB:%f[0-9]+]], [[B]];
-; CHECK-NEXT: div.rn.f32 [[D:%f[0-9]+]], [[FA]], [[FB]];
-; CHECK-NEXT: cvt.rmi.f32.f32 [[DI:%f[0-9]+]], [[D]];
-; CHECK-NEXT: mul.f32 [[RI:%f[0-9]+]], [[DI]], [[FB]];
-; CHECK-NEXT: sub.f32 [[RF:%f[0-9]+]], [[FA]], [[RI]];
-; CHECK-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_frem(half %a, half %b) #0 {
- %r = frem half %a, %b
- ret half %r
-}
-
-; CHECK-LABEL: test_store(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_store_param_0];
-; CHECK-DAG: ld.param.u64 %[[PTR:rd[0-9]+]], [test_store_param_1];
-; CHECK-NEXT: st.b16 [%[[PTR]]], [[A]];
-; CHECK-NEXT: ret;
-define void @test_store(half %a, half* %b) #0 {
- store half %a, half* %b
- ret void
-}
-
-; CHECK-LABEL: test_load(
-; CHECK: ld.param.u64 %[[PTR:rd[0-9]+]], [test_load_param_0];
-; CHECK-NEXT: ld.b16 [[R:%h[0-9]+]], [%[[PTR]]];
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_load(half* %a) #0 {
- %r = load half, half* %a
- ret half %r
-}
-
-; CHECK-LABEL: .visible .func test_halfp0a1(
-; CHECK-DAG: ld.param.u64 %[[FROM:rd?[0-9]+]], [test_halfp0a1_param_0];
-; CHECK-DAG: ld.param.u64 %[[TO:rd?[0-9]+]], [test_halfp0a1_param_1];
-; CHECK-DAG: ld.u8 [[B0:%r[sd]?[0-9]+]], [%[[FROM]]]
-; CHECK-DAG: st.u8 [%[[TO]]], [[B0]]
-; CHECK-DAG: ld.u8 [[B1:%r[sd]?[0-9]+]], [%[[FROM]]+1]
-; CHECK-DAG: st.u8 [%[[TO]]+1], [[B1]]
-; CHECK: ret
-define void @test_halfp0a1(half * noalias readonly %from, half * %to) {
- %1 = load half, half * %from , align 1
- store half %1, half * %to , align 1
- ret void
-}
-
-declare half @test_callee(half %a, half %b) #0
-
-; CHECK-LABEL: test_call(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_call_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_call_param_1];
-; CHECK: {
-; CHECK-DAG: .param .b32 param0;
-; CHECK-DAG: .param .b32 param1;
-; CHECK-DAG: st.param.b16 [param0+0], [[A]];
-; CHECK-DAG: st.param.b16 [param1+0], [[B]];
-; CHECK-DAG: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_callee,
-; CHECK: );
-; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
-; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_call(half %a, half %b) #0 {
- %r = call half @test_callee(half %a, half %b)
- ret half %r
-}
-
-; CHECK-LABEL: test_call_flipped(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_call_flipped_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_call_flipped_param_1];
-; CHECK: {
-; CHECK-DAG: .param .b32 param0;
-; CHECK-DAG: .param .b32 param1;
-; CHECK-DAG: st.param.b16 [param0+0], [[B]];
-; CHECK-DAG: st.param.b16 [param1+0], [[A]];
-; CHECK-DAG: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_callee,
-; CHECK: );
-; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
-; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_call_flipped(half %a, half %b) #0 {
- %r = call half @test_callee(half %b, half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_tailcall_flipped(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_tailcall_flipped_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_tailcall_flipped_param_1];
-; CHECK: {
-; CHECK-DAG: .param .b32 param0;
-; CHECK-DAG: .param .b32 param1;
-; CHECK-DAG: st.param.b16 [param0+0], [[B]];
-; CHECK-DAG: st.param.b16 [param1+0], [[A]];
-; CHECK-DAG: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_callee,
-; CHECK: );
-; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
-; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_tailcall_flipped(half %a, half %b) #0 {
- %r = tail call half @test_callee(half %b, half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_select(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_param_1];
-; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
-; CHECK-NEXT: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_select(half %a, half %b, i1 zeroext %c) #0 {
- %r = select i1 %c, half %a, half %b
- ret half %r
-}
-
-; CHECK-LABEL: test_select_cc(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_cc_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_cc_param_1];
-; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_select_cc_param_2];
-; CHECK-DAG: ld.param.b16 [[D:%h[0-9]+]], [test_select_cc_param_3];
-; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[C]], [[D]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[DF:%f[0-9]+]], [[D]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]];
-; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]]
-; CHECK: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_select_cc(half %a, half %b, half %c, half %d) #0 {
- %cc = fcmp une half %c, %d
- %r = select i1 %cc, half %a, half %b
- ret half %r
-}
-
-; CHECK-LABEL: test_select_cc_f32_f16(
-; CHECK-DAG: ld.param.f32 [[A:%f[0-9]+]], [test_select_cc_f32_f16_param_0];
-; CHECK-DAG: ld.param.f32 [[B:%f[0-9]+]], [test_select_cc_f32_f16_param_1];
-; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_select_cc_f32_f16_param_2];
-; CHECK-DAG: ld.param.b16 [[D:%h[0-9]+]], [test_select_cc_f32_f16_param_3];
-; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[C]], [[D]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[DF:%f[0-9]+]], [[D]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]];
-; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]]
-; CHECK-NEXT: selp.f32 [[R:%f[0-9]+]], [[A]], [[B]], [[PRED]];
-; CHECK-NEXT: st.param.f32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define float @test_select_cc_f32_f16(float %a, float %b, half %c, half %d) #0 {
- %cc = fcmp une half %c, %d
- %r = select i1 %cc, float %a, float %b
- ret float %r
-}
-
-; CHECK-LABEL: test_select_cc_f16_f32(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_cc_f16_f32_param_0];
-; CHECK-DAG: ld.param.f32 [[C:%f[0-9]+]], [test_select_cc_f16_f32_param_2];
-; CHECK-DAG: ld.param.f32 [[D:%f[0-9]+]], [test_select_cc_f16_f32_param_3];
-; CHECK-DAG: setp.neu.f32 [[PRED:%p[0-9]+]], [[C]], [[D]]
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_cc_f16_f32_param_1];
-; CHECK-NEXT: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
-; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define half @test_select_cc_f16_f32(half %a, half %b, float %c, float %d) #0 {
- %cc = fcmp une float %c, %d
- %r = select i1 %cc, half %a, half %b
- ret half %r
-}
-
-; CHECK-LABEL: test_fcmp_une(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_une_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_une_param_1];
-; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_une(half %a, half %b) #0 {
- %r = fcmp une half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_ueq(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ueq_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ueq_param_1];
-; CHECK-F16: setp.equ.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.equ.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ueq(half %a, half %b) #0 {
- %r = fcmp ueq half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_ugt(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ugt_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ugt_param_1];
-; CHECK-F16: setp.gtu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.gtu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ugt(half %a, half %b) #0 {
- %r = fcmp ugt half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_uge(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_uge_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_uge_param_1];
-; CHECK-F16: setp.geu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.geu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_uge(half %a, half %b) #0 {
- %r = fcmp uge half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_ult(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ult_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ult_param_1];
-; CHECK-F16: setp.ltu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.ltu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ult(half %a, half %b) #0 {
- %r = fcmp ult half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_ule(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ule_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ule_param_1];
-; CHECK-F16: setp.leu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.leu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ule(half %a, half %b) #0 {
- %r = fcmp ule half %a, %b
- ret i1 %r
-}
-
-
-; CHECK-LABEL: test_fcmp_uno(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_uno_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_uno_param_1];
-; CHECK-F16: setp.nan.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.nan.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_uno(half %a, half %b) #0 {
- %r = fcmp uno half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_one(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_one_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_one_param_1];
-; CHECK-F16: setp.ne.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.ne.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_one(half %a, half %b) #0 {
- %r = fcmp one half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_oeq(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_oeq_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_oeq_param_1];
-; CHECK-F16: setp.eq.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.eq.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_oeq(half %a, half %b) #0 {
- %r = fcmp oeq half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_ogt(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ogt_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ogt_param_1];
-; CHECK-F16: setp.gt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.gt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ogt(half %a, half %b) #0 {
- %r = fcmp ogt half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_oge(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_oge_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_oge_param_1];
-; CHECK-F16: setp.ge.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.ge.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_oge(half %a, half %b) #0 {
- %r = fcmp oge half %a, %b
- ret i1 %r
-}
-
-; XCHECK-LABEL: test_fcmp_olt(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_olt_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_olt_param_1];
-; CHECK-F16: setp.lt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_olt(half %a, half %b) #0 {
- %r = fcmp olt half %a, %b
- ret i1 %r
-}
-
-; XCHECK-LABEL: test_fcmp_ole(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ole_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ole_param_1];
-; CHECK-F16: setp.le.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.le.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ole(half %a, half %b) #0 {
- %r = fcmp ole half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_fcmp_ord(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ord_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ord_param_1];
-; CHECK-F16: setp.num.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.num.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i1 @test_fcmp_ord(half %a, half %b) #0 {
- %r = fcmp ord half %a, %b
- ret i1 %r
-}
-
-; CHECK-LABEL: test_br_cc(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_br_cc_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_br_cc_param_1];
-; CHECK-DAG: ld.param.u64 %[[C:rd[0-9]+]], [test_br_cc_param_2];
-; CHECK-DAG: ld.param.u64 %[[D:rd[0-9]+]], [test_br_cc_param_3];
-; CHECK-F16: setp.lt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
-; CHECK-NEXT: @[[PRED]] bra [[LABEL:LBB.*]];
-; CHECK: st.u32 [%[[C]]],
-; CHECK: [[LABEL]]:
-; CHECK: st.u32 [%[[D]]],
-; CHECK: ret;
-define void @test_br_cc(half %a, half %b, i32* %p1, i32* %p2) #0 {
- %c = fcmp uge half %a, %b
- br i1 %c, label %then, label %else
-then:
- store i32 0, i32* %p1
- ret void
-else:
- store i32 0, i32* %p2
- ret void
-}
-
-; CHECK-LABEL: test_phi(
-; CHECK: ld.param.u64 %[[P1:rd[0-9]+]], [test_phi_param_0];
-; CHECK: ld.b16 {{%h[0-9]+}}, [%[[P1]]];
-; CHECK: [[LOOP:LBB[0-9_]+]]:
-; CHECK: mov.b16 [[R:%h[0-9]+]], [[AB:%h[0-9]+]];
-; CHECK: ld.b16 [[AB:%h[0-9]+]], [%[[P1]]];
-; CHECK: {
-; CHECK: st.param.b64 [param0+0], %[[P1]];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_dummy
-; CHECK: }
-; CHECK: setp.eq.b32 [[PRED:%p[0-9]+]], %r{{[0-9]+}}, 1;
-; CHECK: @[[PRED]] bra [[LOOP]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_phi(half* %p1) #0 {
-entry:
- %a = load half, half* %p1
- br label %loop
-loop:
- %r = phi half [%a, %entry], [%b, %loop]
- %b = load half, half* %p1
- %c = call i1 @test_dummy(half* %p1)
- br i1 %c, label %loop, label %return
-return:
- ret half %r
-}
-declare i1 @test_dummy(half* %p1) #0
-
-; CHECK-LABEL: test_fptosi_i32(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptosi_i32_param_0];
-; CHECK: cvt.rzi.s32.f16 [[R:%r[0-9]+]], [[A]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define i32 @test_fptosi_i32(half %a) #0 {
- %r = fptosi half %a to i32
- ret i32 %r
-}
-
-; CHECK-LABEL: test_fptosi_i64(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptosi_i64_param_0];
-; CHECK: cvt.rzi.s64.f16 [[R:%rd[0-9]+]], [[A]];
-; CHECK: st.param.b64 [func_retval0+0], [[R]];
-; CHECK: ret;
-define i64 @test_fptosi_i64(half %a) #0 {
- %r = fptosi half %a to i64
- ret i64 %r
-}
-
-; CHECK-LABEL: test_fptoui_i32(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptoui_i32_param_0];
-; CHECK: cvt.rzi.u32.f16 [[R:%r[0-9]+]], [[A]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define i32 @test_fptoui_i32(half %a) #0 {
- %r = fptoui half %a to i32
- ret i32 %r
-}
-
-; CHECK-LABEL: test_fptoui_i64(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptoui_i64_param_0];
-; CHECK: cvt.rzi.u64.f16 [[R:%rd[0-9]+]], [[A]];
-; CHECK: st.param.b64 [func_retval0+0], [[R]];
-; CHECK: ret;
-define i64 @test_fptoui_i64(half %a) #0 {
- %r = fptoui half %a to i64
- ret i64 %r
-}
-
-; CHECK-LABEL: test_uitofp_i32(
-; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_param_0];
-; CHECK: cvt.rn.f16.u32 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_uitofp_i32(i32 %a) #0 {
- %r = uitofp i32 %a to half
- ret half %r
-}
-
-; CHECK-LABEL: test_uitofp_i64(
-; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_uitofp_i64_param_0];
-; CHECK: cvt.rn.f16.u64 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_uitofp_i64(i64 %a) #0 {
- %r = uitofp i64 %a to half
- ret half %r
-}
-
-; CHECK-LABEL: test_sitofp_i32(
-; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_param_0];
-; CHECK: cvt.rn.f16.s32 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_sitofp_i32(i32 %a) #0 {
- %r = sitofp i32 %a to half
- ret half %r
-}
-
-; CHECK-LABEL: test_sitofp_i64(
-; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_sitofp_i64_param_0];
-; CHECK: cvt.rn.f16.s64 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_sitofp_i64(i64 %a) #0 {
- %r = sitofp i64 %a to half
- ret half %r
-}
-
-; CHECK-LABEL: test_uitofp_i32_fadd(
-; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_fadd_param_0];
-; CHECK-DAG: cvt.rn.f16.u32 [[C:%h[0-9]+]], [[A]];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_uitofp_i32_fadd_param_1];
-; CHECK-F16: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[C]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
-; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_uitofp_i32_fadd(i32 %a, half %b) #0 {
- %c = uitofp i32 %a to half
- %r = fadd half %b, %c
- ret half %r
-}
-
-; CHECK-LABEL: test_sitofp_i32_fadd(
-; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_fadd_param_0];
-; CHECK-DAG: cvt.rn.f16.s32 [[C:%h[0-9]+]], [[A]];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_sitofp_i32_fadd_param_1];
-; CHECK-F16: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[C]];
-; XCHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; XCHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
-; XCHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]];
-; XCHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 {
- %c = sitofp i32 %a to half
- %r = fadd half %b, %c
- ret half %r
-}
-
-; CHECK-LABEL: test_fptrunc_float(
-; CHECK: ld.param.f32 [[A:%f[0-9]+]], [test_fptrunc_float_param_0];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_fptrunc_float(float %a) #0 {
- %r = fptrunc float %a to half
- ret half %r
-}
-
-; CHECK-LABEL: test_fptrunc_double(
-; CHECK: ld.param.f64 [[A:%fd[0-9]+]], [test_fptrunc_double_param_0];
-; CHECK: cvt.rn.f16.f64 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_fptrunc_double(double %a) #0 {
- %r = fptrunc double %a to half
- ret half %r
-}
-
-; CHECK-LABEL: test_fpext_float(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fpext_float_param_0];
-; CHECK: cvt.f32.f16 [[R:%f[0-9]+]], [[A]];
-; CHECK: st.param.f32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define float @test_fpext_float(half %a) #0 {
- %r = fpext half %a to float
- ret float %r
-}
-
-; CHECK-LABEL: test_fpext_double(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fpext_double_param_0];
-; CHECK: cvt.f64.f16 [[R:%fd[0-9]+]], [[A]];
-; CHECK: st.param.f64 [func_retval0+0], [[R]];
-; CHECK: ret;
-define double @test_fpext_double(half %a) #0 {
- %r = fpext half %a to double
- ret double %r
-}
-
-
-; CHECK-LABEL: test_bitcast_halftoi16(
-; CHECK: ld.param.b16 [[AH:%h[0-9]+]], [test_bitcast_halftoi16_param_0];
-; CHECK: mov.b16 [[AS:%rs[0-9]+]], [[AH]]
-; CHECK: cvt.u32.u16 [[R:%r[0-9]+]], [[AS]]
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define i16 @test_bitcast_halftoi16(half %a) #0 {
- %r = bitcast half %a to i16
- ret i16 %r
-}
-
-; CHECK-LABEL: test_bitcast_i16tohalf(
-; CHECK: ld.param.u16 [[AS:%rs[0-9]+]], [test_bitcast_i16tohalf_param_0];
-; CHECK: mov.b16 [[AH:%h[0-9]+]], [[AS]]
-; CHECK: st.param.b16 [func_retval0+0], [[AH]];
-; CHECK: ret;
-define half @test_bitcast_i16tohalf(i16 %a) #0 {
- %r = bitcast i16 %a to half
- ret half %r
-}
-
-
-declare half @llvm.sqrt.f16(half %a) #0
-declare half @llvm.powi.f16(half %a, i32 %b) #0
-declare half @llvm.sin.f16(half %a) #0
-declare half @llvm.cos.f16(half %a) #0
-declare half @llvm.pow.f16(half %a, half %b) #0
-declare half @llvm.exp.f16(half %a) #0
-declare half @llvm.exp2.f16(half %a) #0
-declare half @llvm.log.f16(half %a) #0
-declare half @llvm.log10.f16(half %a) #0
-declare half @llvm.log2.f16(half %a) #0
-declare half @llvm.fma.f16(half %a, half %b, half %c) #0
-declare half @llvm.fabs.f16(half %a) #0
-declare half @llvm.minnum.f16(half %a, half %b) #0
-declare half @llvm.maxnum.f16(half %a, half %b) #0
-declare half @llvm.copysign.f16(half %a, half %b) #0
-declare half @llvm.floor.f16(half %a) #0
-declare half @llvm.ceil.f16(half %a) #0
-declare half @llvm.trunc.f16(half %a) #0
-declare half @llvm.rint.f16(half %a) #0
-declare half @llvm.nearbyint.f16(half %a) #0
-declare half @llvm.round.f16(half %a) #0
-declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
-
-; CHECK-LABEL: test_sqrt(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_sqrt_param_0];
-; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK: sqrt.rn.f32 [[RF:%f[0-9]+]], [[AF]];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_sqrt(half %a) #0 {
- %r = call half @llvm.sqrt.f16(half %a)
- ret half %r
-}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_powi(
-;define half @test_powi(half %a, i32 %b) #0 {
-; %r = call half @llvm.powi.f16(half %a, i32 %b)
-; ret half %r
-;}
-
-; CHECK-LABEL: test_sin(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_sin_param_0];
-; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK: sin.approx.f32 [[RF:%f[0-9]+]], [[AF]];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_sin(half %a) #0 #1 {
- %r = call half @llvm.sin.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_cos(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_cos_param_0];
-; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK: cos.approx.f32 [[RF:%f[0-9]+]], [[AF]];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_cos(half %a) #0 #1 {
- %r = call half @llvm.cos.f16(half %a)
- ret half %r
-}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_pow(
-;define half @test_pow(half %a, half %b) #0 {
-; %r = call half @llvm.pow.f16(half %a, half %b)
-; ret half %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_exp(
-;define half @test_exp(half %a) #0 {
-; %r = call half @llvm.exp.f16(half %a)
-; ret half %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_exp2(
-;define half @test_exp2(half %a) #0 {
-; %r = call half @llvm.exp2.f16(half %a)
-; ret half %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_log(
-;define half @test_log(half %a) #0 {
-; %r = call half @llvm.log.f16(half %a)
-; ret half %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_log10(
-;define half @test_log10(half %a) #0 {
-; %r = call half @llvm.log10.f16(half %a)
-; ret half %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_log2(
-;define half @test_log2(half %a) #0 {
-; %r = call half @llvm.log2.f16(half %a)
-; ret half %r
-;}
-
-; CHECK-LABEL: test_fma(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fma_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fma_param_1];
-; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_fma_param_2];
-; CHECK-F16: fma.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]], [[C]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
-; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret
-define half @test_fma(half %a, half %b, half %c) #0 {
- %r = call half @llvm.fma.f16(half %a, half %b, half %c)
- ret half %r
-}
-
-; CHECK-LABEL: test_fabs(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fabs_param_0];
-; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK: abs.f32 [[RF:%f[0-9]+]], [[AF]];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_fabs(half %a) #0 {
- %r = call half @llvm.fabs.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_minnum(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_minnum_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_minnum_param_1];
-; CHECK-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK: min.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_minnum(half %a, half %b) #0 {
- %r = call half @llvm.minnum.f16(half %a, half %b)
- ret half %r
-}
-
-; CHECK-LABEL: test_maxnum(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_maxnum_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_maxnum_param_1];
-; CHECK-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
-; CHECK-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
-; CHECK: max.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
-; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_maxnum(half %a, half %b) #0 {
- %r = call half @llvm.maxnum.f16(half %a, half %b)
- ret half %r
-}
-
-; CHECK-LABEL: test_copysign(
-; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_param_0];
-; CHECK-DAG: ld.param.b16 [[BH:%h[0-9]+]], [test_copysign_param_1];
-; CHECK-DAG: mov.b16 [[AS:%rs[0-9]+]], [[AH]];
-; CHECK-DAG: mov.b16 [[BS:%rs[0-9]+]], [[BH]];
-; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AS]], 32767;
-; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BS]], -32768;
-; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
-; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_copysign(half %a, half %b) #0 {
- %r = call half @llvm.copysign.f16(half %a, half %b)
- ret half %r
-}
-
-; CHECK-LABEL: test_copysign_f32(
-; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_f32_param_0];
-; CHECK-DAG: ld.param.f32 [[BF:%f[0-9]+]], [test_copysign_f32_param_1];
-; CHECK-DAG: mov.b16 [[A:%rs[0-9]+]], [[AH]];
-; CHECK-DAG: mov.b32 [[B:%r[0-9]+]], [[BF]];
-; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[A]], 32767;
-; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[B]], -2147483648;
-; CHECK-DAG: shr.u32 [[BX1:%r[0-9]+]], [[BX0]], 16;
-; CHECK-DAG: cvt.u16.u32 [[BX2:%rs[0-9]+]], [[BX1]];
-; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
-; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_copysign_f32(half %a, float %b) #0 {
- %tb = fptrunc float %b to half
- %r = call half @llvm.copysign.f16(half %a, half %tb)
- ret half %r
-}
-
-; CHECK-LABEL: test_copysign_f64(
-; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_f64_param_0];
-; CHECK-DAG: ld.param.f64 [[BD:%fd[0-9]+]], [test_copysign_f64_param_1];
-; CHECK-DAG: mov.b16 [[A:%rs[0-9]+]], [[AH]];
-; CHECK-DAG: mov.b64 [[B:%rd[0-9]+]], [[BD]];
-; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[A]], 32767;
-; CHECK-DAG: and.b64 [[BX0:%rd[0-9]+]], [[B]], -9223372036854775808;
-; CHECK-DAG: shr.u64 [[BX1:%rd[0-9]+]], [[BX0]], 48;
-; CHECK-DAG: cvt.u16.u64 [[BX2:%rs[0-9]+]], [[BX1]];
-; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
-; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_copysign_f64(half %a, double %b) #0 {
- %tb = fptrunc double %b to half
- %r = call half @llvm.copysign.f16(half %a, half %tb)
- ret half %r
-}
-
-; CHECK-LABEL: test_copysign_extended(
-; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_extended_param_0];
-; CHECK-DAG: ld.param.b16 [[BH:%h[0-9]+]], [test_copysign_extended_param_1];
-; CHECK-DAG: mov.b16 [[AS:%rs[0-9]+]], [[AH]];
-; CHECK-DAG: mov.b16 [[BS:%rs[0-9]+]], [[BH]];
-; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AS]], 32767;
-; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BS]], -32768;
-; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
-; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
-; CHECK: cvt.f32.f16 [[XR:%f[0-9]+]], [[R]];
-; CHECK: st.param.f32 [func_retval0+0], [[XR]];
-; CHECK: ret;
-define float @test_copysign_extended(half %a, half %b) #0 {
- %r = call half @llvm.copysign.f16(half %a, half %b)
- %xr = fpext half %r to float
- ret float %xr
-}
-
-; CHECK-LABEL: test_floor(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_floor_param_0];
-; CHECK: cvt.rmi.f16.f16 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_floor(half %a) #0 {
- %r = call half @llvm.floor.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_ceil(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_ceil_param_0];
-; CHECK: cvt.rpi.f16.f16 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_ceil(half %a) #0 {
- %r = call half @llvm.ceil.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_trunc(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_trunc_param_0];
-; CHECK: cvt.rzi.f16.f16 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_trunc(half %a) #0 {
- %r = call half @llvm.trunc.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_rint(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_rint_param_0];
-; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_rint(half %a) #0 {
- %r = call half @llvm.rint.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_nearbyint(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_nearbyint_param_0];
-; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_nearbyint(half %a) #0 {
- %r = call half @llvm.nearbyint.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_round(
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_round_param_0];
-; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_round(half %a) #0 {
- %r = call half @llvm.round.f16(half %a)
- ret half %r
-}
-
-; CHECK-LABEL: test_fmuladd(
-; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fmuladd_param_0];
-; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fmuladd_param_1];
-; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_fmuladd_param_2];
-; CHECK-F16: fma.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]], [[C]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
-; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
-; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_fmuladd(half %a, half %b, half %c) #0 {
- %r = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
- ret half %r
-}
-
-attributes #0 = { nounwind }
-attributes #1 = { "unsafe-fp-math" = "true" }
+; ## Full FP16 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16 %s
+; ## FP16 support explicitly disabled.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
+; RUN: -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+; ## FP16 is not supported by hardware.
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
+; RUN: -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: test_ret_const(
+; CHECK: mov.b16 [[R:%h[0-9]+]], 0x3C00;
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_ret_const() #0 {
+ ret half 1.0
+}
+
+; CHECK-LABEL: test_fadd(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fadd_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_param_1];
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fadd(half %a, half %b) #0 {
+ %r = fadd half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fadd_v1f16(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fadd_v1f16_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_v1f16_param_1];
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <1 x half> @test_fadd_v1f16(<1 x half> %a, <1 x half> %b) #0 {
+ %r = fadd <1 x half> %a, %b
+ ret <1 x half> %r
+}
+
+; Check that we can lower fadd with immediate arguments.
+; CHECK-LABEL: test_fadd_imm_0(
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_imm_0_param_0];
+; CHECK-F16-DAG: mov.b16 [[A:%h[0-9]+]], 0x3C00;
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000;
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fadd_imm_0(half %b) #0 {
+ %r = fadd half 1.0, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fadd_imm_1(
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_imm_1_param_0];
+; CHECK-F16-DAG: mov.b16 [[A:%h[0-9]+]], 0x3C00;
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000;
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fadd_imm_1(half %a) #0 {
+ %r = fadd half %a, 1.0
+ ret half %r
+}
+
+; CHECK-LABEL: test_fsub(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fsub_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fsub_param_1];
+; CHECK-F16-NEXT: sub.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fsub(half %a, half %b) #0 {
+ %r = fsub half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fneg(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fneg_param_0];
+; CHECK-F16-NEXT: mov.b16 [[Z:%h[0-9]+]], 0x0000
+; CHECK-F16-NEXT: sub.rn.f16 [[R:%h[0-9]+]], [[Z]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000;
+; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[Z]], [[A32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fneg(half %a) #0 {
+ %r = fsub half 0.0, %a
+ ret half %r
+}
+
+; CHECK-LABEL: test_fmul(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fmul_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fmul_param_1];
+; CHECK-F16-NEXT: mul.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: mul.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fmul(half %a, half %b) #0 {
+ %r = fmul half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fdiv(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fdiv_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fdiv_param_1];
+; CHECK-DAG: cvt.f32.f16 [[F0:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[F1:%f[0-9]+]], [[B]];
+; CHECK-NEXT: div.rn.f32 [[FR:%f[0-9]+]], [[F0]], [[F1]];
+; CHECK-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[FR]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fdiv(half %a, half %b) #0 {
+ %r = fdiv half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_frem(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_frem_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_frem_param_1];
+; CHECK-DAG: cvt.f32.f16 [[FA:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[FB:%f[0-9]+]], [[B]];
+; CHECK-NEXT: div.rn.f32 [[D:%f[0-9]+]], [[FA]], [[FB]];
+; CHECK-NEXT: cvt.rmi.f32.f32 [[DI:%f[0-9]+]], [[D]];
+; CHECK-NEXT: mul.f32 [[RI:%f[0-9]+]], [[DI]], [[FB]];
+; CHECK-NEXT: sub.f32 [[RF:%f[0-9]+]], [[FA]], [[RI]];
+; CHECK-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_frem(half %a, half %b) #0 {
+ %r = frem half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_store(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_store_param_0];
+; CHECK-DAG: ld.param.u64 %[[PTR:rd[0-9]+]], [test_store_param_1];
+; CHECK-NEXT: st.b16 [%[[PTR]]], [[A]];
+; CHECK-NEXT: ret;
+define void @test_store(half %a, half* %b) #0 {
+ store half %a, half* %b
+ ret void
+}
+
+; CHECK-LABEL: test_load(
+; CHECK: ld.param.u64 %[[PTR:rd[0-9]+]], [test_load_param_0];
+; CHECK-NEXT: ld.b16 [[R:%h[0-9]+]], [%[[PTR]]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_load(half* %a) #0 {
+ %r = load half, half* %a
+ ret half %r
+}
+
+; CHECK-LABEL: .visible .func test_halfp0a1(
+; CHECK-DAG: ld.param.u64 %[[FROM:rd?[0-9]+]], [test_halfp0a1_param_0];
+; CHECK-DAG: ld.param.u64 %[[TO:rd?[0-9]+]], [test_halfp0a1_param_1];
+; CHECK-DAG: ld.u8 [[B0:%r[sd]?[0-9]+]], [%[[FROM]]]
+; CHECK-DAG: st.u8 [%[[TO]]], [[B0]]
+; CHECK-DAG: ld.u8 [[B1:%r[sd]?[0-9]+]], [%[[FROM]]+1]
+; CHECK-DAG: st.u8 [%[[TO]]+1], [[B1]]
+; CHECK: ret
+define void @test_halfp0a1(half * noalias readonly %from, half * %to) {
+ %1 = load half, half * %from , align 1
+ store half %1, half * %to , align 1
+ ret void
+}
+
+declare half @test_callee(half %a, half %b) #0
+
+; CHECK-LABEL: test_call(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_call_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_call_param_1];
+; CHECK: {
+; CHECK-DAG: .param .b32 param0;
+; CHECK-DAG: .param .b32 param1;
+; CHECK-DAG: st.param.b16 [param0+0], [[A]];
+; CHECK-DAG: st.param.b16 [param1+0], [[B]];
+; CHECK-DAG: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_call(half %a, half %b) #0 {
+ %r = call half @test_callee(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_call_flipped(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_call_flipped_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_call_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .b32 param0;
+; CHECK-DAG: .param .b32 param1;
+; CHECK-DAG: st.param.b16 [param0+0], [[B]];
+; CHECK-DAG: st.param.b16 [param1+0], [[A]];
+; CHECK-DAG: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_call_flipped(half %a, half %b) #0 {
+ %r = call half @test_callee(half %b, half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_tailcall_flipped_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_tailcall_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .b32 param0;
+; CHECK-DAG: .param .b32 param1;
+; CHECK-DAG: st.param.b16 [param0+0], [[B]];
+; CHECK-DAG: st.param.b16 [param1+0], [[A]];
+; CHECK-DAG: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_tailcall_flipped(half %a, half %b) #0 {
+ %r = tail call half @test_callee(half %b, half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_select(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_param_1];
+; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
+; CHECK-NEXT: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_select(half %a, half %b, i1 zeroext %c) #0 {
+ %r = select i1 %c, half %a, half %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_select_cc(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_cc_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_cc_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_select_cc_param_2];
+; CHECK-DAG: ld.param.b16 [[D:%h[0-9]+]], [test_select_cc_param_3];
+; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[C]], [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF:%f[0-9]+]], [[D]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]];
+; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]]
+; CHECK: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_select_cc(half %a, half %b, half %c, half %d) #0 {
+ %cc = fcmp une half %c, %d
+ %r = select i1 %cc, half %a, half %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_select_cc_f32_f16(
+; CHECK-DAG: ld.param.f32 [[A:%f[0-9]+]], [test_select_cc_f32_f16_param_0];
+; CHECK-DAG: ld.param.f32 [[B:%f[0-9]+]], [test_select_cc_f32_f16_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_select_cc_f32_f16_param_2];
+; CHECK-DAG: ld.param.b16 [[D:%h[0-9]+]], [test_select_cc_f32_f16_param_3];
+; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[C]], [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF:%f[0-9]+]], [[D]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]];
+; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]]
+; CHECK-NEXT: selp.f32 [[R:%f[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.f32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define float @test_select_cc_f32_f16(float %a, float %b, half %c, half %d) #0 {
+ %cc = fcmp une half %c, %d
+ %r = select i1 %cc, float %a, float %b
+ ret float %r
+}
+
+; CHECK-LABEL: test_select_cc_f16_f32(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_cc_f16_f32_param_0];
+; CHECK-DAG: ld.param.f32 [[C:%f[0-9]+]], [test_select_cc_f16_f32_param_2];
+; CHECK-DAG: ld.param.f32 [[D:%f[0-9]+]], [test_select_cc_f16_f32_param_3];
+; CHECK-DAG: setp.neu.f32 [[PRED:%p[0-9]+]], [[C]], [[D]]
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_cc_f16_f32_param_1];
+; CHECK-NEXT: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_select_cc_f16_f32(half %a, half %b, float %c, float %d) #0 {
+ %cc = fcmp une float %c, %d
+ %r = select i1 %cc, half %a, half %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fcmp_une(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_une_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_une_param_1];
+; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_une(half %a, half %b) #0 {
+ %r = fcmp une half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ueq(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ueq_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ueq_param_1];
+; CHECK-F16: setp.equ.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.equ.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ueq(half %a, half %b) #0 {
+ %r = fcmp ueq half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ugt(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ugt_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ugt_param_1];
+; CHECK-F16: setp.gtu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.gtu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ugt(half %a, half %b) #0 {
+ %r = fcmp ugt half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_uge(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_uge_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_uge_param_1];
+; CHECK-F16: setp.geu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.geu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_uge(half %a, half %b) #0 {
+ %r = fcmp uge half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ult(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ult_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ult_param_1];
+; CHECK-F16: setp.ltu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.ltu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ult(half %a, half %b) #0 {
+ %r = fcmp ult half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ule(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ule_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ule_param_1];
+; CHECK-F16: setp.leu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.leu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ule(half %a, half %b) #0 {
+ %r = fcmp ule half %a, %b
+ ret i1 %r
+}
+
+
+; CHECK-LABEL: test_fcmp_uno(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_uno_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_uno_param_1];
+; CHECK-F16: setp.nan.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.nan.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_uno(half %a, half %b) #0 {
+ %r = fcmp uno half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_one(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_one_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_one_param_1];
+; CHECK-F16: setp.ne.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.ne.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_one(half %a, half %b) #0 {
+ %r = fcmp one half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_oeq(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_oeq_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_oeq_param_1];
+; CHECK-F16: setp.eq.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.eq.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_oeq(half %a, half %b) #0 {
+ %r = fcmp oeq half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ogt(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ogt_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ogt_param_1];
+; CHECK-F16: setp.gt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.gt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ogt(half %a, half %b) #0 {
+ %r = fcmp ogt half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_oge(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_oge_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_oge_param_1];
+; CHECK-F16: setp.ge.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.ge.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_oge(half %a, half %b) #0 {
+ %r = fcmp oge half %a, %b
+ ret i1 %r
+}
+
+; XCHECK-LABEL: test_fcmp_olt(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_olt_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_olt_param_1];
+; CHECK-F16: setp.lt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_olt(half %a, half %b) #0 {
+ %r = fcmp olt half %a, %b
+ ret i1 %r
+}
+
+; XCHECK-LABEL: test_fcmp_ole(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ole_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ole_param_1];
+; CHECK-F16: setp.le.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.le.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ole(half %a, half %b) #0 {
+ %r = fcmp ole half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ord(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ord_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ord_param_1];
+; CHECK-F16: setp.num.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.num.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ord(half %a, half %b) #0 {
+ %r = fcmp ord half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_br_cc(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_br_cc_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_br_cc_param_1];
+; CHECK-DAG: ld.param.u64 %[[C:rd[0-9]+]], [test_br_cc_param_2];
+; CHECK-DAG: ld.param.u64 %[[D:rd[0-9]+]], [test_br_cc_param_3];
+; CHECK-F16: setp.lt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: @[[PRED]] bra [[LABEL:LBB.*]];
+; CHECK: st.u32 [%[[C]]],
+; CHECK: [[LABEL]]:
+; CHECK: st.u32 [%[[D]]],
+; CHECK: ret;
+define void @test_br_cc(half %a, half %b, i32* %p1, i32* %p2) #0 {
+ %c = fcmp uge half %a, %b
+ br i1 %c, label %then, label %else
+then:
+ store i32 0, i32* %p1
+ ret void
+else:
+ store i32 0, i32* %p2
+ ret void
+}
+
+; CHECK-LABEL: test_phi(
+; CHECK: ld.param.u64 %[[P1:rd[0-9]+]], [test_phi_param_0];
+; CHECK: ld.b16 {{%h[0-9]+}}, [%[[P1]]];
+; CHECK: [[LOOP:LBB[0-9_]+]]:
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[AB:%h[0-9]+]];
+; CHECK: ld.b16 [[AB:%h[0-9]+]], [%[[P1]]];
+; CHECK: {
+; CHECK: st.param.b64 [param0+0], %[[P1]];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_dummy
+; CHECK: }
+; CHECK: setp.eq.b32 [[PRED:%p[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: @[[PRED]] bra [[LOOP]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_phi(half* %p1) #0 {
+entry:
+ %a = load half, half* %p1
+ br label %loop
+loop:
+ %r = phi half [%a, %entry], [%b, %loop]
+ %b = load half, half* %p1
+ %c = call i1 @test_dummy(half* %p1)
+ br i1 %c, label %loop, label %return
+return:
+ ret half %r
+}
+declare i1 @test_dummy(half* %p1) #0
+
+; CHECK-LABEL: test_fptosi_i32(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptosi_i32_param_0];
+; CHECK: cvt.rzi.s32.f16 [[R:%r[0-9]+]], [[A]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i32 @test_fptosi_i32(half %a) #0 {
+ %r = fptosi half %a to i32
+ ret i32 %r
+}
+
+; CHECK-LABEL: test_fptosi_i64(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptosi_i64_param_0];
+; CHECK: cvt.rzi.s64.f16 [[R:%rd[0-9]+]], [[A]];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i64 @test_fptosi_i64(half %a) #0 {
+ %r = fptosi half %a to i64
+ ret i64 %r
+}
+
+; CHECK-LABEL: test_fptoui_i32(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptoui_i32_param_0];
+; CHECK: cvt.rzi.u32.f16 [[R:%r[0-9]+]], [[A]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i32 @test_fptoui_i32(half %a) #0 {
+ %r = fptoui half %a to i32
+ ret i32 %r
+}
+
+; CHECK-LABEL: test_fptoui_i64(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptoui_i64_param_0];
+; CHECK: cvt.rzi.u64.f16 [[R:%rd[0-9]+]], [[A]];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i64 @test_fptoui_i64(half %a) #0 {
+ %r = fptoui half %a to i64
+ ret i64 %r
+}
+
+; CHECK-LABEL: test_uitofp_i32(
+; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_param_0];
+; CHECK: cvt.rn.f16.u32 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_uitofp_i32(i32 %a) #0 {
+ %r = uitofp i32 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_uitofp_i64(
+; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_uitofp_i64_param_0];
+; CHECK: cvt.rn.f16.u64 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_uitofp_i64(i64 %a) #0 {
+ %r = uitofp i64 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_sitofp_i32(
+; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_param_0];
+; CHECK: cvt.rn.f16.s32 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sitofp_i32(i32 %a) #0 {
+ %r = sitofp i32 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_sitofp_i64(
+; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_sitofp_i64_param_0];
+; CHECK: cvt.rn.f16.s64 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sitofp_i64(i64 %a) #0 {
+ %r = sitofp i64 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_uitofp_i32_fadd(
+; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_fadd_param_0];
+; CHECK-DAG: cvt.rn.f16.u32 [[C:%h[0-9]+]], [[A]];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_uitofp_i32_fadd_param_1];
+; CHECK-F16: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[C]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_uitofp_i32_fadd(i32 %a, half %b) #0 {
+ %c = uitofp i32 %a to half
+ %r = fadd half %b, %c
+ ret half %r
+}
+
+; CHECK-LABEL: test_sitofp_i32_fadd(
+; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_fadd_param_0];
+; CHECK-DAG: cvt.rn.f16.s32 [[C:%h[0-9]+]], [[A]];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_sitofp_i32_fadd_param_1];
+; CHECK-F16: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[C]];
+; XCHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; XCHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; XCHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]];
+; XCHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 {
+ %c = sitofp i32 %a to half
+ %r = fadd half %b, %c
+ ret half %r
+}
+
+; CHECK-LABEL: test_fptrunc_float(
+; CHECK: ld.param.f32 [[A:%f[0-9]+]], [test_fptrunc_float_param_0];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fptrunc_float(float %a) #0 {
+ %r = fptrunc float %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_fptrunc_double(
+; CHECK: ld.param.f64 [[A:%fd[0-9]+]], [test_fptrunc_double_param_0];
+; CHECK: cvt.rn.f16.f64 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fptrunc_double(double %a) #0 {
+ %r = fptrunc double %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_fpext_float(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fpext_float_param_0];
+; CHECK: cvt.f32.f16 [[R:%f[0-9]+]], [[A]];
+; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define float @test_fpext_float(half %a) #0 {
+ %r = fpext half %a to float
+ ret float %r
+}
+
+; CHECK-LABEL: test_fpext_double(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fpext_double_param_0];
+; CHECK: cvt.f64.f16 [[R:%fd[0-9]+]], [[A]];
+; CHECK: st.param.f64 [func_retval0+0], [[R]];
+; CHECK: ret;
+define double @test_fpext_double(half %a) #0 {
+ %r = fpext half %a to double
+ ret double %r
+}
+
+
+; CHECK-LABEL: test_bitcast_halftoi16(
+; CHECK: ld.param.b16 [[AH:%h[0-9]+]], [test_bitcast_halftoi16_param_0];
+; CHECK: mov.b16 [[AS:%rs[0-9]+]], [[AH]]
+; CHECK: cvt.u32.u16 [[R:%r[0-9]+]], [[AS]]
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i16 @test_bitcast_halftoi16(half %a) #0 {
+ %r = bitcast half %a to i16
+ ret i16 %r
+}
+
+; CHECK-LABEL: test_bitcast_i16tohalf(
+; CHECK: ld.param.u16 [[AS:%rs[0-9]+]], [test_bitcast_i16tohalf_param_0];
+; CHECK: mov.b16 [[AH:%h[0-9]+]], [[AS]]
+; CHECK: st.param.b16 [func_retval0+0], [[AH]];
+; CHECK: ret;
+define half @test_bitcast_i16tohalf(i16 %a) #0 {
+ %r = bitcast i16 %a to half
+ ret half %r
+}
+
+
+declare half @llvm.sqrt.f16(half %a) #0
+declare half @llvm.powi.f16(half %a, i32 %b) #0
+declare half @llvm.sin.f16(half %a) #0
+declare half @llvm.cos.f16(half %a) #0
+declare half @llvm.pow.f16(half %a, half %b) #0
+declare half @llvm.exp.f16(half %a) #0
+declare half @llvm.exp2.f16(half %a) #0
+declare half @llvm.log.f16(half %a) #0
+declare half @llvm.log10.f16(half %a) #0
+declare half @llvm.log2.f16(half %a) #0
+declare half @llvm.fma.f16(half %a, half %b, half %c) #0
+declare half @llvm.fabs.f16(half %a) #0
+declare half @llvm.minnum.f16(half %a, half %b) #0
+declare half @llvm.maxnum.f16(half %a, half %b) #0
+declare half @llvm.copysign.f16(half %a, half %b) #0
+declare half @llvm.floor.f16(half %a) #0
+declare half @llvm.ceil.f16(half %a) #0
+declare half @llvm.trunc.f16(half %a) #0
+declare half @llvm.rint.f16(half %a) #0
+declare half @llvm.nearbyint.f16(half %a) #0
+declare half @llvm.round.f16(half %a) #0
+declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
+
+; CHECK-LABEL: test_sqrt(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_sqrt_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: sqrt.rn.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sqrt(half %a) #0 {
+ %r = call half @llvm.sqrt.f16(half %a)
+ ret half %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_powi(
+;define half @test_powi(half %a, i32 %b) #0 {
+; %r = call half @llvm.powi.f16(half %a, i32 %b)
+; ret half %r
+;}
+
+; CHECK-LABEL: test_sin(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_sin_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: sin.approx.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sin(half %a) #0 #1 {
+ %r = call half @llvm.sin.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_cos(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_cos_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: cos.approx.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_cos(half %a) #0 #1 {
+ %r = call half @llvm.cos.f16(half %a)
+ ret half %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_pow(
+;define half @test_pow(half %a, half %b) #0 {
+; %r = call half @llvm.pow.f16(half %a, half %b)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp(
+;define half @test_exp(half %a) #0 {
+; %r = call half @llvm.exp.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp2(
+;define half @test_exp2(half %a) #0 {
+; %r = call half @llvm.exp2.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log(
+;define half @test_log(half %a) #0 {
+; %r = call half @llvm.log.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log10(
+;define half @test_log10(half %a) #0 {
+; %r = call half @llvm.log10.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log2(
+;define half @test_log2(half %a) #0 {
+; %r = call half @llvm.log2.f16(half %a)
+; ret half %r
+;}
+
+; CHECK-LABEL: test_fma(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fma_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fma_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_fma_param_2];
+; CHECK-F16: fma.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]], [[C]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret
+define half @test_fma(half %a, half %b, half %c) #0 {
+ %r = call half @llvm.fma.f16(half %a, half %b, half %c)
+ ret half %r
+}
+
+; CHECK-LABEL: test_fabs(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fabs_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: abs.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fabs(half %a) #0 {
+ %r = call half @llvm.fabs.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_minnum(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_minnum_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_minnum_param_1];
+; CHECK-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK: min.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_minnum(half %a, half %b) #0 {
+ %r = call half @llvm.minnum.f16(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_maxnum(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_maxnum_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_maxnum_param_1];
+; CHECK-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK: max.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_maxnum(half %a, half %b) #0 {
+ %r = call half @llvm.maxnum.f16(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_copysign(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_param_0];
+; CHECK-DAG: ld.param.b16 [[BH:%h[0-9]+]], [test_copysign_param_1];
+; CHECK-DAG: mov.b16 [[AS:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b16 [[BS:%rs[0-9]+]], [[BH]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AS]], 32767;
+; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BS]], -32768;
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_copysign(half %a, half %b) #0 {
+ %r = call half @llvm.copysign.f16(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_copysign_f32(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_f32_param_0];
+; CHECK-DAG: ld.param.f32 [[BF:%f[0-9]+]], [test_copysign_f32_param_1];
+; CHECK-DAG: mov.b16 [[A:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b32 [[B:%r[0-9]+]], [[BF]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[A]], 32767;
+; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[B]], -2147483648;
+; CHECK-DAG: shr.u32 [[BX1:%r[0-9]+]], [[BX0]], 16;
+; CHECK-DAG: cvt.u16.u32 [[BX2:%rs[0-9]+]], [[BX1]];
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_copysign_f32(half %a, float %b) #0 {
+ %tb = fptrunc float %b to half
+ %r = call half @llvm.copysign.f16(half %a, half %tb)
+ ret half %r
+}
+
+; CHECK-LABEL: test_copysign_f64(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_f64_param_0];
+; CHECK-DAG: ld.param.f64 [[BD:%fd[0-9]+]], [test_copysign_f64_param_1];
+; CHECK-DAG: mov.b16 [[A:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b64 [[B:%rd[0-9]+]], [[BD]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[A]], 32767;
+; CHECK-DAG: and.b64 [[BX0:%rd[0-9]+]], [[B]], -9223372036854775808;
+; CHECK-DAG: shr.u64 [[BX1:%rd[0-9]+]], [[BX0]], 48;
+; CHECK-DAG: cvt.u16.u64 [[BX2:%rs[0-9]+]], [[BX1]];
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_copysign_f64(half %a, double %b) #0 {
+ %tb = fptrunc double %b to half
+ %r = call half @llvm.copysign.f16(half %a, half %tb)
+ ret half %r
+}
+
+; CHECK-LABEL: test_copysign_extended(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_extended_param_0];
+; CHECK-DAG: ld.param.b16 [[BH:%h[0-9]+]], [test_copysign_extended_param_1];
+; CHECK-DAG: mov.b16 [[AS:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b16 [[BS:%rs[0-9]+]], [[BH]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AS]], 32767;
+; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BS]], -32768;
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: cvt.f32.f16 [[XR:%f[0-9]+]], [[R]];
+; CHECK: st.param.f32 [func_retval0+0], [[XR]];
+; CHECK: ret;
+define float @test_copysign_extended(half %a, half %b) #0 {
+ %r = call half @llvm.copysign.f16(half %a, half %b)
+ %xr = fpext half %r to float
+ ret float %xr
+}
+
+; CHECK-LABEL: test_floor(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_floor_param_0];
+; CHECK: cvt.rmi.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_floor(half %a) #0 {
+ %r = call half @llvm.floor.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_ceil(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_ceil_param_0];
+; CHECK: cvt.rpi.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_ceil(half %a) #0 {
+ %r = call half @llvm.ceil.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_trunc(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_trunc_param_0];
+; CHECK: cvt.rzi.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_trunc(half %a) #0 {
+ %r = call half @llvm.trunc.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_rint(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_rint_param_0];
+; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_rint(half %a) #0 {
+ %r = call half @llvm.rint.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_nearbyint(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_nearbyint_param_0];
+; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_nearbyint(half %a) #0 {
+ %r = call half @llvm.nearbyint.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_round(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_round_param_0];
+; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_round(half %a) #0 {
+ %r = call half @llvm.round.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_fmuladd(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fmuladd_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fmuladd_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_fmuladd_param_2];
+; CHECK-F16: fma.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]], [[C]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fmuladd(half %a, half %b, half %c) #0 {
+ %r = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
+ ret half %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "unsafe-fp-math" = "true" }
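The test_copysign* checks above encode the bit-level lowering of copysign.f16: keep the magnitude bits of the first operand (mask 0x7FFF = 32767) and the sign bit of the second (mask 0x8000 = -32768), then OR them together. As a minimal illustrative sketch (not part of the diff; the function name and explicit bitcasts are assumptions made for the example), the same computation written directly in IR looks like this:

; Illustrative sketch only: copysign.f16 as the and/or bit pattern checked above.
define half @copysign_bits_sketch(half %a, half %b) {
  %ai   = bitcast half %a to i16
  %bi   = bitcast half %b to i16
  %mag  = and i16 %ai, 32767      ; 0x7FFF keeps exponent+mantissa of %a
  %sign = and i16 %bi, -32768     ; 0x8000 keeps only the sign bit of %b
  %ri   = or i16 %mag, %sign
  %r    = bitcast i16 %ri to half
  ret half %r
}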
diff --git a/test/CodeGen/NVPTX/f16x2-instructions.ll b/test/CodeGen/NVPTX/f16x2-instructions.ll
index 33bb616d895c..5dc796ada37f 100644
--- a/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -1,1426 +1,1427 @@
-; ## Full FP16 support enabled by default.
-; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN: -O0 -disable-post-ra -disable-fp-elim \
-; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16 %s
-; ## FP16 support explicitly disabled.
-; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN: -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
-; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
-; ## FP16 is not supported by hardware.
-; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
-; RUN: -disable-post-ra -disable-fp-elim \
-; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
-
-target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-
-; CHECK-LABEL: test_ret_const(
-; CHECK: mov.u32 [[T:%r[0-9+]]], 1073757184;
-; CHECK: mov.b32 [[R:%hh[0-9+]]], [[T]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_ret_const() #0 {
- ret <2 x half> <half 1.0, half 2.0>
-}
-
-; CHECK-LABEL: test_extract_0(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_0_param_0];
-; CHECK: mov.b32 {[[R:%h[0-9]+]], %tmp_hi}, [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_extract_0(<2 x half> %a) #0 {
- %e = extractelement <2 x half> %a, i32 0
- ret half %e
-}
-
-; CHECK-LABEL: test_extract_1(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_1_param_0];
-; CHECK: mov.b32 {%tmp_lo, [[R:%h[0-9]+]]}, [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_extract_1(<2 x half> %a) #0 {
- %e = extractelement <2 x half> %a, i32 1
- ret half %e
-}
-
-; CHECK-LABEL: test_extract_i(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_i_param_0];
-; CHECK-DAG: ld.param.u64 [[IDX:%rd[0-9]+]], [test_extract_i_param_1];
-; CHECK-DAG: setp.eq.s64 [[PRED:%p[0-9]+]], [[IDX]], 0;
-; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[A]];
-; CHECK: selp.b16 [[R:%h[0-9]+]], [[E0]], [[E1]], [[PRED]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK: ret;
-define half @test_extract_i(<2 x half> %a, i64 %idx) #0 {
- %e = extractelement <2 x half> %a, i64 %idx
- ret half %e
-}
-
-; CHECK-LABEL: test_fadd(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fadd_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fadd_param_1];
-;
-; CHECK-F16-NEXT: add.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
-; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fadd(<2 x half> %a, <2 x half> %b) #0 {
- %r = fadd <2 x half> %a, %b
- ret <2 x half> %r
-}
-
-; Check that we can lower fadd with immediate arguments.
-; CHECK-LABEL: test_fadd_imm_0(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fadd_imm_0_param_0];
-;
-; CHECK-F16: mov.u32 [[I:%r[0-9+]]], 1073757184;
-; CHECK-F16: mov.b32 [[IHH:%hh[0-9+]]], [[I]];
-; CHECK-F16: add.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[IHH]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], 0f3F800000;
-; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], 0f40000000;
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fadd_imm_0(<2 x half> %a) #0 {
- %r = fadd <2 x half> <half 1.0, half 2.0>, %a
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fadd_imm_1(
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fadd_imm_1_param_0];
-;
-; CHECK-F16: mov.u32 [[I:%r[0-9+]]], 1073757184;
-; CHECK-F16: mov.b32 [[IHH:%hh[0-9+]]], [[I]];
-; CHECK-F16: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[IHH]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], 0f3F800000;
-; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], 0f40000000;
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fadd_imm_1(<2 x half> %a) #0 {
- %r = fadd <2 x half> %a, <half 1.0, half 2.0>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fsub(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fsub_param_0];
-;
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fsub_param_1];
-; CHECK-F16-NEXT: sub.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: sub.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
-; CHECK-NOF16-DAG: sub.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fsub(<2 x half> %a, <2 x half> %b) #0 {
- %r = fsub <2 x half> %a, %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fneg(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fneg_param_0];
-;
-; CHECK-F16: mov.u32 [[I0:%r[0-9+]]], 0;
-; CHECK-F16: mov.b32 [[IHH0:%hh[0-9+]]], [[I0]];
-; CHECK-F16-NEXT: sub.rn.f16x2 [[R:%hh[0-9]+]], [[IHH0]], [[A]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000;
-; CHECK-NOF16-DAG: sub.rn.f32 [[FR0:%f[0-9]+]], [[Z]], [[FA0]];
-; CHECK-NOF16-DAG: sub.rn.f32 [[FR1:%f[0-9]+]], [[Z]], [[FA1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fneg(<2 x half> %a) #0 {
- %r = fsub <2 x half> <half 0.0, half 0.0>, %a
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fmul(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fmul_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fmul_param_1];
-; CHECK-F16-NEXT: mul.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: mul.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
-; CHECK-NOF16-DAG: mul.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fmul(<2 x half> %a, <2 x half> %b) #0 {
- %r = fmul <2 x half> %a, %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fdiv(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fdiv_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fdiv_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]];
-; CHECK-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]];
-; CHECK-DAG: div.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
-; CHECK-DAG: div.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]];
-; CHECK-NEXT: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_fdiv(<2 x half> %a, <2 x half> %b) #0 {
- %r = fdiv <2 x half> %a, %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_frem(
-; -- Load two f16x2 inputs and split them into f16 elements.
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_frem_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_frem_param_1];
-; -- Split into elements
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; -- promote to f32.
-; CHECK-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]];
-; CHECK-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]];
-; -- frem(a[0],b[0]).
-; CHECK-DAG: div.rn.f32 [[FD0:%f[0-9]+]], [[FA0]], [[FB0]];
-; CHECK-DAG: cvt.rmi.f32.f32 [[DI0:%f[0-9]+]], [[FD0]];
-; CHECK-DAG: mul.f32 [[RI0:%f[0-9]+]], [[DI0]], [[FB0]];
-; CHECK-DAG: sub.f32 [[RF0:%f[0-9]+]], [[FA0]], [[RI0]];
-; -- frem(a[1],b[1]).
-; CHECK-DAG: div.rn.f32 [[FD1:%f[0-9]+]], [[FA1]], [[FB1]];
-; CHECK-DAG: cvt.rmi.f32.f32 [[DI1:%f[0-9]+]], [[FD1]];
-; CHECK-DAG: mul.f32 [[RI1:%f[0-9]+]], [[DI1]], [[FB1]];
-; CHECK-DAG: sub.f32 [[RF1:%f[0-9]+]], [[FA1]], [[RI1]];
-; -- convert back to f16.
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; -- merge into f16x2 and return it.
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_frem(<2 x half> %a, <2 x half> %b) #0 {
- %r = frem <2 x half> %a, %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: .func test_ldst_v2f16(
-; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v2f16_param_0];
-; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v2f16_param_1];
-; CHECK-DAG: ld.b32 [[E:%hh[0-9]+]], [%[[A]]]
-; CHECK: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[E]];
-; CHECK-DAG: st.v2.b16 [%[[B]]], {[[E0]], [[E1]]};
-; CHECK: ret;
-define void @test_ldst_v2f16(<2 x half>* %a, <2 x half>* %b) {
- %t1 = load <2 x half>, <2 x half>* %a
- store <2 x half> %t1, <2 x half>* %b, align 16
- ret void
-}
-
-; CHECK-LABEL: .func test_ldst_v3f16(
-; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v3f16_param_0];
-; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v3f16_param_1];
-; -- v3 is inconvenient to capture as it's lowered as ld.b64 plus a fair
-; number of bit-shifting instructions that may change at llvm's whim.
-; So we only verify that the correct number of writes is issued at the
-; correct offsets, not the values we write.
-; CHECK-DAG: ld.u64
-; CHECK-DAG: st.u32 [%[[B]]],
-; CHECK-DAG: st.b16 [%[[B]]+4],
-; CHECK: ret;
-define void @test_ldst_v3f16(<3 x half>* %a, <3 x half>* %b) {
- %t1 = load <3 x half>, <3 x half>* %a
- store <3 x half> %t1, <3 x half>* %b, align 16
- ret void
-}
-
-; CHECK-LABEL: .func test_ldst_v4f16(
-; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v4f16_param_0];
-; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v4f16_param_1];
-; CHECK-DAG: ld.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [%[[A]]];
-; CHECK-DAG: st.v4.b16 [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: ret;
-define void @test_ldst_v4f16(<4 x half>* %a, <4 x half>* %b) {
- %t1 = load <4 x half>, <4 x half>* %a
- store <4 x half> %t1, <4 x half>* %b, align 16
- ret void
-}
-
-; CHECK-LABEL: .func test_ldst_v8f16(
-; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v8f16_param_0];
-; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v8f16_param_1];
-; CHECK-DAG: ld.v4.b32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [%[[A]]];
-; CHECK-DAG: st.v4.b32 [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: ret;
-define void @test_ldst_v8f16(<8 x half>* %a, <8 x half>* %b) {
- %t1 = load <8 x half>, <8 x half>* %a
- store <8 x half> %t1, <8 x half>* %b, align 16
- ret void
-}
-
-declare <2 x half> @test_callee(<2 x half> %a, <2 x half> %b) #0
-
-; CHECK-LABEL: test_call(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_call_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_call_param_1];
-; CHECK: {
-; CHECK-DAG: .param .align 4 .b8 param0[4];
-; CHECK-DAG: .param .align 4 .b8 param1[4];
-; CHECK-DAG: st.param.b32 [param0+0], [[A]];
-; CHECK-DAG: st.param.b32 [param1+0], [[B]];
-; CHECK-DAG: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_callee,
-; CHECK: );
-; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
-; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_call(<2 x half> %a, <2 x half> %b) #0 {
- %r = call <2 x half> @test_callee(<2 x half> %a, <2 x half> %b)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_call_flipped(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_call_flipped_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_call_flipped_param_1];
-; CHECK: {
-; CHECK-DAG: .param .align 4 .b8 param0[4];
-; CHECK-DAG: .param .align 4 .b8 param1[4];
-; CHECK-DAG: st.param.b32 [param0+0], [[B]];
-; CHECK-DAG: st.param.b32 [param1+0], [[A]];
-; CHECK-DAG: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_callee,
-; CHECK: );
-; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
-; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_call_flipped(<2 x half> %a, <2 x half> %b) #0 {
- %r = call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_tailcall_flipped(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_tailcall_flipped_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_tailcall_flipped_param_1];
-; CHECK: {
-; CHECK-DAG: .param .align 4 .b8 param0[4];
-; CHECK-DAG: .param .align 4 .b8 param1[4];
-; CHECK-DAG: st.param.b32 [param0+0], [[B]];
-; CHECK-DAG: st.param.b32 [param1+0], [[A]];
-; CHECK-DAG: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_callee,
-; CHECK: );
-; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
-; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_tailcall_flipped(<2 x half> %a, <2 x half> %b) #0 {
- %r = tail call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_select(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_param_1];
-; CHECK-DAG: ld.param.u8 [[C:%rs[0-9]+]], [test_select_param_2]
-; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
-; CHECK-NEXT: selp.b32 [[R:%hh[0-9]+]], [[A]], [[B]], [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_select(<2 x half> %a, <2 x half> %b, i1 zeroext %c) #0 {
- %r = select i1 %c, <2 x half> %a, <2 x half> %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_select_cc(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_cc_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_cc_param_1];
-; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_select_cc_param_2];
-; CHECK-DAG: ld.param.b32 [[D:%hh[0-9]+]], [test_select_cc_param_3];
-;
-; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[C]], [[D]]
-;
-; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
-; CHECK-NOF16-DAG: mov.b32 {[[D0:%h[0-9]+]], [[D1:%h[0-9]+]]}, [[D]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[DF0:%f[0-9]+]], [[D0]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[CF0:%f[0-9]+]], [[C0]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[DF1:%f[0-9]+]], [[D1]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[CF1:%f[0-9]+]], [[C1]];
-; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[CF0]], [[DF0]]
-; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[CF1]], [[DF1]]
-;
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: selp.b16 [[R0:%h[0-9]+]], [[A0]], [[B0]], [[P0]];
-; CHECK-DAG: selp.b16 [[R1:%h[0-9]+]], [[A1]], [[B1]], [[P1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_select_cc(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) #0 {
- %cc = fcmp une <2 x half> %c, %d
- %r = select <2 x i1> %cc, <2 x half> %a, <2 x half> %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_select_cc_f32_f16(
-; CHECK-DAG: ld.param.v2.f32 {[[A0:%f[0-9]+]], [[A1:%f[0-9]+]]}, [test_select_cc_f32_f16_param_0];
-; CHECK-DAG: ld.param.v2.f32 {[[B0:%f[0-9]+]], [[B1:%f[0-9]+]]}, [test_select_cc_f32_f16_param_1];
-; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_select_cc_f32_f16_param_2];
-; CHECK-DAG: ld.param.b32 [[D:%hh[0-9]+]], [test_select_cc_f32_f16_param_3];
-;
-; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[C]], [[D]]
-; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
-; CHECK-NOF16-DAG: mov.b32 {[[D0:%h[0-9]+]], [[D1:%h[0-9]+]]}, [[D]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[DF0:%f[0-9]+]], [[D0]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[CF0:%f[0-9]+]], [[C0]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[DF1:%f[0-9]+]], [[D1]];
-; CHECK-NOF16-DAG: cvt.f32.f16 [[CF1:%f[0-9]+]], [[C1]];
-; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[CF0]], [[DF0]]
-; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[CF1]], [[DF1]]
-;
-; CHECK-DAG: selp.f32 [[R0:%f[0-9]+]], [[A0]], [[B0]], [[P0]];
-; CHECK-DAG: selp.f32 [[R1:%f[0-9]+]], [[A1]], [[B1]], [[P1]];
-; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
- <2 x half> %c, <2 x half> %d) #0 {
- %cc = fcmp une <2 x half> %c, %d
- %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
- ret <2 x float> %r
-}
-
-; CHECK-LABEL: test_select_cc_f16_f32(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_cc_f16_f32_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_cc_f16_f32_param_1];
-; CHECK-DAG: ld.param.v2.f32 {[[C0:%f[0-9]+]], [[C1:%f[0-9]+]]}, [test_select_cc_f16_f32_param_2];
-; CHECK-DAG: ld.param.v2.f32 {[[D0:%f[0-9]+]], [[D1:%f[0-9]+]]}, [test_select_cc_f16_f32_param_3];
-; CHECK-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[C0]], [[D0]]
-; CHECK-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[C1]], [[D1]]
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: selp.b16 [[R0:%h[0-9]+]], [[A0]], [[B0]], [[P0]];
-; CHECK-DAG: selp.b16 [[R1:%h[0-9]+]], [[A1]], [[B1]], [[P1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
- <2 x float> %c, <2 x float> %d) #0 {
- %cc = fcmp une <2 x float> %c, %d
- %r = select <2 x i1> %cc, <2 x half> %a, <2 x half> %b
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fcmp_une(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_une_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_une_param_1];
-; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp une <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ueq(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ueq_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ueq_param_1];
-; CHECK-F16: setp.equ.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.equ.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.equ.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ueq <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ugt(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ugt_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ugt_param_1];
-; CHECK-F16: setp.gtu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.gtu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.gtu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ugt <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_uge(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_uge_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_uge_param_1];
-; CHECK-F16: setp.geu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.geu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.geu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp uge <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ult(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ult_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ult_param_1];
-; CHECK-F16: setp.ltu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.ltu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.ltu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ult <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ule(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ule_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ule_param_1];
-; CHECK-F16: setp.leu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.leu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.leu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ule <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-
-; CHECK-LABEL: test_fcmp_uno(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_uno_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_uno_param_1];
-; CHECK-F16: setp.nan.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.nan.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.nan.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp uno <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_one(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_one_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_one_param_1];
-; CHECK-F16: setp.ne.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.ne.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.ne.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp one <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_oeq(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_oeq_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_oeq_param_1];
-; CHECK-F16: setp.eq.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.eq.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.eq.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp oeq <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ogt(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ogt_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ogt_param_1];
-; CHECK-F16: setp.gt.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.gt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.gt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ogt <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_oge(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_oge_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_oge_param_1];
-; CHECK-F16: setp.ge.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.ge.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.ge.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp oge <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_olt(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_olt_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_olt_param_1];
-; CHECK-F16: setp.lt.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.lt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.lt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp olt <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ole(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ole_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ole_param_1];
-; CHECK-F16: setp.le.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.le.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.le.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ole <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fcmp_ord(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ord_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ord_param_1];
-; CHECK-F16: setp.num.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: setp.num.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
-; CHECK-NOF16-DAG: setp.num.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
-; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-NEXT: ret;
-define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
- %r = fcmp ord <2 x half> %a, %b
- ret <2 x i1> %r
-}
-
-; CHECK-LABEL: test_fptosi_i32(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptosi_i32_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.rzi.s32.f16 [[R0:%r[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rzi.s32.f16 [[R1:%r[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
-; CHECK: ret;
-define <2 x i32> @test_fptosi_i32(<2 x half> %a) #0 {
- %r = fptosi <2 x half> %a to <2 x i32>
- ret <2 x i32> %r
-}
-
-; CHECK-LABEL: test_fptosi_i64(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptosi_i64_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.rzi.s64.f16 [[R0:%rd[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rzi.s64.f16 [[R1:%rd[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
-; CHECK: ret;
-define <2 x i64> @test_fptosi_i64(<2 x half> %a) #0 {
- %r = fptosi <2 x half> %a to <2 x i64>
- ret <2 x i64> %r
-}
-
-; CHECK-LABEL: test_fptoui_2xi32(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptoui_2xi32_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.rzi.u32.f16 [[R0:%r[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rzi.u32.f16 [[R1:%r[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
-; CHECK: ret;
-define <2 x i32> @test_fptoui_2xi32(<2 x half> %a) #0 {
- %r = fptoui <2 x half> %a to <2 x i32>
- ret <2 x i32> %r
-}
-
-; CHECK-LABEL: test_fptoui_2xi64(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptoui_2xi64_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.rzi.u64.f16 [[R0:%rd[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rzi.u64.f16 [[R1:%rd[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
-; CHECK: ret;
-define <2 x i64> @test_fptoui_2xi64(<2 x half> %a) #0 {
- %r = fptoui <2 x half> %a to <2 x i64>
- ret <2 x i64> %r
-}
-
-; CHECK-LABEL: test_uitofp_2xi32(
-; CHECK: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_uitofp_2xi32_param_0];
-; CHECK-DAG: cvt.rn.f16.u32 [[R0:%h[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f16.u32 [[R1:%h[0-9]+]], [[A1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_uitofp_2xi32(<2 x i32> %a) #0 {
- %r = uitofp <2 x i32> %a to <2 x half>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_uitofp_2xi64(
-; CHECK: ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_uitofp_2xi64_param_0];
-; CHECK-DAG: cvt.rn.f32.u64 [[F0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f32.u64 [[F1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[F0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[F1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_uitofp_2xi64(<2 x i64> %a) #0 {
- %r = uitofp <2 x i64> %a to <2 x half>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_sitofp_2xi32(
-; CHECK: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_sitofp_2xi32_param_0];
-; CHECK-DAG: cvt.rn.f16.s32 [[R0:%h[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f16.s32 [[R1:%h[0-9]+]], [[A1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_sitofp_2xi32(<2 x i32> %a) #0 {
- %r = sitofp <2 x i32> %a to <2 x half>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_sitofp_2xi64(
-; CHECK: ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_sitofp_2xi64_param_0];
-; CHECK-DAG: cvt.rn.f32.s64 [[F0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f32.s64 [[F1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[F0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[F1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_sitofp_2xi64(<2 x i64> %a) #0 {
- %r = sitofp <2 x i64> %a to <2 x half>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_uitofp_2xi32_fadd(
-; CHECK-DAG: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_uitofp_2xi32_fadd_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_uitofp_2xi32_fadd_param_1];
-; CHECK-DAG: cvt.rn.f16.u32 [[C0:%h[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f16.u32 [[C1:%h[0-9]+]], [[A1]];
-
-; CHECK-F16-DAG: mov.b32 [[C:%hh[0-9]+]], {[[C0]], [[C1]]}
-; CHECK-F16-DAG: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[C]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
-; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FB0]], [[FC0]];
-; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FB1]], [[FC1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
- %c = uitofp <2 x i32> %a to <2 x half>
- %r = fadd <2 x half> %b, %c
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_sitofp_2xi32_fadd(
-; CHECK-DAG: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_sitofp_2xi32_fadd_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_sitofp_2xi32_fadd_param_1];
-; CHECK-DAG: cvt.rn.f16.s32 [[C0:%h[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f16.s32 [[C1:%h[0-9]+]], [[A1]];
-;
-; CHECK-F16-DAG: mov.b32 [[C:%hh[0-9]+]], {[[C0]], [[C1]]}
-; CHECK-F16-DAG: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[C]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
-; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FB0]], [[FC0]];
-; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FB1]], [[FC1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_sitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
- %c = sitofp <2 x i32> %a to <2 x half>
- %r = fadd <2 x half> %b, %c
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fptrunc_2xfloat(
-; CHECK: ld.param.v2.f32 {[[A0:%f[0-9]+]], [[A1:%f[0-9]+]]}, [test_fptrunc_2xfloat_param_0];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[A1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
- %r = fptrunc <2 x float> %a to <2 x half>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fptrunc_2xdouble(
-; CHECK: ld.param.v2.f64 {[[A0:%fd[0-9]+]], [[A1:%fd[0-9]+]]}, [test_fptrunc_2xdouble_param_0];
-; CHECK-DAG: cvt.rn.f16.f64 [[R0:%h[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.rn.f16.f64 [[R1:%h[0-9]+]], [[A1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
- %r = fptrunc <2 x double> %a to <2 x half>
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fpext_2xfloat(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fpext_2xfloat_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.f32.f16 [[R0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[R1:%f[0-9]+]], [[A1]];
-; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK: ret;
-define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
- %r = fpext <2 x half> %a to <2 x float>
- ret <2 x float> %r
-}
-
-; CHECK-LABEL: test_fpext_2xdouble(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fpext_2xdouble_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.f64.f16 [[R0:%fd[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f64.f16 [[R1:%fd[0-9]+]], [[A1]];
-; CHECK-NEXT: st.param.v2.f64 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK: ret;
-define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
- %r = fpext <2 x half> %a to <2 x double>
- ret <2 x double> %r
-}
-
-
-; CHECK-LABEL: test_bitcast_2xhalf_to_2xi16(
-; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_bitcast_2xhalf_to_2xi16_param_0];
-; CHECK-DAG: cvt.u16.u32 [[R0:%rs[0-9]+]], [[A]]
-; CHECK-DAG: shr.u32 [[AH:%r[0-9]+]], [[A]], 16
-; CHECK-DAG: cvt.u16.u32 [[R1:%rs[0-9]+]], [[AH]]
-; CHECK: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]}
-; CHECK: ret;
-define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
- %r = bitcast <2 x half> %a to <2 x i16>
- ret <2 x i16> %r
-}
-
-; CHECK-LABEL: test_bitcast_2xi16_to_2xhalf(
-; CHECK: ld.param.v2.u16 {[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [test_bitcast_2xi16_to_2xhalf_param_0];
-; CHECK-DAG: cvt.u32.u16 [[R0:%r[0-9]+]], [[RS0]];
-; CHECK-DAG: cvt.u32.u16 [[R1:%r[0-9]+]], [[RS1]];
-; CHECK-DAG: shl.b32 [[R1H:%r[0-9]+]], [[R1]], 16;
-; CHECK-DAG: or.b32 [[R1H0L:%r[0-9]+]], [[R0]], [[R1H]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], [[R1H0L]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 {
- %r = bitcast <2 x i16> %a to <2 x half>
- ret <2 x half> %r
-}
-
-
-declare <2 x half> @llvm.sqrt.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b) #0
-declare <2 x half> @llvm.sin.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.cos.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b) #0
-declare <2 x half> @llvm.exp.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.exp2.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.log.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.log10.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.log2.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
-declare <2 x half> @llvm.fabs.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b) #0
-declare <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b) #0
-declare <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b) #0
-declare <2 x half> @llvm.floor.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.ceil.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.trunc.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.rint.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.nearbyint.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.round.f16(<2 x half> %a) #0
-declare <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
-
-; CHECK-LABEL: test_sqrt(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_sqrt_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: sqrt.rn.f32 [[RF0:%f[0-9]+]], [[AF0]];
-; CHECK-DAG: sqrt.rn.f32 [[RF1:%f[0-9]+]], [[AF1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_sqrt(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.sqrt.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_powi(
-;define <2 x half> @test_powi(<2 x half> %a, <2 x i32> %b) #0 {
-; %r = call <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b)
-; ret <2 x half> %r
-;}
-
-; CHECK-LABEL: test_sin(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_sin_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: sin.approx.f32 [[RF0:%f[0-9]+]], [[AF0]];
-; CHECK-DAG: sin.approx.f32 [[RF1:%f[0-9]+]], [[AF1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_sin(<2 x half> %a) #0 #1 {
- %r = call <2 x half> @llvm.sin.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_cos(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_cos_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cos.approx.f32 [[RF0:%f[0-9]+]], [[AF0]];
-; CHECK-DAG: cos.approx.f32 [[RF1:%f[0-9]+]], [[AF1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_cos(<2 x half> %a) #0 #1 {
- %r = call <2 x half> @llvm.cos.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_pow(
-;define <2 x half> @test_pow(<2 x half> %a, <2 x half> %b) #0 {
-; %r = call <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b)
-; ret <2 x half> %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_exp(
-;define <2 x half> @test_exp(<2 x half> %a) #0 {
-; %r = call <2 x half> @llvm.exp.f16(<2 x half> %a)
-; ret <2 x half> %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_exp2(
-;define <2 x half> @test_exp2(<2 x half> %a) #0 {
-; %r = call <2 x half> @llvm.exp2.f16(<2 x half> %a)
-; ret <2 x half> %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_log(
-;define <2 x half> @test_log(<2 x half> %a) #0 {
-; %r = call <2 x half> @llvm.log.f16(<2 x half> %a)
-; ret <2 x half> %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_log10(
-;define <2 x half> @test_log10(<2 x half> %a) #0 {
-; %r = call <2 x half> @llvm.log10.f16(<2 x half> %a)
-; ret <2 x half> %r
-;}
-
-;;; Can't do this yet: requires libcall.
-; XCHECK-LABEL: test_log2(
-;define <2 x half> @test_log2(<2 x half> %a) #0 {
-; %r = call <2 x half> @llvm.log2.f16(<2 x half> %a)
-; ret <2 x half> %r
-;}
-
-; CHECK-LABEL: test_fma(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fma_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fma_param_1];
-; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_fma_param_2];
-;
-; CHECK-F16: fma.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]], [[C]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
-; CHECK-NOF16-DAG: fma.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]], [[FC0]];
-; CHECK-NOF16-DAG: fma.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]], [[FC1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret
-define <2 x half> @test_fma(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
- %r = call <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fabs(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fabs_param_0];
-; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: abs.f32 [[RF0:%f[0-9]+]], [[AF0]];
-; CHECK-DAG: abs.f32 [[RF1:%f[0-9]+]], [[AF1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_fabs(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.fabs.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_minnum(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_minnum_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_minnum_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.f32.f16 [[BF0:%f[0-9]+]], [[B0]];
-; CHECK-DAG: cvt.f32.f16 [[BF1:%f[0-9]+]], [[B1]];
-; CHECK-DAG: min.f32 [[RF0:%f[0-9]+]], [[AF0]], [[BF0]];
-; CHECK-DAG: min.f32 [[RF1:%f[0-9]+]], [[AF1]], [[BF1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_minnum(<2 x half> %a, <2 x half> %b) #0 {
- %r = call <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_maxnum(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_maxnum_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_maxnum_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
-; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.f32.f16 [[BF0:%f[0-9]+]], [[B0]];
-; CHECK-DAG: cvt.f32.f16 [[BF1:%f[0-9]+]], [[B1]];
-; CHECK-DAG: max.f32 [[RF0:%f[0-9]+]], [[AF0]], [[BF0]];
-; CHECK-DAG: max.f32 [[RF1:%f[0-9]+]], [[AF1]], [[BF1]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
-; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_maxnum(<2 x half> %a, <2 x half> %b) #0 {
- %r = call <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_copysign(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_copysign_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
-; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
-; CHECK-DAG: mov.b16 [[BS0:%rs[0-9]+]], [[B0]];
-; CHECK-DAG: mov.b16 [[BS1:%rs[0-9]+]], [[B1]];
-; CHECK-DAG: and.b16 [[AX0:%rs[0-9]+]], [[AS0]], 32767;
-; CHECK-DAG: and.b16 [[AX1:%rs[0-9]+]], [[AS1]], 32767;
-; CHECK-DAG: and.b16 [[BX0:%rs[0-9]+]], [[BS0]], -32768;
-; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[BS1]], -32768;
-; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AX0]], [[BX0]];
-; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AX1]], [[BX1]];
-; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
-; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
-; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_copysign(<2 x half> %a, <2 x half> %b) #0 {
- %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_copysign_f32(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_f32_param_0];
-; CHECK-DAG: ld.param.v2.f32 {[[B0:%f[0-9]+]], [[B1:%f[0-9]+]]}, [test_copysign_f32_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
-; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
-; CHECK-DAG: mov.b32 [[BI0:%r[0-9]+]], [[B0]];
-; CHECK-DAG: mov.b32 [[BI1:%r[0-9]+]], [[B1]];
-; CHECK-DAG: and.b16 [[AI0:%rs[0-9]+]], [[AS0]], 32767;
-; CHECK-DAG: and.b16 [[AI1:%rs[0-9]+]], [[AS1]], 32767;
-; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[BI0]], -2147483648;
-; CHECK-DAG: and.b32 [[BX1:%r[0-9]+]], [[BI1]], -2147483648;
-; CHECK-DAG: shr.u32 [[BY0:%r[0-9]+]], [[BX0]], 16;
-; CHECK-DAG: shr.u32 [[BY1:%r[0-9]+]], [[BX1]], 16;
-; CHECK-DAG: cvt.u16.u32 [[BZ0:%rs[0-9]+]], [[BY0]];
-; CHECK-DAG: cvt.u16.u32 [[BZ1:%rs[0-9]+]], [[BY1]];
-; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AI0]], [[BZ0]];
-; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AI1]], [[BZ1]];
-; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
-; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
-; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
- %tb = fptrunc <2 x float> %b to <2 x half>
- %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %tb)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_copysign_f64(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_f64_param_0];
-; CHECK-DAG: ld.param.v2.f64 {[[B0:%fd[0-9]+]], [[B1:%fd[0-9]+]]}, [test_copysign_f64_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
-; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
-; CHECK-DAG: mov.b64 [[BI0:%rd[0-9]+]], [[B0]];
-; CHECK-DAG: mov.b64 [[BI1:%rd[0-9]+]], [[B1]];
-; CHECK-DAG: and.b16 [[AI0:%rs[0-9]+]], [[AS0]], 32767;
-; CHECK-DAG: and.b16 [[AI1:%rs[0-9]+]], [[AS1]], 32767;
-; CHECK-DAG: and.b64 [[BX0:%rd[0-9]+]], [[BI0]], -9223372036854775808;
-; CHECK-DAG: and.b64 [[BX1:%rd[0-9]+]], [[BI1]], -9223372036854775808;
-; CHECK-DAG: shr.u64 [[BY0:%rd[0-9]+]], [[BX0]], 48;
-; CHECK-DAG: shr.u64 [[BY1:%rd[0-9]+]], [[BX1]], 48;
-; CHECK-DAG: cvt.u16.u64 [[BZ0:%rs[0-9]+]], [[BY0]];
-; CHECK-DAG: cvt.u16.u64 [[BZ1:%rs[0-9]+]], [[BY1]];
-; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AI0]], [[BZ0]];
-; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AI1]], [[BZ1]];
-; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
-; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
-; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
- %tb = fptrunc <2 x double> %b to <2 x half>
- %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %tb)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_copysign_extended(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_extended_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_copysign_extended_param_1];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
-; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
-; CHECK-DAG: mov.b16 [[BS0:%rs[0-9]+]], [[B0]];
-; CHECK-DAG: mov.b16 [[BS1:%rs[0-9]+]], [[B1]];
-; CHECK-DAG: and.b16 [[AX0:%rs[0-9]+]], [[AS0]], 32767;
-; CHECK-DAG: and.b16 [[AX1:%rs[0-9]+]], [[AS1]], 32767;
-; CHECK-DAG: and.b16 [[BX0:%rs[0-9]+]], [[BS0]], -32768;
-; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[BS1]], -32768;
-; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AX0]], [[BX0]];
-; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AX1]], [[BX1]];
-; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
-; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
-; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: mov.b32 {[[RX0:%h[0-9]+]], [[RX1:%h[0-9]+]]}, [[R]]
-; CHECK-DAG: cvt.f32.f16 [[XR0:%f[0-9]+]], [[RX0]];
-; CHECK-DAG: cvt.f32.f16 [[XR1:%f[0-9]+]], [[RX1]];
-; CHECK: st.param.v2.f32 [func_retval0+0], {[[XR0]], [[XR1]]};
-; CHECK: ret;
-define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
- %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
- %xr = fpext <2 x half> %r to <2 x float>
- ret <2 x float> %xr
-}
-
-; CHECK-LABEL: test_floor(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_floor_param_0];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
-; CHECK-DAG: cvt.rmi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rmi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_floor(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.floor.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_ceil(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_ceil_param_0];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
-; CHECK-DAG: cvt.rpi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rpi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_ceil(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.ceil.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_trunc(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_trunc_param_0];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
-; CHECK-DAG: cvt.rzi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rzi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_trunc(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.trunc.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_rint(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_rint_param_0];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
-; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_rint(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.rint.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_nearbyint(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_nearbyint_param_0];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
-; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_nearbyint(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.nearbyint.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_round(
-; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_round_param_0];
-; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
-; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
-; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
-; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_round(<2 x half> %a) #0 {
- %r = call <2 x half> @llvm.round.f16(<2 x half> %a)
- ret <2 x half> %r
-}
-
-; CHECK-LABEL: test_fmuladd(
-; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fmuladd_param_0];
-; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fmuladd_param_1];
-; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_fmuladd_param_2];
-;
-; CHECK-F16: fma.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]], [[C]];
-;
-; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
-; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
-; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
-; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
-; CHECK-NOF16-DAG: fma.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]], [[FC0]];
-; CHECK-NOF16-DAG: fma.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]], [[FC1]];
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
-; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
-; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
-;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define <2 x half> @test_fmuladd(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
- %r = call <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
- ret <2 x half> %r
-}
-
-attributes #0 = { nounwind }
-attributes #1 = { "unsafe-fp-math" = "true" }
+; ## Full FP16 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16 %s
+; ## FP16 support explicitly disabled.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
+; RUN: -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+; ## FP16 is not supported by hardware.
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
+; RUN: -disable-post-ra -disable-fp-elim -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: test_ret_const(
+; CHECK: mov.u32 [[T:%r[0-9]+]], 1073757184;
+; CHECK: mov.b32 [[R:%hh[0-9]+]], [[T]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_ret_const() #0 {
+ ret <2 x half> <half 1.0, half 2.0>
+}
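+; Note: 1073757184 above is 0x40003C00 -- half 2.0 (0x4000) in the high 16 bits
+; and half 1.0 (0x3C00) in the low 16 bits, i.e. <half 1.0, half 2.0> packed
+; into a single b32 register.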
+
+; CHECK-LABEL: test_extract_0(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_0_param_0];
+; CHECK: mov.b32 {[[R:%h[0-9]+]], %tmp_hi}, [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_extract_0(<2 x half> %a) #0 {
+ %e = extractelement <2 x half> %a, i32 0
+ ret half %e
+}
+
+; CHECK-LABEL: test_extract_1(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_1_param_0];
+; CHECK: mov.b32 {%tmp_lo, [[R:%h[0-9]+]]}, [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_extract_1(<2 x half> %a) #0 {
+ %e = extractelement <2 x half> %a, i32 1
+ ret half %e
+}
+
+; CHECK-LABEL: test_extract_i(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_i_param_0];
+; CHECK-DAG: ld.param.u64 [[IDX:%rd[0-9]+]], [test_extract_i_param_1];
+; CHECK-DAG: setp.eq.s64 [[PRED:%p[0-9]+]], [[IDX]], 0;
+; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[A]];
+; CHECK: selp.b16 [[R:%h[0-9]+]], [[E0]], [[E1]], [[PRED]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_extract_i(<2 x half> %a, i64 %idx) #0 {
+ %e = extractelement <2 x half> %a, i64 %idx
+ ret half %e
+}
+
+; CHECK-LABEL: test_fadd(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fadd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fadd_param_1];
+;
+; CHECK-F16-NEXT: add.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fadd(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fadd <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; Check that we can lower fadd with immediate arguments.
+; CHECK-LABEL: test_fadd_imm_0(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fadd_imm_0_param_0];
+;
+; CHECK-F16: mov.u32 [[I:%r[0-9]+]], 1073757184;
+; CHECK-F16: mov.b32 [[IHH:%hh[0-9]+]], [[I]];
+; CHECK-F16: add.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[IHH]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], 0f3F800000;
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], 0f40000000;
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fadd_imm_0(<2 x half> %a) #0 {
+ %r = fadd <2 x half> <half 1.0, half 2.0>, %a
+ ret <2 x half> %r
+}
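+; Note: in the CHECK-NOF16 lines above, the PTX f32 immediates 0f3F800000 and
+; 0f40000000 are the IEEE-754 single-precision encodings of 1.0 and 2.0, i.e.
+; the <half 1.0, half 2.0> operand after promotion to f32.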
+
+; CHECK-LABEL: test_fadd_imm_1(
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fadd_imm_1_param_0];
+;
+; CHECK-F16: mov.u32 [[I:%r[0-9]+]], 1073757184;
+; CHECK-F16: mov.b32 [[IHH:%hh[0-9]+]], [[I]];
+; CHECK-F16: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[IHH]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], 0f3F800000;
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], 0f40000000;
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fadd_imm_1(<2 x half> %a) #0 {
+ %r = fadd <2 x half> %a, <half 1.0, half 2.0>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fsub(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fsub_param_0];
+;
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fsub_param_1];
+; CHECK-F16-NEXT: sub.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fsub(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fsub <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fneg(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fneg_param_0];
+;
+; CHECK-F16: mov.u32 [[I0:%r[0-9]+]], 0;
+; CHECK-F16: mov.b32 [[IHH0:%hh[0-9]+]], [[I0]];
+; CHECK-F16-NEXT: sub.rn.f16x2 [[R:%hh[0-9]+]], [[IHH0]], [[A]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000;
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR0:%f[0-9]+]], [[Z]], [[FA0]];
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR1:%f[0-9]+]], [[Z]], [[FA1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fneg(<2 x half> %a) #0 {
+ %r = fsub <2 x half> <half 0.0, half 0.0>, %a
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fmul(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fmul_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fmul_param_1];
+; CHECK-F16-NEXT: mul.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: mul.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-NOF16-DAG: mul.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fmul(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fmul <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fdiv(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fdiv_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fdiv_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]];
+; CHECK-DAG: div.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-DAG: div.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]];
+; CHECK-NEXT: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fdiv(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fdiv <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_frem(
+; -- Load two 16x2 inputs and split them into f16 elements
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_frem_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_frem_param_1];
+; -- Split into elements
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; -- promote to f32.
+; CHECK-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]];
+; -- frem(a[0],b[0]).
+; CHECK-DAG: div.rn.f32 [[FD0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-DAG: cvt.rmi.f32.f32 [[DI0:%f[0-9]+]], [[FD0]];
+; CHECK-DAG: mul.f32 [[RI0:%f[0-9]+]], [[DI0]], [[FB0]];
+; CHECK-DAG: sub.f32 [[RF0:%f[0-9]+]], [[FA0]], [[RI0]];
+; -- frem(a[1],b[1]).
+; CHECK-DAG: div.rn.f32 [[FD1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-DAG: cvt.rmi.f32.f32 [[DI1:%f[0-9]+]], [[FD1]];
+; CHECK-DAG: mul.f32 [[RI1:%f[0-9]+]], [[DI1]], [[FB1]];
+; CHECK-DAG: sub.f32 [[RF1:%f[0-9]+]], [[FA1]], [[RI1]];
+; -- convert back to f16.
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; -- merge into f16x2 and return it.
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_frem(<2 x half> %a, <2 x half> %b) #0 {
+ %r = frem <2 x half> %a, %b
+ ret <2 x half> %r
+}
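+; Note: the sequence checked above computes each lane as r = a - floor(a/b) * b
+; in f32 (cvt.rmi rounds toward -infinity). With illustrative values a = 5.5,
+; b = 2.0: 5.5/2.0 = 2.75, floor gives 2.0, 2.0*2.0 = 4.0, and 5.5 - 4.0 = 1.5.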
+
+; CHECK-LABEL: .func test_ldst_v2f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v2f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v2f16_param_1];
+; CHECK-DAG: ld.b32 [[E:%hh[0-9]+]], [%[[A]]]
+; CHECK: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[E]];
+; CHECK-DAG: st.v2.b16 [%[[B]]], {[[E0]], [[E1]]};
+; CHECK: ret;
+define void @test_ldst_v2f16(<2 x half>* %a, <2 x half>* %b) {
+ %t1 = load <2 x half>, <2 x half>* %a
+ store <2 x half> %t1, <2 x half>* %b, align 16
+ ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v3f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v3f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v3f16_param_1];
+; -- v3 is inconvenient to capture as it's lowered as ld.b64 plus a fair
+; number of bit-shifting instructions that may change at LLVM's whim.
+; So we only verify that we issue the correct number of writes at the
+; correct offsets, not the values we write.
+; CHECK-DAG: ld.u64
+; CHECK-DAG: st.u32 [%[[B]]],
+; CHECK-DAG: st.b16 [%[[B]]+4],
+; CHECK: ret;
+define void @test_ldst_v3f16(<3 x half>* %a, <3 x half>* %b) {
+ %t1 = load <3 x half>, <3 x half>* %a
+ store <3 x half> %t1, <3 x half>* %b, align 16
+ ret void
+}
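+; Note: a <3 x half> value occupies 6 bytes, hence the store side above is one
+; 4-byte st.u32 at offset 0 plus one 2-byte st.b16 at offset +4.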
+
+; CHECK-LABEL: .func test_ldst_v4f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v4f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v4f16_param_1];
+; CHECK-DAG: ld.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [%[[A]]];
+; CHECK-DAG: st.v4.b16 [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: ret;
+define void @test_ldst_v4f16(<4 x half>* %a, <4 x half>* %b) {
+ %t1 = load <4 x half>, <4 x half>* %a
+ store <4 x half> %t1, <4 x half>* %b, align 16
+ ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v8f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v8f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v8f16_param_1];
+; CHECK-DAG: ld.v4.b32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [%[[A]]];
+; CHECK-DAG: st.v4.b32 [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: ret;
+define void @test_ldst_v8f16(<8 x half>* %a, <8 x half>* %b) {
+ %t1 = load <8 x half>, <8 x half>* %a
+ store <8 x half> %t1, <8 x half>* %b, align 16
+ ret void
+}
+
+declare <2 x half> @test_callee(<2 x half> %a, <2 x half> %b) #0
+
+; CHECK-LABEL: test_call(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_call_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_call_param_1];
+; CHECK: {
+; CHECK-DAG: .param .align 4 .b8 param0[4];
+; CHECK-DAG: .param .align 4 .b8 param1[4];
+; CHECK-DAG: st.param.b32 [param0+0], [[A]];
+; CHECK-DAG: st.param.b32 [param1+0], [[B]];
+; CHECK-DAG: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_call(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @test_callee(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_call_flipped(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_call_flipped_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_call_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .align 4 .b8 param0[4];
+; CHECK-DAG: .param .align 4 .b8 param1[4];
+; CHECK-DAG: st.param.b32 [param0+0], [[B]];
+; CHECK-DAG: st.param.b32 [param1+0], [[A]];
+; CHECK-DAG: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_call_flipped(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_tailcall_flipped_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_tailcall_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .align 4 .b8 param0[4];
+; CHECK-DAG: .param .align 4 .b8 param1[4];
+; CHECK-DAG: st.param.b32 [param0+0], [[B]];
+; CHECK-DAG: st.param.b32 [param1+0], [[A]];
+; CHECK-DAG: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_tailcall_flipped(<2 x half> %a, <2 x half> %b) #0 {
+ %r = tail call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_select(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_param_1];
+; CHECK-DAG: ld.param.u8 [[C:%rs[0-9]+]], [test_select_param_2]
+; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
+; CHECK-NEXT: selp.b32 [[R:%hh[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_select(<2 x half> %a, <2 x half> %b, i1 zeroext %c) #0 {
+ %r = select i1 %c, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_select_cc(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_cc_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_cc_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_select_cc_param_2];
+; CHECK-DAG: ld.param.b32 [[D:%hh[0-9]+]], [test_select_cc_param_3];
+;
+; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[C]], [[D]]
+;
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: mov.b32 {[[D0:%h[0-9]+]], [[D1:%h[0-9]+]]}, [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF0:%f[0-9]+]], [[D0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF0:%f[0-9]+]], [[C0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF1:%f[0-9]+]], [[D1]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF1:%f[0-9]+]], [[C1]];
+; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[CF0]], [[DF0]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[CF1]], [[DF1]]
+;
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: selp.b16 [[R0:%h[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.b16 [[R1:%h[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_select_cc(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) #0 {
+ %cc = fcmp une <2 x half> %c, %d
+ %r = select <2 x i1> %cc, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_select_cc_f32_f16(
+; CHECK-DAG: ld.param.v2.f32 {[[A0:%f[0-9]+]], [[A1:%f[0-9]+]]}, [test_select_cc_f32_f16_param_0];
+; CHECK-DAG: ld.param.v2.f32 {[[B0:%f[0-9]+]], [[B1:%f[0-9]+]]}, [test_select_cc_f32_f16_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_select_cc_f32_f16_param_2];
+; CHECK-DAG: ld.param.b32 [[D:%hh[0-9]+]], [test_select_cc_f32_f16_param_3];
+;
+; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[C]], [[D]]
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: mov.b32 {[[D0:%h[0-9]+]], [[D1:%h[0-9]+]]}, [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF0:%f[0-9]+]], [[D0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF0:%f[0-9]+]], [[C0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF1:%f[0-9]+]], [[D1]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF1:%f[0-9]+]], [[C1]];
+; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[CF0]], [[DF0]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[CF1]], [[DF1]]
+;
+; CHECK-DAG: selp.f32 [[R0:%f[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.f32 [[R1:%f[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
+ <2 x half> %c, <2 x half> %d) #0 {
+ %cc = fcmp une <2 x half> %c, %d
+ %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %r
+}
+
+; CHECK-LABEL: test_select_cc_f16_f32(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_cc_f16_f32_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_cc_f16_f32_param_1];
+; CHECK-DAG: ld.param.v2.f32 {[[C0:%f[0-9]+]], [[C1:%f[0-9]+]]}, [test_select_cc_f16_f32_param_2];
+; CHECK-DAG: ld.param.v2.f32 {[[D0:%f[0-9]+]], [[D1:%f[0-9]+]]}, [test_select_cc_f16_f32_param_3];
+; CHECK-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[C0]], [[D0]]
+; CHECK-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[C1]], [[D1]]
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: selp.b16 [[R0:%h[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.b16 [[R1:%h[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
+ <2 x float> %c, <2 x float> %d) #0 {
+ %cc = fcmp une <2 x float> %c, %d
+ %r = select <2 x i1> %cc, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fcmp_une(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_une_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_une_param_1];
+; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp une <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ueq(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ueq_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ueq_param_1];
+; CHECK-F16: setp.equ.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.equ.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.equ.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ueq <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ugt(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ugt_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ugt_param_1];
+; CHECK-F16: setp.gtu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.gtu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.gtu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ugt <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_uge(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_uge_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_uge_param_1];
+; CHECK-F16: setp.geu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.geu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.geu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp uge <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ult(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ult_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ult_param_1];
+; CHECK-F16: setp.ltu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.ltu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.ltu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ult <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ule(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ule_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ule_param_1];
+; CHECK-F16: setp.leu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.leu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.leu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ule <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+
+; CHECK-LABEL: test_fcmp_uno(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_uno_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_uno_param_1];
+; CHECK-F16: setp.nan.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.nan.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.nan.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp uno <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_one(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_one_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_one_param_1];
+; CHECK-F16: setp.ne.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.ne.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.ne.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp one <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_oeq(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_oeq_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_oeq_param_1];
+; CHECK-F16: setp.eq.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.eq.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.eq.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp oeq <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ogt(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ogt_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ogt_param_1];
+; CHECK-F16: setp.gt.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.gt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.gt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ogt <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_oge(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_oge_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_oge_param_1];
+; CHECK-F16: setp.ge.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.ge.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.ge.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp oge <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_olt(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_olt_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_olt_param_1];
+; CHECK-F16: setp.lt.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.lt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.lt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp olt <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ole(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ole_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ole_param_1];
+; CHECK-F16: setp.le.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.le.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.le.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ole <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ord(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ord_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ord_param_1];
+; CHECK-F16: setp.num.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.num.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.num.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ord <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fptosi_i32(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptosi_i32_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.s32.f16 [[R0:%r[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.s32.f16 [[R1:%r[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i32> @test_fptosi_i32(<2 x half> %a) #0 {
+ %r = fptosi <2 x half> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+; CHECK-LABEL: test_fptosi_i64(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptosi_i64_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.s64.f16 [[R0:%rd[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.s64.f16 [[R1:%rd[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i64> @test_fptosi_i64(<2 x half> %a) #0 {
+ %r = fptosi <2 x half> %a to <2 x i64>
+ ret <2 x i64> %r
+}
+
+; CHECK-LABEL: test_fptoui_2xi32(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptoui_2xi32_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.u32.f16 [[R0:%r[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.u32.f16 [[R1:%r[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i32> @test_fptoui_2xi32(<2 x half> %a) #0 {
+ %r = fptoui <2 x half> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+; CHECK-LABEL: test_fptoui_2xi64(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptoui_2xi64_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.u64.f16 [[R0:%rd[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.u64.f16 [[R1:%rd[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i64> @test_fptoui_2xi64(<2 x half> %a) #0 {
+ %r = fptoui <2 x half> %a to <2 x i64>
+ ret <2 x i64> %r
+}
+
+; CHECK-LABEL: test_uitofp_2xi32(
+; CHECK: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_uitofp_2xi32_param_0];
+; CHECK-DAG: cvt.rn.f16.u32 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.u32 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_uitofp_2xi32(<2 x i32> %a) #0 {
+ %r = uitofp <2 x i32> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_uitofp_2xi64(
+; CHECK: ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_uitofp_2xi64_param_0];
+; CHECK-DAG: cvt.rn.f32.u64 [[F0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f32.u64 [[F1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[F0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[F1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_uitofp_2xi64(<2 x i64> %a) #0 {
+ %r = uitofp <2 x i64> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_sitofp_2xi32(
+; CHECK: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_sitofp_2xi32_param_0];
+; CHECK-DAG: cvt.rn.f16.s32 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.s32 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sitofp_2xi32(<2 x i32> %a) #0 {
+ %r = sitofp <2 x i32> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_sitofp_2xi64(
+; CHECK: ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_sitofp_2xi64_param_0];
+; CHECK-DAG: cvt.rn.f32.s64 [[F0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f32.s64 [[F1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[F0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[F1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sitofp_2xi64(<2 x i64> %a) #0 {
+ %r = sitofp <2 x i64> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_uitofp_2xi32_fadd(
+; CHECK-DAG: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_uitofp_2xi32_fadd_param_1];
+; CHECK-DAG: cvt.rn.f16.u32 [[C0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.u32 [[C1:%h[0-9]+]], [[A1]];
+
+; CHECK-F16-DAG: mov.b32 [[C:%hh[0-9]+]], {[[C0]], [[C1]]}
+; CHECK-F16-DAG: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
+ %c = uitofp <2 x i32> %a to <2 x half>
+ %r = fadd <2 x half> %b, %c
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_sitofp_2xi32_fadd(
+; CHECK-DAG: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_sitofp_2xi32_fadd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_sitofp_2xi32_fadd_param_1];
+; CHECK-DAG: cvt.rn.f16.s32 [[C0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.s32 [[C1:%h[0-9]+]], [[A1]];
+;
+; CHECK-F16-DAG: mov.b32 [[C:%hh[0-9]+]], {[[C0]], [[C1]]}
+; CHECK-F16-DAG: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
+ %c = sitofp <2 x i32> %a to <2 x half>
+ %r = fadd <2 x half> %b, %c
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fptrunc_2xfloat(
+; CHECK: ld.param.v2.f32 {[[A0:%f[0-9]+]], [[A1:%f[0-9]+]]}, [test_fptrunc_2xfloat_param_0];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
+ %r = fptrunc <2 x float> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fptrunc_2xdouble(
+; CHECK: ld.param.v2.f64 {[[A0:%fd[0-9]+]], [[A1:%fd[0-9]+]]}, [test_fptrunc_2xdouble_param_0];
+; CHECK-DAG: cvt.rn.f16.f64 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.f64 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
+ %r = fptrunc <2 x double> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fpext_2xfloat(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fpext_2xfloat_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[R0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[R1:%f[0-9]+]], [[A1]];
+; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK: ret;
+define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
+ %r = fpext <2 x half> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+; CHECK-LABEL: test_fpext_2xdouble(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fpext_2xdouble_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f64.f16 [[R0:%fd[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f64.f16 [[R1:%fd[0-9]+]], [[A1]];
+; CHECK-NEXT: st.param.v2.f64 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK: ret;
+define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
+ %r = fpext <2 x half> %a to <2 x double>
+ ret <2 x double> %r
+}
+
+
+; CHECK-LABEL: test_bitcast_2xhalf_to_2xi16(
+; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_bitcast_2xhalf_to_2xi16_param_0];
+; CHECK-DAG: cvt.u16.u32 [[R0:%rs[0-9]+]], [[A]]
+; CHECK-DAG: shr.u32 [[AH:%r[0-9]+]], [[A]], 16
+; CHECK-DAG: cvt.u16.u32 [[R1:%rs[0-9]+]], [[AH]]
+; CHECK: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
+ %r = bitcast <2 x half> %a to <2 x i16>
+ ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_bitcast_2xi16_to_2xhalf(
+; CHECK: ld.param.v2.u16 {[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [test_bitcast_2xi16_to_2xhalf_param_0];
+; CHECK-DAG: cvt.u32.u16 [[R0:%r[0-9]+]], [[RS0]];
+; CHECK-DAG: cvt.u32.u16 [[R1:%r[0-9]+]], [[RS1]];
+; CHECK-DAG: shl.b32 [[R1H:%r[0-9]+]], [[R1]], 16;
+; CHECK-DAG: or.b32 [[R1H0L:%r[0-9]+]], [[R0]], [[R1H]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], [[R1H0L]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 {
+ %r = bitcast <2 x i16> %a to <2 x half>
+ ret <2 x half> %r
+}
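+; Note: the shl/or sequence above packs the two i16 lanes low-lane-first into
+; one b32 register, i.e. r = lo | (hi << 16).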
+
+
+declare <2 x half> @llvm.sqrt.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b) #0
+declare <2 x half> @llvm.sin.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.cos.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.exp.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.exp2.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.log.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.log10.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.log2.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
+declare <2 x half> @llvm.fabs.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.floor.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.ceil.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.trunc.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.rint.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.nearbyint.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.round.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
+
+; CHECK-LABEL: test_sqrt(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_sqrt_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: sqrt.rn.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: sqrt.rn.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sqrt(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.sqrt.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_powi(
+;define <2 x half> @test_powi(<2 x half> %a, <2 x i32> %b) #0 {
+; %r = call <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b)
+; ret <2 x half> %r
+;}
+
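+; The sin/cos tests below carry attribute #1 ("unsafe-fp-math"="true"): the
+; NVPTX backend only selects the approximate sin.approx.f32/cos.approx.f32
+; instructions when unsafe FP math is allowed, and the f16 operands are
+; promoted to f32 for the computation and converted back afterwards.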
+; CHECK-LABEL: test_sin(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_sin_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: sin.approx.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: sin.approx.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sin(<2 x half> %a) #0 #1 {
+ %r = call <2 x half> @llvm.sin.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_cos(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_cos_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cos.approx.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: cos.approx.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_cos(<2 x half> %a) #0 #1 {
+ %r = call <2 x half> @llvm.cos.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_pow(
+;define <2 x half> @test_pow(<2 x half> %a, <2 x half> %b) #0 {
+; %r = call <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp(
+;define <2 x half> @test_exp(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.exp.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp2(
+;define <2 x half> @test_exp2(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.exp2.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log(
+;define <2 x half> @test_log(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.log.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log10(
+;define <2 x half> @test_log10(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.log10.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log2(
+;define <2 x half> @test_log2(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.log2.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+; CHECK-LABEL: test_fma(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fma_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fma_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_fma_param_2];
+;
+; CHECK-F16: fma.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret
+define <2 x half> @test_fma(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
+ %r = call <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fabs(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fabs_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: abs.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: abs.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fabs(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.fabs.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_minnum(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_minnum_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_minnum_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[BF0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[BF1:%f[0-9]+]], [[B1]];
+; CHECK-DAG: min.f32 [[RF0:%f[0-9]+]], [[AF0]], [[BF0]];
+; CHECK-DAG: min.f32 [[RF1:%f[0-9]+]], [[AF1]], [[BF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_minnum(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_maxnum(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_maxnum_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_maxnum_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[BF0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[BF1:%f[0-9]+]], [[B1]];
+; CHECK-DAG: max.f32 [[RF0:%f[0-9]+]], [[AF0]], [[BF0]];
+; CHECK-DAG: max.f32 [[RF1:%f[0-9]+]], [[AF1]], [[BF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_maxnum(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_copysign(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_copysign_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b16 [[BS0:%rs[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b16 [[BS1:%rs[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AX0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AX1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b16 [[BX0:%rs[0-9]+]], [[BS0]], -32768;
+; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[BS1]], -32768;
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AX0]], [[BX0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AX1]], [[BX1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_copysign(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
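+
+; The copysign expansion above works on the raw half bits: and.b16 with 32767
+; (0x7fff) keeps the magnitude bits of %a, and.b16 with -32768 (0x8000) keeps
+; only the sign bit of %b, and or.b16 combines them into the result element.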
+
+; CHECK-LABEL: test_copysign_f32(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_f32_param_0];
+; CHECK-DAG: ld.param.v2.f32 {[[B0:%f[0-9]+]], [[B1:%f[0-9]+]]}, [test_copysign_f32_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b32 [[BI0:%r[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b32 [[BI1:%r[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AI0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AI1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[BI0]], -2147483648;
+; CHECK-DAG: and.b32 [[BX1:%r[0-9]+]], [[BI1]], -2147483648;
+; CHECK-DAG: shr.u32 [[BY0:%r[0-9]+]], [[BX0]], 16;
+; CHECK-DAG: shr.u32 [[BY1:%r[0-9]+]], [[BX1]], 16;
+; CHECK-DAG: cvt.u16.u32 [[BZ0:%rs[0-9]+]], [[BY0]];
+; CHECK-DAG: cvt.u16.u32 [[BZ1:%rs[0-9]+]], [[BY1]];
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AI0]], [[BZ0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AI1]], [[BZ1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
+ %tb = fptrunc <2 x float> %b to <2 x half>
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %tb)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_copysign_f64(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_f64_param_0];
+; CHECK-DAG: ld.param.v2.f64 {[[B0:%fd[0-9]+]], [[B1:%fd[0-9]+]]}, [test_copysign_f64_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b64 [[BI0:%rd[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b64 [[BI1:%rd[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AI0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AI1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b64 [[BX0:%rd[0-9]+]], [[BI0]], -9223372036854775808;
+; CHECK-DAG: and.b64 [[BX1:%rd[0-9]+]], [[BI1]], -9223372036854775808;
+; CHECK-DAG: shr.u64 [[BY0:%rd[0-9]+]], [[BX0]], 48;
+; CHECK-DAG: shr.u64 [[BY1:%rd[0-9]+]], [[BX1]], 48;
+; CHECK-DAG: cvt.u16.u64 [[BZ0:%rs[0-9]+]], [[BY0]];
+; CHECK-DAG: cvt.u16.u64 [[BZ1:%rs[0-9]+]], [[BY1]];
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AI0]], [[BZ0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AI1]], [[BZ1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
+ %tb = fptrunc <2 x double> %b to <2 x half>
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %tb)
+ ret <2 x half> %r
+}
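+
+; When the sign operand is first truncated from a wider type, only its top bit
+; matters: the f32/f64 variants mask the sign bit (0x80000000 or
+; 0x8000000000000000), shift it right by 16 or 48 so it lands in bit 15 of a
+; 16-bit value, and or it into the masked magnitude of the half, as the checks
+; above verify.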
+
+; CHECK-LABEL: test_copysign_extended(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_extended_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_copysign_extended_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b16 [[BS0:%rs[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b16 [[BS1:%rs[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AX0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AX1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b16 [[BX0:%rs[0-9]+]], [[BS0]], -32768;
+; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[BS1]], -32768;
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AX0]], [[BX0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AX1]], [[BX1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: mov.b32 {[[RX0:%h[0-9]+]], [[RX1:%h[0-9]+]]}, [[R]]
+; CHECK-DAG: cvt.f32.f16 [[XR0:%f[0-9]+]], [[RX0]];
+; CHECK-DAG: cvt.f32.f16 [[XR1:%f[0-9]+]], [[RX1]];
+; CHECK: st.param.v2.f32 [func_retval0+0], {[[XR0]], [[XR1]]};
+; CHECK: ret;
+define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
+ %xr = fpext <2 x half> %r to <2 x float>
+ ret <2 x float> %xr
+}
+
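+; The rounding tests below map straight onto cvt rounding modifiers: floor
+; uses cvt.rmi (toward -inf), ceil uses cvt.rpi (toward +inf), trunc uses
+; cvt.rzi (toward zero), and rint/nearbyint/round are all checked against
+; cvt.rni (to nearest even) in this lowering.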
+; CHECK-LABEL: test_floor(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_floor_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rmi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rmi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_floor(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.floor.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_ceil(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_ceil_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rpi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rpi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_ceil(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.ceil.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_trunc(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_trunc_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rzi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rzi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_trunc(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.trunc.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_rint(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_rint_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_rint(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.rint.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_nearbyint(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_nearbyint_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_nearbyint(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.nearbyint.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_round(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_round_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_round(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.round.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fmuladd(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fmuladd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fmuladd_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_fmuladd_param_2];
+;
+; CHECK-F16: fma.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fmuladd(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
+ %r = call <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
+ ret <2 x half> %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "unsafe-fp-math" = "true" }
diff --git a/test/CodeGen/NVPTX/fma.ll b/test/CodeGen/NVPTX/fma.ll
index 6785a01827e2..351f9b20dc0c 100644
--- a/test/CodeGen/NVPTX/fma.ll
+++ b/test/CodeGen/NVPTX/fma.ll
@@ -1,42 +1,42 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
-
-declare float @dummy_f32(float, float) #0
-declare double @dummy_f64(double, double) #0
-
-define ptx_device float @t1_f32(float %x, float %y, float %z) {
-; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
-; CHECK: ret;
- %a = fmul float %x, %y
- %b = fadd float %a, %z
- ret float %b
-}
-
-define ptx_device float @t2_f32(float %x, float %y, float %z, float %w) {
-; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
-; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
-; CHECK: ret;
- %a = fmul float %x, %y
- %b = fadd float %a, %z
- %c = fadd float %a, %w
- %d = call float @dummy_f32(float %b, float %c)
- ret float %d
-}
-
-define ptx_device double @t1_f64(double %x, double %y, double %z) {
-; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
-; CHECK: ret;
- %a = fmul double %x, %y
- %b = fadd double %a, %z
- ret double %b
-}
-
-define ptx_device double @t2_f64(double %x, double %y, double %z, double %w) {
-; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
-; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
-; CHECK: ret;
- %a = fmul double %x, %y
- %b = fadd double %a, %z
- %c = fadd double %a, %w
- %d = call double @dummy_f64(double %b, double %c)
- ret double %d
-}
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast -verify-machineinstrs | FileCheck %s
+
+declare float @dummy_f32(float, float) #0
+declare double @dummy_f64(double, double) #0
+
+define ptx_device float @t1_f32(float %x, float %y, float %z) {
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul float %x, %y
+ %b = fadd float %a, %z
+ ret float %b
+}
+
+define ptx_device float @t2_f32(float %x, float %y, float %z, float %w) {
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul float %x, %y
+ %b = fadd float %a, %z
+ %c = fadd float %a, %w
+ %d = call float @dummy_f32(float %b, float %c)
+ ret float %d
+}
+
+define ptx_device double @t1_f64(double %x, double %y, double %z) {
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul double %x, %y
+ %b = fadd double %a, %z
+ ret double %b
+}
+
+define ptx_device double @t2_f64(double %x, double %y, double %z, double %w) {
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul double %x, %y
+ %b = fadd double %a, %z
+ %c = fadd double %a, %w
+ %d = call double @dummy_f64(double %b, double %c)
+ ret double %d
+}
diff --git a/test/CodeGen/NVPTX/i8-param.ll b/test/CodeGen/NVPTX/i8-param.ll
index 6a1e3a0e1a0d..c41da0eebd1f 100644
--- a/test/CodeGen/NVPTX/i8-param.ll
+++ b/test/CodeGen/NVPTX/i8-param.ll
@@ -1,23 +1,23 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-
-; CHECK: .visible .func (.param .b32 func_retval0) callee
-define i8 @callee(i8 %a) {
-; CHECK: ld.param.u8
- %ret = add i8 %a, 42
-; CHECK: st.param.b32
- ret i8 %ret
-}
-
-; CHECK: .visible .func caller
-define void @caller(i8* %a) {
-; CHECK: ld.u8
- %val = load i8, i8* %a
- %ret = tail call i8 @callee(i8 %val)
-; CHECK: ld.param.b32
- store i8 %ret, i8* %a
- ret void
-}
-
-
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+
+; CHECK: .visible .func (.param .b32 func_retval0) callee
+define i8 @callee(i8 %a) {
+; CHECK: ld.param.u8
+ %ret = add i8 %a, 42
+; CHECK: st.param.b32
+ ret i8 %ret
+}
+
+; CHECK: .visible .func caller
+define void @caller(i8* %a) {
+; CHECK: ld.u8
+ %val = load i8, i8* %a
+ %ret = tail call i8 @callee(i8 %val)
+; CHECK: ld.param.b32
+ store i8 %ret, i8* %a
+ ret void
+}
+
+
diff --git a/test/CodeGen/NVPTX/param-load-store.ll b/test/CodeGen/NVPTX/param-load-store.ll
index 8a67567acc96..83991a2930a8 100644
--- a/test/CodeGen/NVPTX/param-load-store.ll
+++ b/test/CodeGen/NVPTX/param-load-store.ll
@@ -1,939 +1,939 @@
-; Verifies correctness of load/store of parameters and return values.
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 | FileCheck %s
-
-%s_i1 = type { i1 }
-%s_i8 = type { i8 }
-%s_i16 = type { i16 }
-%s_f16 = type { half }
-%s_i32 = type { i32 }
-%s_f32 = type { float }
-%s_i64 = type { i64 }
-%s_f64 = type { double }
-
-; More complicated types. i64 is used to increase natural alignment
-; requirement for the type.
-%s_i32x4 = type { i32, i32, i32, i32, i64}
-%s_i32f32 = type { i32, float, i32, float, i64}
-%s_i8i32x4 = type { i32, i32, i8, i32, i32, i64}
-%s_i8i32x4p = type <{ i32, i32, i8, i32, i32, i64}>
-%s_crossfield = type { i32, [2 x i32], <4 x i32>, [3 x {i32, i32, i32}]}
-; All scalar parameters must be at least 32 bits in size.
-; i1 is loaded/stored as i8.
-
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i1(
-; CHECK-NEXT: .param .b32 test_i1_param_0
-; CHECK: ld.param.u8 [[A8:%r[0-9]+]], [test_i1_param_0];
-; CHECK: and.b32 [[A:%r[0-9]+]], [[A8]], 1;
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]]
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni
-; CHECK-NEXT: test_i1,
-; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
-; CHECK: and.b32 [[R:%r[0-9]+]], [[R8]], 1;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK: ret;
-define i1 @test_i1(i1 %a) {
- %r = tail call i1 @test_i1(i1 %a);
- ret i1 %r;
-}
-
-; Signed i1 is a somewhat special case. We only care about one bit and
-; then use neg.s32 to convert it to 32-bit -1 if it's set.
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i1s(
-; CHECK-NEXT: .param .b32 test_i1s_param_0
-; CHECK: ld.param.u8 [[A8:%rs[0-9]+]], [test_i1s_param_0];
-; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
-; CHECK: and.b32 [[A1:%r[0-9]+]], [[A32]], 1;
-; CHECK: neg.s32 [[A:%r[0-9]+]], [[A1]];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni
-; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
-; CHECK: and.b32 [[R1:%r[0-9]+]], [[R8]], 1;
-; CHECK: neg.s32 [[R:%r[0-9]+]], [[R1]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define signext i1 @test_i1s(i1 signext %a) {
- %r = tail call signext i1 @test_i1s(i1 signext %a);
- ret i1 %r;
-}
-
-; Make sure that i1 loads are vectorized as i8 loads, respecting each element alignment.
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_v3i1(
-; CHECK-NEXT: .param .align 4 .b8 test_v3i1_param_0[4]
-; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i1_param_0+2];
-; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i1_param_0]
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK-DAG: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
-; CHECK-DAG: st.param.b8 [param0+2], [[E2]];
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v3i1,
-; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
-; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]}
-; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
-; CHECK-NEXT: ret;
-define <3 x i1> @test_v3i1(<3 x i1> %a) {
- %r = tail call <3 x i1> @test_v3i1(<3 x i1> %a);
- ret <3 x i1> %r;
-}
-
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_v4i1(
-; CHECK-NEXT: .param .align 4 .b8 test_v4i1_param_0[4]
-; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i1_param_0]
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK: test_v4i1,
-; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]};
-; CHECK-NEXT: ret;
-define <4 x i1> @test_v4i1(<4 x i1> %a) {
- %r = tail call <4 x i1> @test_v4i1(<4 x i1> %a);
- ret <4 x i1> %r;
-}
-
-; CHECK: .func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_v5i1(
-; CHECK-NEXT: .param .align 8 .b8 test_v5i1_param_0[8]
-; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i1_param_0+4];
-; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i1_param_0]
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v5i1,
-; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
-; CHECK-NEXT: ret;
-define <5 x i1> @test_v5i1(<5 x i1> %a) {
- %r = tail call <5 x i1> @test_v5i1(<5 x i1> %a);
- ret <5 x i1> %r;
-}
-
-; Unsigned i8 is loaded directly into a 32-bit register.
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i8(
-; CHECK-NEXT: .param .b32 test_i8_param_0
-; CHECK: ld.param.u8 [[A8:%rs[0-9]+]], [test_i8_param_0];
-; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
-; CHECK: and.b32 [[A:%r[0-9]+]], [[A32]], 255;
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK: test_i8,
-; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
-; CHECK: and.b32 [[R:%r[0-9]+]], [[R32]], 255;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i8 @test_i8(i8 %a) {
- %r = tail call i8 @test_i8(i8 %a);
- ret i8 %r;
-}
-
-; Signed i8 is loaded into a 16-bit register which is then sign-extended to i32.
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i8s(
-; CHECK-NEXT: .param .b32 test_i8s_param_0
-; CHECK: ld.param.s8 [[A8:%rs[0-9]+]], [test_i8s_param_0];
-; CHECK: cvt.s32.s16 [[A:%r[0-9]+]], [[A8]];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK: test_i8s,
-; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
-; -- This is suspicious (though correct) -- why not cvt.u8.u32, cvt.s8.s32 ?
-; CHECK: cvt.u16.u32 [[R16:%rs[0-9]+]], [[R32]];
-; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[R16]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define signext i8 @test_i8s(i8 signext %a) {
- %r = tail call signext i8 @test_i8s(i8 signext %a);
- ret i8 %r;
-}
-
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_v3i8(
-; CHECK-NEXT: .param .align 4 .b8 test_v3i8_param_0[4]
-; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i8_param_0+2];
-; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i8_param_0];
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.b8 [param0+2], [[E2]];
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v3i8,
-; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
-; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
-; CHECK-NEXT: ret;
-define <3 x i8> @test_v3i8(<3 x i8> %a) {
- %r = tail call <3 x i8> @test_v3i8(<3 x i8> %a);
- ret <3 x i8> %r;
-}
-
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_v4i8(
-; CHECK-NEXT: .param .align 4 .b8 test_v4i8_param_0[4]
-; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i8_param_0]
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v4i8,
-; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-NEXT: ret;
-define <4 x i8> @test_v4i8(<4 x i8> %a) {
- %r = tail call <4 x i8> @test_v4i8(<4 x i8> %a);
- ret <4 x i8> %r;
-}
-
-; CHECK: .func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_v5i8(
-; CHECK-NEXT: .param .align 8 .b8 test_v5i8_param_0[8]
-; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i8_param_0+4];
-; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i8_param_0]
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v5i8,
-; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
-; CHECK-NEXT: ret;
-define <5 x i8> @test_v5i8(<5 x i8> %a) {
- %r = tail call <5 x i8> @test_v5i8(<5 x i8> %a);
- ret <5 x i8> %r;
-}
-
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i16(
-; CHECK-NEXT: .param .b32 test_i16_param_0
-; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16_param_0];
-; CHECK: cvt.u32.u16 [[E32:%r[0-9]+]], [[E16]];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[E32]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_i16,
-; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
-; CHECK: and.b32 [[R:%r[0-9]+]], [[RE32]], 65535;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i16 @test_i16(i16 %a) {
- %r = tail call i16 @test_i16(i16 %a);
- ret i16 %r;
-}
-
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i16s(
-; CHECK-NEXT: .param .b32 test_i16s_param_0
-; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16s_param_0];
-; CHECK: cvt.s32.s16 [[E32:%r[0-9]+]], [[E16]];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[E32]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_i16s,
-; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
-; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[RE32]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define signext i16 @test_i16s(i16 signext %a) {
- %r = tail call signext i16 @test_i16s(i16 signext %a);
- ret i16 %r;
-}
-
-; CHECK: .func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_v3i16(
-; CHECK-NEXT: .param .align 8 .b8 test_v3i16_param_0[8]
-; CHECK-DAG: ld.param.u16 [[E2:%rs[0-9]+]], [test_v3i16_param_0+4];
-; CHECK-DAG: ld.param.v2.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i16_param_0];
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.b16 [param0+4], [[E2]];
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v3i16,
-; CHECK: ld.param.v2.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.b16 [[RE2:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK-DAG: st.param.b16 [func_retval0+4], [[RE2]];
-; CHECK-NEXT: ret;
-define <3 x i16> @test_v3i16(<3 x i16> %a) {
- %r = tail call <3 x i16> @test_v3i16(<3 x i16> %a);
- ret <3 x i16> %r;
-}
-
-; CHECK: .func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_v4i16(
-; CHECK-NEXT: .param .align 8 .b8 test_v4i16_param_0[8]
-; CHECK: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i16_param_0]
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v4i16,
-; CHECK: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-NEXT: ret;
-define <4 x i16> @test_v4i16(<4 x i16> %a) {
- %r = tail call <4 x i16> @test_v4i16(<4 x i16> %a);
- ret <4 x i16> %r;
-}
-
-; CHECK: .func (.param .align 16 .b8 func_retval0[16])
-; CHECK-LABEL: test_v5i16(
-; CHECK-NEXT: .param .align 16 .b8 test_v5i16_param_0[16]
-; CHECK-DAG: ld.param.u16 [[E4:%rs[0-9]+]], [test_v5i16_param_0+8];
-; CHECK-DAG: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i16_param_0]
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK-DAG: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
-; CHECK: .param .align 16 .b8 retval0[16];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v5i16,
-; CHECK-DAG: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b16 [[RE4:%rs[0-9]+]], [retval0+8];
-; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-DAG: st.param.b16 [func_retval0+8], [[RE4]];
-; CHECK-NEXT: ret;
-define <5 x i16> @test_v5i16(<5 x i16> %a) {
- %r = tail call <5 x i16> @test_v5i16(<5 x i16> %a);
- ret <5 x i16> %r;
-}
-
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_f16(
-; CHECK-NEXT: .param .b32 test_f16_param_0
-; CHECK: ld.param.b16 [[E:%h[0-9]+]], [test_f16_param_0];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b16 [param0+0], [[E]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_f16,
-; CHECK: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]]
-; CHECK-NEXT: ret;
-define half @test_f16(half %a) {
- %r = tail call half @test_f16(half %a);
- ret half %r;
-}
-
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_v2f16(
-; CHECK-NEXT: .param .align 4 .b8 test_v2f16_param_0[4]
-; CHECK: ld.param.b32 [[E:%hh[0-9]+]], [test_v2f16_param_0];
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.b32 [param0+0], [[E]];
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v2f16,
-; CHECK: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]]
-; CHECK-NEXT: ret;
-define <2 x half> @test_v2f16(<2 x half> %a) {
- %r = tail call <2 x half> @test_v2f16(<2 x half> %a);
- ret <2 x half> %r;
-}
-
-; CHECK:.func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_v3f16(
-; CHECK: .param .align 8 .b8 test_v3f16_param_0[8]
-; CHECK-DAG: ld.param.b32 [[HH01:%hh[0-9]+]], [test_v3f16_param_0];
-; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[HH01]];
-; CHECK-DAG: ld.param.b16 [[E2:%h[0-9]+]], [test_v3f16_param_0+4];
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK-DAG: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
-; CHECK-DAG: st.param.b16 [param0+4], [[E2]];
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK: test_v3f16,
-; CHECK-DAG: ld.param.v2.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b16 [[R2:%h[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]};
-; CHECK-DAG: st.param.b16 [func_retval0+4], [[R2]];
-; CHECK: ret;
-define <3 x half> @test_v3f16(<3 x half> %a) {
- %r = tail call <3 x half> @test_v3f16(<3 x half> %a);
- ret <3 x half> %r;
-}
-
-; CHECK:.func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_v4f16(
-; CHECK: .param .align 8 .b8 test_v4f16_param_0[8]
-; CHECK: ld.param.v2.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]]}, [test_v4f16_param_0];
-; CHECK-DAG: mov.b32 [[HH01:%hh[0-9]+]], [[R01]];
-; CHECK-DAG: mov.b32 [[HH23:%hh[0-9]+]], [[R23]];
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.v2.b32 [param0+0], {[[HH01]], [[HH23]]};
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK: test_v4f16,
-; CHECK: ld.param.v2.b32 {[[RH01:%hh[0-9]+]], [[RH23:%hh[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RH01]], [[RH23]]};
-; CHECK: ret;
-define <4 x half> @test_v4f16(<4 x half> %a) {
- %r = tail call <4 x half> @test_v4f16(<4 x half> %a);
- ret <4 x half> %r;
-}
-
-; CHECK:.func (.param .align 16 .b8 func_retval0[16])
-; CHECK-LABEL: test_v5f16(
-; CHECK: .param .align 16 .b8 test_v5f16_param_0[16]
-; CHECK-DAG: ld.param.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [test_v5f16_param_0];
-; CHECK-DAG: ld.param.b16 [[E4:%h[0-9]+]], [test_v5f16_param_0+8];
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK-DAG: st.param.v4.b16 [param0+0],
-; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
-; CHECK: .param .align 16 .b8 retval0[16];
-; CHECK: call.uni (retval0),
-; CHECK: test_v5f16,
-; CHECK-DAG: ld.param.v4.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]], [[R2:%h[0-9]+]], [[R3:%h[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b16 [[R4:%h[0-9]+]], [retval0+8];
-; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
-; CHECK-DAG: st.param.b16 [func_retval0+8], [[R4]];
-; CHECK: ret;
-define <5 x half> @test_v5f16(<5 x half> %a) {
- %r = tail call <5 x half> @test_v5f16(<5 x half> %a);
- ret <5 x half> %r;
-}
-
-; CHECK:.func (.param .align 16 .b8 func_retval0[16])
-; CHECK-LABEL: test_v8f16(
-; CHECK: .param .align 16 .b8 test_v8f16_param_0[16]
-; CHECK: ld.param.v4.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]], [[R45:%r[0-9]+]], [[R67:%r[0-9]+]]}, [test_v8f16_param_0];
-; CHECK-DAG: mov.b32 [[HH01:%hh[0-9]+]], [[R01]];
-; CHECK-DAG: mov.b32 [[HH23:%hh[0-9]+]], [[R23]];
-; CHECK-DAG: mov.b32 [[HH45:%hh[0-9]+]], [[R45]];
-; CHECK-DAG: mov.b32 [[HH67:%hh[0-9]+]], [[R67]];
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK: st.param.v4.b32 [param0+0], {[[HH01]], [[HH23]], [[HH45]], [[HH67]]};
-; CHECK: .param .align 16 .b8 retval0[16];
-; CHECK: call.uni (retval0),
-; CHECK: test_v8f16,
-; CHECK: ld.param.v4.b32 {[[RH01:%hh[0-9]+]], [[RH23:%hh[0-9]+]], [[RH45:%hh[0-9]+]], [[RH67:%hh[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b32 [func_retval0+0], {[[RH01]], [[RH23]], [[RH45]], [[RH67]]};
-; CHECK: ret;
-define <8 x half> @test_v8f16(<8 x half> %a) {
- %r = tail call <8 x half> @test_v8f16(<8 x half> %a);
- ret <8 x half> %r;
-}
-
-; CHECK:.func (.param .align 32 .b8 func_retval0[32])
-; CHECK-LABEL: test_v9f16(
-; CHECK: .param .align 32 .b8 test_v9f16_param_0[32]
-; CHECK-DAG: ld.param.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [test_v9f16_param_0];
-; CHECK-DAG: ld.param.v4.b16 {[[E4:%h[0-9]+]], [[E5:%h[0-9]+]], [[E6:%h[0-9]+]], [[E7:%h[0-9]+]]}, [test_v9f16_param_0+8];
-; CHECK-DAG: ld.param.b16 [[E8:%h[0-9]+]], [test_v9f16_param_0+16];
-; CHECK: .param .align 32 .b8 param0[32];
-; CHECK-DAG: st.param.v4.b16 [param0+0],
-; CHECK-DAG: st.param.v4.b16 [param0+8],
-; CHECK-DAG: st.param.b16 [param0+16], [[E8]];
-; CHECK: .param .align 32 .b8 retval0[32];
-; CHECK: call.uni (retval0),
-; CHECK: test_v9f16,
-; CHECK-DAG: ld.param.v4.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]], [[R2:%h[0-9]+]], [[R3:%h[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.v4.b16 {[[R4:%h[0-9]+]], [[R5:%h[0-9]+]], [[R6:%h[0-9]+]], [[R7:%h[0-9]+]]}, [retval0+8];
-; CHECK-DAG: ld.param.b16 [[R8:%h[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
-; CHECK-DAG: st.param.v4.b16 [func_retval0+8], {[[R4]], [[R5]], [[R6]], [[R7]]};
-; CHECK-DAG: st.param.b16 [func_retval0+16], [[R8]];
-; CHECK: ret;
-define <9 x half> @test_v9f16(<9 x half> %a) {
- %r = tail call <9 x half> @test_v9f16(<9 x half> %a);
- ret <9 x half> %r;
-}
-
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_i32(
-; CHECK-NEXT: .param .b32 test_i32_param_0
-; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_i32_param_0];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[E]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_i32,
-; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i32 @test_i32(i32 %a) {
- %r = tail call i32 @test_i32(i32 %a);
- ret i32 %r;
-}
-
-; CHECK: .func (.param .align 16 .b8 func_retval0[16])
-; CHECK-LABEL: test_v3i32(
-; CHECK-NEXT: .param .align 16 .b8 test_v3i32_param_0[16]
-; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_v3i32_param_0+8];
-; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v3i32_param_0];
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.b32 [param0+8], [[E2]];
-; CHECK: .param .align 16 .b8 retval0[16];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v3i32,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
-; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
-; CHECK-NEXT: ret;
-define <3 x i32> @test_v3i32(<3 x i32> %a) {
- %r = tail call <3 x i32> @test_v3i32(<3 x i32> %a);
- ret <3 x i32> %r;
-}
-
-; CHECK: .func (.param .align 16 .b8 func_retval0[16])
-; CHECK-LABEL: test_v4i32(
-; CHECK-NEXT: .param .align 16 .b8 test_v4i32_param_0[16]
-; CHECK: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v4i32_param_0]
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: .param .align 16 .b8 retval0[16];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v4i32,
-; CHECK: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-NEXT: ret;
-define <4 x i32> @test_v4i32(<4 x i32> %a) {
- %r = tail call <4 x i32> @test_v4i32(<4 x i32> %a);
- ret <4 x i32> %r;
-}
-
-; CHECK: .func (.param .align 32 .b8 func_retval0[32])
-; CHECK-LABEL: test_v5i32(
-; CHECK-NEXT: .param .align 32 .b8 test_v5i32_param_0[32]
-; CHECK-DAG: ld.param.u32 [[E4:%r[0-9]+]], [test_v5i32_param_0+16];
-; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v5i32_param_0]
-; CHECK: .param .align 32 .b8 param0[32];
-; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK-DAG: st.param.b32 [param0+16], [[E4]];
-; CHECK: .param .align 32 .b8 retval0[32];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v5i32,
-; CHECK-DAG: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
-; CHECK-DAG: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
-; CHECK-DAG: st.param.b32 [func_retval0+16], [[RE4]];
-; CHECK-NEXT: ret;
-define <5 x i32> @test_v5i32(<5 x i32> %a) {
- %r = tail call <5 x i32> @test_v5i32(<5 x i32> %a);
- ret <5 x i32> %r;
-}
-
-; CHECK: .func (.param .b32 func_retval0)
-; CHECK-LABEL: test_f32(
-; CHECK-NEXT: .param .b32 test_f32_param_0
-; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_f32_param_0];
-; CHECK: .param .b32 param0;
-; CHECK: st.param.f32 [param0+0], [[E]];
-; CHECK: .param .b32 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_f32,
-; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
-; CHECK: st.param.f32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define float @test_f32(float %a) {
- %r = tail call float @test_f32(float %a);
- ret float %r;
-}
-
-; CHECK: .func (.param .b64 func_retval0)
-; CHECK-LABEL: test_i64(
-; CHECK-NEXT: .param .b64 test_i64_param_0
-; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_i64_param_0];
-; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], [[E]];
-; CHECK: .param .b64 retval0;
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_i64,
-; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define i64 @test_i64(i64 %a) {
- %r = tail call i64 @test_i64(i64 %a);
- ret i64 %r;
-}
-
-; CHECK: .func (.param .align 32 .b8 func_retval0[32])
-; CHECK-LABEL: test_v3i64(
-; CHECK-NEXT: .param .align 32 .b8 test_v3i64_param_0[32]
-; CHECK-DAG: ld.param.u64 [[E2:%rd[0-9]+]], [test_v3i64_param_0+16];
-; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v3i64_param_0];
-; CHECK: .param .align 32 .b8 param0[32];
-; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.b64 [param0+16], [[E2]];
-; CHECK: .param .align 32 .b8 retval0[32];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v3i64,
-; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.b64 [[RE2:%rd[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE2]];
-; CHECK-NEXT: ret;
-define <3 x i64> @test_v3i64(<3 x i64> %a) {
- %r = tail call <3 x i64> @test_v3i64(<3 x i64> %a);
- ret <3 x i64> %r;
-}
-
-; For i64, vector loads are limited by PTX to 2 elements.
-; CHECK: .func (.param .align 32 .b8 func_retval0[32])
-; CHECK-LABEL: test_v4i64(
-; CHECK-NEXT: .param .align 32 .b8 test_v4i64_param_0[32]
-; CHECK-DAG: ld.param.v2.u64 {[[E2:%rd[0-9]+]], [[E3:%rd[0-9]+]]}, [test_v4i64_param_0+16];
-; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v4i64_param_0];
-; CHECK: .param .align 32 .b8 param0[32];
-; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.v2.b64 [param0+16], {[[E2]], [[E3]]};
-; CHECK: .param .align 32 .b8 retval0[32];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_v4i64,
-; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.v2.b64 {[[RE2:%rd[0-9]+]], [[RE3:%rd[0-9]+]]}, [retval0+16];
-; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[RE2]], [[RE3]]};
-; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK-NEXT: ret;
-define <4 x i64> @test_v4i64(<4 x i64> %a) {
- %r = tail call <4 x i64> @test_v4i64(<4 x i64> %a);
- ret <4 x i64> %r;
-}
-
-; Aggregates, on the other hand, do not get extended.
-
-; CHECK: .func (.param .align 1 .b8 func_retval0[1])
-; CHECK-LABEL: test_s_i1(
-; CHECK-NEXT: .param .align 1 .b8 test_s_i1_param_0[1]
-; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i1_param_0];
-; CHECK: .param .align 1 .b8 param0[1];
-; CHECK: st.param.b8 [param0+0], [[A]]
-; CHECK: .param .align 1 .b8 retval0[1];
-; CHECK: call.uni
-; CHECK-NEXT: test_s_i1,
-; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b8 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_i1 @test_s_i1(%s_i1 %a) {
- %r = tail call %s_i1 @test_s_i1(%s_i1 %a);
- ret %s_i1 %r;
-}
-
-; CHECK: .func (.param .align 1 .b8 func_retval0[1])
-; CHECK-LABEL: test_s_i8(
-; CHECK-NEXT: .param .align 1 .b8 test_s_i8_param_0[1]
-; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i8_param_0];
-; CHECK: .param .align 1 .b8 param0[1];
-; CHECK: st.param.b8 [param0+0], [[A]]
-; CHECK: .param .align 1 .b8 retval0[1];
-; CHECK: call.uni
-; CHECK-NEXT: test_s_i8,
-; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b8 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_i8 @test_s_i8(%s_i8 %a) {
- %r = tail call %s_i8 @test_s_i8(%s_i8 %a);
- ret %s_i8 %r;
-}
-
-; CHECK: .func (.param .align 2 .b8 func_retval0[2])
-; CHECK-LABEL: test_s_i16(
-; CHECK-NEXT: .param .align 2 .b8 test_s_i16_param_0[2]
-; CHECK: ld.param.u16 [[A:%rs[0-9]+]], [test_s_i16_param_0];
-; CHECK: .param .align 2 .b8 param0[2];
-; CHECK: st.param.b16 [param0+0], [[A]]
-; CHECK: .param .align 2 .b8 retval0[2];
-; CHECK: call.uni
-; CHECK-NEXT: test_s_i16,
-; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_i16 @test_s_i16(%s_i16 %a) {
- %r = tail call %s_i16 @test_s_i16(%s_i16 %a);
- ret %s_i16 %r;
-}
-
-; CHECK: .func (.param .align 2 .b8 func_retval0[2])
-; CHECK-LABEL: test_s_f16(
-; CHECK-NEXT: .param .align 2 .b8 test_s_f16_param_0[2]
-; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_s_f16_param_0];
-; CHECK: .param .align 2 .b8 param0[2];
-; CHECK: st.param.b16 [param0+0], [[A]]
-; CHECK: .param .align 2 .b8 retval0[2];
-; CHECK: call.uni
-; CHECK-NEXT: test_s_f16,
-; CHECK: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_f16 @test_s_f16(%s_f16 %a) {
- %r = tail call %s_f16 @test_s_f16(%s_f16 %a);
- ret %s_f16 %r;
-}
-
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_s_i32(
-; CHECK-NEXT: .param .align 4 .b8 test_s_i32_param_0[4]
-; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_s_i32_param_0];
-; CHECK: .param .align 4 .b8 param0[4]
-; CHECK: st.param.b32 [param0+0], [[E]];
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_s_i32,
-; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_i32 @test_s_i32(%s_i32 %a) {
- %r = tail call %s_i32 @test_s_i32(%s_i32 %a);
- ret %s_i32 %r;
-}
-
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
-; CHECK-LABEL: test_s_f32(
-; CHECK-NEXT: .param .align 4 .b8 test_s_f32_param_0[4]
-; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_s_f32_param_0];
-; CHECK: .param .align 4 .b8 param0[4]
-; CHECK: st.param.f32 [param0+0], [[E]];
-; CHECK: .param .align 4 .b8 retval0[4];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_s_f32,
-; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
-; CHECK: st.param.f32 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_f32 @test_s_f32(%s_f32 %a) {
- %r = tail call %s_f32 @test_s_f32(%s_f32 %a);
- ret %s_f32 %r;
-}
-
-; CHECK: .func (.param .align 8 .b8 func_retval0[8])
-; CHECK-LABEL: test_s_i64(
-; CHECK-NEXT: .param .align 8 .b8 test_s_i64_param_0[8]
-; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_s_i64_param_0];
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.b64 [param0+0], [[E]];
-; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_s_i64,
-; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], [[R]];
-; CHECK-NEXT: ret;
-define %s_i64 @test_s_i64(%s_i64 %a) {
- %r = tail call %s_i64 @test_s_i64(%s_i64 %a);
- ret %s_i64 %r;
-}
-
-; Fields that have different types but identical sizes are not vectorized.
-; CHECK: .func (.param .align 8 .b8 func_retval0[24])
-; CHECK-LABEL: test_s_i32f32(
-; CHECK: .param .align 8 .b8 test_s_i32f32_param_0[24]
-; CHECK-DAG: ld.param.u64 [[E4:%rd[0-9]+]], [test_s_i32f32_param_0+16];
-; CHECK-DAG: ld.param.f32 [[E3:%f[0-9]+]], [test_s_i32f32_param_0+12];
-; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_s_i32f32_param_0+8];
-; CHECK-DAG: ld.param.f32 [[E1:%f[0-9]+]], [test_s_i32f32_param_0+4];
-; CHECK-DAG: ld.param.u32 [[E0:%r[0-9]+]], [test_s_i32f32_param_0];
-; CHECK: .param .align 8 .b8 param0[24];
-; CHECK-DAG: st.param.b32 [param0+0], [[E0]];
-; CHECK-DAG: st.param.f32 [param0+4], [[E1]];
-; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
-; CHECK-DAG: st.param.f32 [param0+12], [[E3]];
-; CHECK-DAG: st.param.b64 [param0+16], [[E4]];
-; CHECK: .param .align 8 .b8 retval0[24];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_s_i32f32,
-; CHECK-DAG: ld.param.b32 [[RE0:%r[0-9]+]], [retval0+0];
-; CHECK-DAG: ld.param.f32 [[RE1:%f[0-9]+]], [retval0+4];
-; CHECK-DAG: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
-; CHECK-DAG: ld.param.f32 [[RE3:%f[0-9]+]], [retval0+12];
-; CHECK-DAG: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.b32 [func_retval0+0], [[RE0]];
-; CHECK-DAG: st.param.f32 [func_retval0+4], [[RE1]];
-; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
-; CHECK-DAG: st.param.f32 [func_retval0+12], [[RE3]];
-; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
-; CHECK: ret;
-define %s_i32f32 @test_s_i32f32(%s_i32f32 %a) {
- %r = tail call %s_i32f32 @test_s_i32f32(%s_i32f32 %a);
- ret %s_i32f32 %r;
-}
-
-; We do vectorize consecutive fields with matching types.
-; CHECK:.visible .func (.param .align 8 .b8 func_retval0[24])
-; CHECK-LABEL: test_s_i32x4(
-; CHECK: .param .align 8 .b8 test_s_i32x4_param_0[24]
-; CHECK-DAG: ld.param.u64 [[E4:%rd[0-9]+]], [test_s_i32x4_param_0+16];
-; CHECK-DAG: ld.param.v2.u32 {[[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_s_i32x4_param_0+8];
-; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i32x4_param_0];
-; CHECK: .param .align 8 .b8 param0[24];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.v2.b32 [param0+8], {[[E2]], [[E3]]};
-; CHECK: st.param.b64 [param0+16], [[E4]];
-; CHECK: .param .align 8 .b8 retval0[24];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_s_i32x4,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.v2.b32 {[[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+8];
-; CHECK: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK-DAG: st.param.v2.b32 [func_retval0+8], {[[RE2]], [[RE3]]};
-; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
-; CHECK: ret;
-
-define %s_i32x4 @test_s_i32x4(%s_i32x4 %a) {
- %r = tail call %s_i32x4 @test_s_i32x4(%s_i32x4 %a);
- ret %s_i32x4 %r;
-}
-
-; CHECK:.visible .func (.param .align 8 .b8 func_retval0[32])
-; CHECK-LABEL: test_s_i1i32x4(
-; CHECK: .param .align 8 .b8 test_s_i1i32x4_param_0[32]
-; CHECK: ld.param.u64 [[E5:%rd[0-9]+]], [test_s_i1i32x4_param_0+24];
-; CHECK: ld.param.u32 [[E4:%r[0-9]+]], [test_s_i1i32x4_param_0+16];
-; CHECK: ld.param.u32 [[E3:%r[0-9]+]], [test_s_i1i32x4_param_0+12];
-; CHECK: ld.param.u8 [[E2:%rs[0-9]+]], [test_s_i1i32x4_param_0+8];
-; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i1i32x4_param_0];
-; CHECK: .param .align 8 .b8 param0[32];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.b8 [param0+8], [[E2]];
-; CHECK: st.param.b32 [param0+12], [[E3]];
-; CHECK: st.param.b32 [param0+16], [[E4]];
-; CHECK: st.param.b64 [param0+24], [[E5]];
-; CHECK: .param .align 8 .b8 retval0[32];
-; CHECK: call.uni (retval0),
-; CHECK: test_s_i1i32x4,
-; CHECK: (
-; CHECK: param0
-; CHECK: );
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+8];
-; CHECK: ld.param.b32 [[RE3:%r[0-9]+]], [retval0+12];
-; CHECK: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
-; CHECK: ld.param.b64 [[RE5:%rd[0-9]+]], [retval0+24];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK: st.param.b8 [func_retval0+8], [[RE2]];
-; CHECK: st.param.b32 [func_retval0+12], [[RE3]];
-; CHECK: st.param.b32 [func_retval0+16], [[RE4]];
-; CHECK: st.param.b64 [func_retval0+24], [[RE5]];
-; CHECK: ret;
-
-define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
- %r = tail call %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a);
- ret %s_i8i32x4 %r;
-}
-
-; -- All loads/stores from parameters aligned by one must be done one
-; -- byte at a time.
-; CHECK:.visible .func (.param .align 1 .b8 func_retval0[25])
-; CHECK-LABEL: test_s_i1i32x4p(
-; CHECK-DAG: .param .align 1 .b8 test_s_i1i32x4p_param_0[25]
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+24];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+23];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+22];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+21];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+20];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+19];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+18];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+17];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+16];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+15];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+14];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+13];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+12];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+11];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+10];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+9];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+8];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+7];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+6];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+5];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+4];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+3];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+2];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+1];
-; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0];
-; --- TODO
-; --- Unaligned parameter store/ return value load is broken in both nvcc
-; --- and llvm and needs to be fixed.
-; CHECK: .param .align 1 .b8 param0[25];
-; CHECK-DAG: st.param.b32 [param0+0],
-; CHECK-DAG: st.param.b32 [param0+4],
-; CHECK-DAG: st.param.b8 [param0+8],
-; CHECK-DAG: st.param.b32 [param0+9],
-; CHECK-DAG: st.param.b32 [param0+13],
-; CHECK-DAG: st.param.b64 [param0+17],
-; CHECK: .param .align 1 .b8 retval0[25];
-; CHECK: call.uni (retval0),
-; CHECK-NEXT: test_s_i1i32x4p,
-; CHECK-DAG: ld.param.b32 %r41, [retval0+0];
-; CHECK-DAG: ld.param.b32 %r42, [retval0+4];
-; CHECK-DAG: ld.param.b8 %rs2, [retval0+8];
-; CHECK-DAG: ld.param.b32 %r43, [retval0+9];
-; CHECK-DAG: ld.param.b32 %r44, [retval0+13];
-; CHECK-DAG: ld.param.b64 %rd23, [retval0+17];
-; CHECK-DAG: st.param.b32 [func_retval0+0],
-; CHECK-DAG: st.param.b32 [func_retval0+4],
-; CHECK-DAG: st.param.b8 [func_retval0+8],
-; CHECK-DAG: st.param.b32 [func_retval0+9],
-; CHECK-DAG: st.param.b32 [func_retval0+13],
-; CHECK-DAG: st.param.b64 [func_retval0+17],
-
-define %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a) {
- %r = tail call %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a);
- ret %s_i8i32x4p %r;
-}
-
-; Check that we can vectorize loads that span multiple aggregate fields.
-; CHECK:.visible .func (.param .align 16 .b8 func_retval0[80])
-; CHECK-LABEL: test_s_crossfield(
-; CHECK: .param .align 16 .b8 test_s_crossfield_param_0[80]
-; CHECK: ld.param.u32 [[E15:%r[0-9]+]], [test_s_crossfield_param_0+64];
-; CHECK: ld.param.v4.u32 {[[E11:%r[0-9]+]], [[E12:%r[0-9]+]], [[E13:%r[0-9]+]], [[E14:%r[0-9]+]]}, [test_s_crossfield_param_0+48];
-; CHECK: ld.param.v4.u32 {[[E7:%r[0-9]+]], [[E8:%r[0-9]+]], [[E9:%r[0-9]+]], [[E10:%r[0-9]+]]}, [test_s_crossfield_param_0+32];
-; CHECK: ld.param.v4.u32 {[[E3:%r[0-9]+]], [[E4:%r[0-9]+]], [[E5:%r[0-9]+]], [[E6:%r[0-9]+]]}, [test_s_crossfield_param_0+16];
-; CHECK: ld.param.u32 [[E2:%r[0-9]+]], [test_s_crossfield_param_0+8];
-; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_crossfield_param_0];
-; CHECK: .param .align 16 .b8 param0[80];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
-; CHECK: st.param.b32 [param0+8], [[E2]];
-; CHECK: st.param.v4.b32 [param0+16], {[[E3]], [[E4]], [[E5]], [[E6]]};
-; CHECK: st.param.v4.b32 [param0+32], {[[E7]], [[E8]], [[E9]], [[E10]]};
-; CHECK: st.param.v4.b32 [param0+48], {[[E11]], [[E12]], [[E13]], [[E14]]};
-; CHECK: st.param.b32 [param0+64], [[E15]];
-; CHECK: .param .align 16 .b8 retval0[80];
-; CHECK: call.uni (retval0),
-; CHECK: test_s_crossfield,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
-; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
-; CHECK: ld.param.v4.b32 {[[RE3:%r[0-9]+]], [[RE4:%r[0-9]+]], [[RE5:%r[0-9]+]], [[RE6:%r[0-9]+]]}, [retval0+16];
-; CHECK: ld.param.v4.b32 {[[RE7:%r[0-9]+]], [[RE8:%r[0-9]+]], [[RE9:%r[0-9]+]], [[RE10:%r[0-9]+]]}, [retval0+32];
-; CHECK: ld.param.v4.b32 {[[RE11:%r[0-9]+]], [[RE12:%r[0-9]+]], [[RE13:%r[0-9]+]], [[RE14:%r[0-9]+]]}, [retval0+48];
-; CHECK: ld.param.b32 [[RE15:%r[0-9]+]], [retval0+64];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
-; CHECK: st.param.b32 [func_retval0+8], [[RE2]];
-; CHECK: st.param.v4.b32 [func_retval0+16], {[[RE3]], [[RE4]], [[RE5]], [[RE6]]};
-; CHECK: st.param.v4.b32 [func_retval0+32], {[[RE7]], [[RE8]], [[RE9]], [[RE10]]};
-; CHECK: st.param.v4.b32 [func_retval0+48], {[[RE11]], [[RE12]], [[RE13]], [[RE14]]};
-; CHECK: st.param.b32 [func_retval0+64], [[RE15]];
-; CHECK: ret;
-
-define %s_crossfield @test_s_crossfield(%s_crossfield %a) {
- %r = tail call %s_crossfield @test_s_crossfield(%s_crossfield %a);
- ret %s_crossfield %r;
-}
+; Verifies correctness of load/store of parameters and return values.
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 -verify-machineinstrs | FileCheck %s
+
+%s_i1 = type { i1 }
+%s_i8 = type { i8 }
+%s_i16 = type { i16 }
+%s_f16 = type { half }
+%s_i32 = type { i32 }
+%s_f32 = type { float }
+%s_i64 = type { i64 }
+%s_f64 = type { double }
+
+; More complicated types. i64 is used to increase the natural alignment
+; requirement for the type.
+%s_i32x4 = type { i32, i32, i32, i32, i64}
+%s_i32f32 = type { i32, float, i32, float, i64}
+%s_i8i32x4 = type { i32, i32, i8, i32, i32, i64}
+%s_i8i32x4p = type <{ i32, i32, i8, i32, i32, i64}>
+%s_crossfield = type { i32, [2 x i32], <4 x i32>, [3 x {i32, i32, i32}]}
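+; %s_i8i32x4p is the packed variant of %s_i8i32x4: it has alignment 1, so its
+; parameter must be accessed one byte at a time (see test_s_i1i32x4p below).
+; %s_crossfield mixes arrays and vectors so that vectorized parameter loads can
+; span individual field boundaries (see test_s_crossfield below).
+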
+; All scalar parameters must be at least 32 bits in size.
+; i1 is loaded/stored as i8.
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i1(
+; CHECK-NEXT: .param .b32 test_i1_param_0
+; CHECK: ld.param.u8 [[A8:%r[0-9]+]], [test_i1_param_0];
+; CHECK: and.b32 [[A:%r[0-9]+]], [[A8]], 1;
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]]
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni
+; CHECK-NEXT: test_i1,
+; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R:%r[0-9]+]], [[R8]], 1;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i1 @test_i1(i1 %a) {
+ %r = tail call i1 @test_i1(i1 %a);
+ ret i1 %r;
+}
+
+; Signed i1 is a somewhat special case. We only care about one bit and
+; then use neg.s32 to convert it to a 32-bit -1 if it's set.
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i1s(
+; CHECK-NEXT: .param .b32 test_i1s_param_0
+; CHECK: ld.param.u8 [[A8:%rs[0-9]+]], [test_i1s_param_0];
+; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
+; CHECK: and.b32 [[A1:%r[0-9]+]], [[A32]], 1;
+; CHECK: neg.s32 [[A:%r[0-9]+]], [[A1]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni
+; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R1:%r[0-9]+]], [[R8]], 1;
+; CHECK: neg.s32 [[R:%r[0-9]+]], [[R1]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define signext i1 @test_i1s(i1 signext %a) {
+ %r = tail call signext i1 @test_i1s(i1 signext %a);
+ ret i1 %r;
+}
+
+; Make sure that i1 loads are vectorized as i8 loads, respecting each element's alignment.
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v3i1(
+; CHECK-NEXT: .param .align 4 .b8 test_v3i1_param_0[4]
+; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i1_param_0+2];
+; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i1_param_0]
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK-DAG: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.b8 [param0+2], [[E2]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i1,
+; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
+; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]}
+; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i1> @test_v3i1(<3 x i1> %a) {
+ %r = tail call <3 x i1> @test_v3i1(<3 x i1> %a);
+ ret <3 x i1> %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v4i1(
+; CHECK-NEXT: .param .align 4 .b8 test_v4i1_param_0[4]
+; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i1_param_0]
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK: test_v4i1,
+; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]};
+; CHECK-NEXT: ret;
+define <4 x i1> @test_v4i1(<4 x i1> %a) {
+ %r = tail call <4 x i1> @test_v4i1(<4 x i1> %a);
+ ret <4 x i1> %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v5i1(
+; CHECK-NEXT: .param .align 8 .b8 test_v5i1_param_0[8]
+; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i1_param_0+4];
+; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i1_param_0]
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i1,
+; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i1> @test_v5i1(<5 x i1> %a) {
+ %r = tail call <5 x i1> @test_v5i1(<5 x i1> %a);
+ ret <5 x i1> %r;
+}
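+
+; The 8-byte, align-8 parameter above comes from widening the 5-element vector
+; to the next power-of-two (8) i8 elements during legalization; the same
+; rounding shows up in the other <5 x ...> tests below.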
+
+; Unsigned i8 is loaded directly into a 32-bit register.
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i8(
+; CHECK-NEXT: .param .b32 test_i8_param_0
+; CHECK: ld.param.u8 [[A8:%rs[0-9]+]], [test_i8_param_0];
+; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
+; CHECK: and.b32 [[A:%r[0-9]+]], [[A32]], 255;
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK: test_i8,
+; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R:%r[0-9]+]], [[R32]], 255;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i8 @test_i8(i8 %a) {
+ %r = tail call i8 @test_i8(i8 %a);
+ ret i8 %r;
+}
+
+; Signed i8 is loaded into a 16-bit register which is then sign-extended to i32.
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i8s(
+; CHECK-NEXT: .param .b32 test_i8s_param_0
+; CHECK: ld.param.s8 [[A8:%rs[0-9]+]], [test_i8s_param_0];
+; CHECK: cvt.s32.s16 [[A:%r[0-9]+]], [[A8]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK: test_i8s,
+; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
+; -- This is suspicious (though correct) -- why not cvt.u8.u32, cvt.s8.s32 ?
+; CHECK: cvt.u16.u32 [[R16:%rs[0-9]+]], [[R32]];
+; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[R16]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define signext i8 @test_i8s(i8 signext %a) {
+ %r = tail call signext i8 @test_i8s(i8 signext %a);
+ ret i8 %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v3i8(
+; CHECK-NEXT: .param .align 4 .b8 test_v3i8_param_0[4]
+; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i8_param_0+2];
+; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i8_param_0];
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b8 [param0+2], [[E2]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i8,
+; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
+; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i8> @test_v3i8(<3 x i8> %a) {
+ %r = tail call <3 x i8> @test_v3i8(<3 x i8> %a);
+ ret <3 x i8> %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v4i8(
+; CHECK-NEXT: .param .align 4 .b8 test_v4i8_param_0[4]
+; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i8_param_0]
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i8,
+; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-NEXT: ret;
+define <4 x i8> @test_v4i8(<4 x i8> %a) {
+ %r = tail call <4 x i8> @test_v4i8(<4 x i8> %a);
+ ret <4 x i8> %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v5i8(
+; CHECK-NEXT: .param .align 8 .b8 test_v5i8_param_0[8]
+; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i8_param_0+4];
+; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i8_param_0]
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i8,
+; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i8> @test_v5i8(<5 x i8> %a) {
+ %r = tail call <5 x i8> @test_v5i8(<5 x i8> %a);
+ ret <5 x i8> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i16(
+; CHECK-NEXT: .param .b32 test_i16_param_0
+; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16_param_0];
+; CHECK: cvt.u32.u16 [[E32:%r[0-9]+]], [[E16]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[E32]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i16,
+; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R:%r[0-9]+]], [[RE32]], 65535;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i16 @test_i16(i16 %a) {
+ %r = tail call i16 @test_i16(i16 %a);
+ ret i16 %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i16s(
+; CHECK-NEXT: .param .b32 test_i16s_param_0
+; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16s_param_0];
+; CHECK: cvt.s32.s16 [[E32:%r[0-9]+]], [[E16]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[E32]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i16s,
+; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
+; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[RE32]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define signext i16 @test_i16s(i16 signext %a) {
+ %r = tail call signext i16 @test_i16s(i16 signext %a);
+ ret i16 %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v3i16(
+; CHECK-NEXT: .param .align 8 .b8 test_v3i16_param_0[8]
+; CHECK-DAG: ld.param.u16 [[E2:%rs[0-9]+]], [test_v3i16_param_0+4];
+; CHECK-DAG: ld.param.v2.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i16_param_0];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b16 [param0+4], [[E2]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i16,
+; CHECK: ld.param.v2.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b16 [[RE2:%rs[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b16 [func_retval0+4], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i16> @test_v3i16(<3 x i16> %a) {
+ %r = tail call <3 x i16> @test_v3i16(<3 x i16> %a);
+ ret <3 x i16> %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v4i16(
+; CHECK-NEXT: .param .align 8 .b8 test_v4i16_param_0[8]
+; CHECK: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i16_param_0]
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i16,
+; CHECK: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-NEXT: ret;
+define <4 x i16> @test_v4i16(<4 x i16> %a) {
+ %r = tail call <4 x i16> @test_v4i16(<4 x i16> %a);
+ ret <4 x i16> %r;
+}
+
+; CHECK: .func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v5i16(
+; CHECK-NEXT: .param .align 16 .b8 test_v5i16_param_0[16]
+; CHECK-DAG: ld.param.u16 [[E4:%rs[0-9]+]], [test_v5i16_param_0+8];
+; CHECK-DAG: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i16_param_0]
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i16,
+; CHECK-DAG: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b16 [[RE4:%rs[0-9]+]], [retval0+8];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b16 [func_retval0+8], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i16> @test_v5i16(<5 x i16> %a) {
+ %r = tail call <5 x i16> @test_v5i16(<5 x i16> %a);
+ ret <5 x i16> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_f16(
+; CHECK-NEXT: .param .b32 test_f16_param_0
+; CHECK: ld.param.b16 [[E:%h[0-9]+]], [test_f16_param_0];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b16 [param0+0], [[E]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_f16,
+; CHECK: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK: st.param.b16 [func_retval0+0], [[R]]
+; CHECK-NEXT: ret;
+define half @test_f16(half %a) {
+ %r = tail call half @test_f16(half %a);
+ ret half %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v2f16(
+; CHECK-NEXT: .param .align 4 .b8 test_v2f16_param_0[4]
+; CHECK: ld.param.b32 [[E:%hh[0-9]+]], [test_v2f16_param_0];
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v2f16,
+; CHECK: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK: st.param.b32 [func_retval0+0], [[R]]
+; CHECK-NEXT: ret;
+define <2 x half> @test_v2f16(<2 x half> %a) {
+ %r = tail call <2 x half> @test_v2f16(<2 x half> %a);
+ ret <2 x half> %r;
+}
+
+; CHECK:.func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v3f16(
+; CHECK: .param .align 8 .b8 test_v3f16_param_0[8]
+; CHECK-DAG: ld.param.b32 [[HH01:%hh[0-9]+]], [test_v3f16_param_0];
+; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[HH01]];
+; CHECK-DAG: ld.param.b16 [[E2:%h[0-9]+]], [test_v3f16_param_0+4];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK-DAG: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.b16 [param0+4], [[E2]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK: test_v3f16,
+; CHECK-DAG: ld.param.v2.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b16 [[R2:%h[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-DAG: st.param.b16 [func_retval0+4], [[R2]];
+; CHECK: ret;
+define <3 x half> @test_v3f16(<3 x half> %a) {
+ %r = tail call <3 x half> @test_v3f16(<3 x half> %a);
+ ret <3 x half> %r;
+}
+
+; CHECK:.func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v4f16(
+; CHECK: .param .align 8 .b8 test_v4f16_param_0[8]
+; CHECK: ld.param.v2.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]]}, [test_v4f16_param_0];
+; CHECK-DAG: mov.b32 [[HH01:%hh[0-9]+]], [[R01]];
+; CHECK-DAG: mov.b32 [[HH23:%hh[0-9]+]], [[R23]];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.v2.b32 [param0+0], {[[HH01]], [[HH23]]};
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK: test_v4f16,
+; CHECK: ld.param.v2.b32 {[[RH01:%hh[0-9]+]], [[RH23:%hh[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[RH01]], [[RH23]]};
+; CHECK: ret;
+define <4 x half> @test_v4f16(<4 x half> %a) {
+ %r = tail call <4 x half> @test_v4f16(<4 x half> %a);
+ ret <4 x half> %r;
+}
+
+; CHECK:.func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v5f16(
+; CHECK: .param .align 16 .b8 test_v5f16_param_0[16]
+; CHECK-DAG: ld.param.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [test_v5f16_param_0];
+; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[HH01]];
+; CHECK-DAG: ld.param.b16 [[E4:%h[0-9]+]], [test_v5f16_param_0+8];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v4.b16 [param0+0],
+; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK: test_v5f16,
+; CHECK-DAG: ld.param.v4.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]], [[R2:%h[0-9]+]], [[R3:%h[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b16 [[R4:%h[0-9]+]], [retval0+8];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
+; CHECK-DAG: st.param.b16 [func_retval0+8], [[R4]];
+; CHECK: ret;
+define <5 x half> @test_v5f16(<5 x half> %a) {
+ %r = tail call <5 x half> @test_v5f16(<5 x half> %a);
+ ret <5 x half> %r;
+}
+
+; CHECK:.func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v8f16(
+; CHECK: .param .align 16 .b8 test_v8f16_param_0[16]
+; CHECK: ld.param.v4.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]], [[R45:%r[0-9]+]], [[R67:%r[0-9]+]]}, [test_v8f16_param_0];
+; CHECK-DAG: mov.b32 [[HH01:%hh[0-9]+]], [[R01]];
+; CHECK-DAG: mov.b32 [[HH23:%hh[0-9]+]], [[R23]];
+; CHECK-DAG: mov.b32 [[HH45:%hh[0-9]+]], [[R45]];
+; CHECK-DAG: mov.b32 [[HH67:%hh[0-9]+]], [[R67]];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK: st.param.v4.b32 [param0+0], {[[HH01]], [[HH23]], [[HH45]], [[HH67]]};
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK: test_v8f16,
+; CHECK: ld.param.v4.b32 {[[RH01:%hh[0-9]+]], [[RH23:%hh[0-9]+]], [[RH45:%hh[0-9]+]], [[RH67:%hh[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b32 [func_retval0+0], {[[RH01]], [[RH23]], [[RH45]], [[RH67]]};
+; CHECK: ret;
+define <8 x half> @test_v8f16(<8 x half> %a) {
+ %r = tail call <8 x half> @test_v8f16(<8 x half> %a);
+ ret <8 x half> %r;
+}
+
+; CHECK:.func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v9f16(
+; CHECK: .param .align 32 .b8 test_v9f16_param_0[32]
+; CHECK-DAG: ld.param.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [test_v9f16_param_0];
+; CHECK-DAG: ld.param.v4.b16 {[[E4:%h[0-9]+]], [[E5:%h[0-9]+]], [[E6:%h[0-9]+]], [[E7:%h[0-9]+]]}, [test_v9f16_param_0+8];
+; CHECK-DAG: ld.param.b16 [[E8:%h[0-9]+]], [test_v9f16_param_0+16];
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK-DAG: st.param.v4.b16 [param0+0],
+; CHECK-DAG: st.param.v4.b16 [param0+8],
+; CHECK-DAG: st.param.b16 [param0+16], [[E8]];
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK: test_v9f16,
+; CHECK-DAG: ld.param.v4.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]], [[R2:%h[0-9]+]], [[R3:%h[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b16 {[[R4:%h[0-9]+]], [[R5:%h[0-9]+]], [[R6:%h[0-9]+]], [[R7:%h[0-9]+]]}, [retval0+8];
+; CHECK-DAG: ld.param.b16 [[R8:%h[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
+; CHECK-DAG: st.param.v4.b16 [func_retval0+8], {[[R4]], [[R5]], [[R6]], [[R7]]};
+; CHECK-DAG: st.param.b16 [func_retval0+16], [[R8]];
+; CHECK: ret;
+define <9 x half> @test_v9f16(<9 x half> %a) {
+ %r = tail call <9 x half> @test_v9f16(<9 x half> %a);
+ ret <9 x half> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i32(
+; CHECK-NEXT: .param .b32 test_i32_param_0
+; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_i32_param_0];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i32,
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i32 @test_i32(i32 %a) {
+ %r = tail call i32 @test_i32(i32 %a);
+ ret i32 %r;
+}
+
+; CHECK: .func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v3i32(
+; CHECK-NEXT: .param .align 16 .b8 test_v3i32_param_0[16]
+; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_v3i32_param_0+8];
+; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v3i32_param_0];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b32 [param0+8], [[E2]];
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i32,
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
+; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i32> @test_v3i32(<3 x i32> %a) {
+ %r = tail call <3 x i32> @test_v3i32(<3 x i32> %a);
+ ret <3 x i32> %r;
+}
+
+; CHECK: .func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v4i32(
+; CHECK-NEXT: .param .align 16 .b8 test_v4i32_param_0[16]
+; CHECK: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v4i32_param_0]
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i32,
+; CHECK: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-NEXT: ret;
+define <4 x i32> @test_v4i32(<4 x i32> %a) {
+ %r = tail call <4 x i32> @test_v4i32(<4 x i32> %a);
+ ret <4 x i32> %r;
+}
+
+; CHECK: .func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v5i32(
+; CHECK-NEXT: .param .align 32 .b8 test_v5i32_param_0[32]
+; CHECK-DAG: ld.param.u32 [[E4:%r[0-9]+]], [test_v5i32_param_0+16];
+; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v5i32_param_0]
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b32 [param0+16], [[E4]];
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i32,
+; CHECK-DAG: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b32 [func_retval0+16], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i32> @test_v5i32(<5 x i32> %a) {
+ %r = tail call <5 x i32> @test_v5i32(<5 x i32> %a);
+ ret <5 x i32> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_f32(
+; CHECK-NEXT: .param .b32 test_f32_param_0
+; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_f32_param_0];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.f32 [param0+0], [[E]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_f32,
+; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
+; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define float @test_f32(float %a) {
+ %r = tail call float @test_f32(float %a);
+ ret float %r;
+}
+
+; CHECK: .func (.param .b64 func_retval0)
+; CHECK-LABEL: test_i64(
+; CHECK-NEXT: .param .b64 test_i64_param_0
+; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_i64_param_0];
+; CHECK: .param .b64 param0;
+; CHECK: st.param.b64 [param0+0], [[E]];
+; CHECK: .param .b64 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i64,
+; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i64 @test_i64(i64 %a) {
+ %r = tail call i64 @test_i64(i64 %a);
+ ret i64 %r;
+}
+
+; CHECK: .func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v3i64(
+; CHECK-NEXT: .param .align 32 .b8 test_v3i64_param_0[32]
+; CHECK-DAG: ld.param.u64 [[E2:%rd[0-9]+]], [test_v3i64_param_0+16];
+; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v3i64_param_0];
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b64 [param0+16], [[E2]];
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i64,
+; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b64 [[RE2:%rd[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i64> @test_v3i64(<3 x i64> %a) {
+ %r = tail call <3 x i64> @test_v3i64(<3 x i64> %a);
+ ret <3 x i64> %r;
+}
+
+; For i64, vector loads are limited by PTX to 2 elements.
+; CHECK: .func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v4i64(
+; CHECK-NEXT: .param .align 32 .b8 test_v4i64_param_0[32]
+; CHECK-DAG: ld.param.v2.u64 {[[E2:%rd[0-9]+]], [[E3:%rd[0-9]+]]}, [test_v4i64_param_0+16];
+; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v4i64_param_0];
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b64 [param0+16], {[[E2]], [[E3]]};
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i64,
+; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b64 {[[RE2:%rd[0-9]+]], [[RE3:%rd[0-9]+]]}, [retval0+16];
+; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[RE2]], [[RE3]]};
+; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-NEXT: ret;
+define <4 x i64> @test_v4i64(<4 x i64> %a) {
+ %r = tail call <4 x i64> @test_v4i64(<4 x i64> %a);
+ ret <4 x i64> %r;
+}
+
+; Aggregates, on the other hand, do not get extended.
+
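+; For example, %s_i8 below is passed as a 1-byte parameter, while a plain i8
+; above was promoted to a .b32 parameter.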
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
+; CHECK-LABEL: test_s_i1(
+; CHECK-NEXT: .align 1 .b8 test_s_i1_param_0[1]
+; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i1_param_0];
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK: st.param.b8 [param0+0], [[A]]
+; CHECK: .param .align 1 .b8 retval0[1];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_i1,
+; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
+; CHECK: st.param.b8 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i1 @test_s_i1(%s_i1 %a) {
+ %r = tail call %s_i1 @test_s_i1(%s_i1 %a);
+ ret %s_i1 %r;
+}
+
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
+; CHECK-LABEL: test_s_i8(
+; CHECK-NEXT: .param .align 1 .b8 test_s_i8_param_0[1]
+; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i8_param_0];
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK: st.param.b8 [param0+0], [[A]]
+; CHECK: .param .align 1 .b8 retval0[1];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_i8,
+; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
+; CHECK: st.param.b8 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i8 @test_s_i8(%s_i8 %a) {
+ %r = tail call %s_i8 @test_s_i8(%s_i8 %a);
+ ret %s_i8 %r;
+}
+
+; CHECK: .func (.param .align 2 .b8 func_retval0[2])
+; CHECK-LABEL: test_s_i16(
+; CHECK-NEXT: .param .align 2 .b8 test_s_i16_param_0[2]
+; CHECK: ld.param.u16 [[A:%rs[0-9]+]], [test_s_i16_param_0];
+; CHECK: .param .align 2 .b8 param0[2];
+; CHECK: st.param.b16 [param0+0], [[A]]
+; CHECK: .param .align 2 .b8 retval0[2];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_i16,
+; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i16 @test_s_i16(%s_i16 %a) {
+ %r = tail call %s_i16 @test_s_i16(%s_i16 %a);
+ ret %s_i16 %r;
+}
+
+; CHECK: .func (.param .align 2 .b8 func_retval0[2])
+; CHECK-LABEL: test_s_f16(
+; CHECK-NEXT: .param .align 2 .b8 test_s_f16_param_0[2]
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_s_f16_param_0];
+; CHECK: .param .align 2 .b8 param0[2];
+; CHECK: st.param.b16 [param0+0], [[A]]
+; CHECK: .param .align 2 .b8 retval0[2];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_f16,
+; CHECK: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_f16 @test_s_f16(%s_f16 %a) {
+ %r = tail call %s_f16 @test_s_f16(%s_f16 %a);
+ ret %s_f16 %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_s_i32(
+; CHECK-NEXT: .param .align 4 .b8 test_s_i32_param_0[4]
+; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_s_i32_param_0];
+; CHECK: .param .align 4 .b8 param0[4]
+; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i32,
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i32 @test_s_i32(%s_i32 %a) {
+ %r = tail call %s_i32 @test_s_i32(%s_i32 %a);
+ ret %s_i32 %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_s_f32(
+; CHECK-NEXT: .param .align 4 .b8 test_s_f32_param_0[4]
+; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_s_f32_param_0];
+; CHECK: .param .align 4 .b8 param0[4]
+; CHECK: st.param.f32 [param0+0], [[E]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_f32,
+; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
+; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_f32 @test_s_f32(%s_f32 %a) {
+ %r = tail call %s_f32 @test_s_f32(%s_f32 %a);
+ ret %s_f32 %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_s_i64(
+; CHECK-NEXT: .param .align 8 .b8 test_s_i64_param_0[8]
+; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_s_i64_param_0];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.b64 [param0+0], [[E]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i64,
+; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i64 @test_s_i64(%s_i64 %a) {
+ %r = tail call %s_i64 @test_s_i64(%s_i64 %a);
+ ret %s_i64 %r;
+}
+
+; Fields that have different types but identical sizes are not vectorized.
+; CHECK: .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i32f32(
+; CHECK: .param .align 8 .b8 test_s_i32f32_param_0[24]
+; CHECK-DAG: ld.param.u64 [[E4:%rd[0-9]+]], [test_s_i32f32_param_0+16];
+; CHECK-DAG: ld.param.f32 [[E3:%f[0-9]+]], [test_s_i32f32_param_0+12];
+; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_s_i32f32_param_0+8];
+; CHECK-DAG: ld.param.f32 [[E1:%f[0-9]+]], [test_s_i32f32_param_0+4];
+; CHECK-DAG: ld.param.u32 [[E0:%r[0-9]+]], [test_s_i32f32_param_0];
+; CHECK: .param .align 8 .b8 param0[24];
+; CHECK-DAG: st.param.b32 [param0+0], [[E0]];
+; CHECK-DAG: st.param.f32 [param0+4], [[E1]];
+; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
+; CHECK-DAG: st.param.f32 [param0+12], [[E3]];
+; CHECK-DAG: st.param.b64 [param0+16], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i32f32,
+; CHECK-DAG: ld.param.b32 [[RE0:%r[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.f32 [[RE1:%f[0-9]+]], [retval0+4];
+; CHECK-DAG: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
+; CHECK-DAG: ld.param.f32 [[RE3:%f[0-9]+]], [retval0+12];
+; CHECK-DAG: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.b32 [func_retval0+0], [[RE0]];
+; CHECK-DAG: st.param.f32 [func_retval0+4], [[RE1]];
+; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
+; CHECK-DAG: st.param.f32 [func_retval0+12], [[RE3]];
+; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
+; CHECK: ret;
+define %s_i32f32 @test_s_i32f32(%s_i32f32 %a) {
+ %r = tail call %s_i32f32 @test_s_i32f32(%s_i32f32 %a);
+ ret %s_i32f32 %r;
+}
+
+; We do vectorize consecutive fields with matching types.
+; CHECK:.visible .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i32x4(
+; CHECK: .param .align 8 .b8 test_s_i32x4_param_0[24]
+; CHECK-DAG: ld.param.u64 [[E4:%rd[0-9]+]], [test_s_i32x4_param_0+16];
+; CHECK-DAG: ld.param.v2.u32 {[[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_s_i32x4_param_0+8];
+; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i32x4_param_0];
+; CHECK: .param .align 8 .b8 param0[24];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0+8], {[[E2]], [[E3]]};
+; CHECK: st.param.b64 [param0+16], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i32x4,
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b32 {[[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+8];
+; CHECK: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b32 [func_retval0+8], {[[RE2]], [[RE3]]};
+; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
+; CHECK: ret;
+
+define %s_i32x4 @test_s_i32x4(%s_i32x4 %a) {
+ %r = tail call %s_i32x4 @test_s_i32x4(%s_i32x4 %a);
+ ret %s_i32x4 %r;
+}
+
+; CHECK:.visible .func (.param .align 8 .b8 func_retval0[32])
+; CHECK-LABEL: test_s_i1i32x4(
+; CHECK: .param .align 8 .b8 test_s_i1i32x4_param_0[32]
+; CHECK: ld.param.u64 [[E5:%rd[0-9]+]], [test_s_i1i32x4_param_0+24];
+; CHECK: ld.param.u32 [[E4:%r[0-9]+]], [test_s_i1i32x4_param_0+16];
+; CHECK: ld.param.u32 [[E3:%r[0-9]+]], [test_s_i1i32x4_param_0+12];
+; CHECK: ld.param.u8 [[E2:%rs[0-9]+]], [test_s_i1i32x4_param_0+8];
+; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i1i32x4_param_0];
+; CHECK: .param .align 8 .b8 param0[32];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b8 [param0+8], [[E2]];
+; CHECK: st.param.b32 [param0+12], [[E3]];
+; CHECK: st.param.b32 [param0+16], [[E4]];
+; CHECK: st.param.b64 [param0+24], [[E5]];
+; CHECK: .param .align 8 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK: test_s_i1i32x4,
+; CHECK: (
+; CHECK: param0
+; CHECK: );
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+8];
+; CHECK: ld.param.b32 [[RE3:%r[0-9]+]], [retval0+12];
+; CHECK: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
+; CHECK: ld.param.b64 [[RE5:%rd[0-9]+]], [retval0+24];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK: st.param.b8 [func_retval0+8], [[RE2]];
+; CHECK: st.param.b32 [func_retval0+12], [[RE3]];
+; CHECK: st.param.b32 [func_retval0+16], [[RE4]];
+; CHECK: st.param.b64 [func_retval0+24], [[RE5]];
+; CHECK: ret;
+
+define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
+ %r = tail call %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a);
+ ret %s_i8i32x4 %r;
+}
+
+; -- All loads/stores from parameters aligned by one must be done one
+; -- byte at a time.
+; CHECK:.visible .func (.param .align 1 .b8 func_retval0[25])
+; CHECK-LABEL: test_s_i1i32x4p(
+; CHECK-DAG: .param .align 1 .b8 test_s_i1i32x4p_param_0[25]
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+24];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+23];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+22];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+21];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+20];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+19];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+18];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+17];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+16];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+15];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+14];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+13];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+12];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+11];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+10];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+9];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+8];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+7];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+6];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+5];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+4];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+3];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+2];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+1];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0];
+; --- TODO
+; --- Unaligned parameter store/ return value load is broken in both nvcc
+; --- and llvm and needs to be fixed.
+; CHECK: .param .align 1 .b8 param0[25];
+; CHECK-DAG: st.param.b32 [param0+0],
+; CHECK-DAG: st.param.b32 [param0+4],
+; CHECK-DAG: st.param.b8 [param0+8],
+; CHECK-DAG: st.param.b32 [param0+9],
+; CHECK-DAG: st.param.b32 [param0+13],
+; CHECK-DAG: st.param.b64 [param0+17],
+; CHECK: .param .align 1 .b8 retval0[25];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i1i32x4p,
+; CHECK-DAG: ld.param.b32 %r41, [retval0+0];
+; CHECK-DAG: ld.param.b32 %r42, [retval0+4];
+; CHECK-DAG: ld.param.b8 %rs2, [retval0+8];
+; CHECK-DAG: ld.param.b32 %r43, [retval0+9];
+; CHECK-DAG: ld.param.b32 %r44, [retval0+13];
+; CHECK-DAG: ld.param.b64 %rd23, [retval0+17];
+; CHECK-DAG: st.param.b32 [func_retval0+0],
+; CHECK-DAG: st.param.b32 [func_retval0+4],
+; CHECK-DAG: st.param.b8 [func_retval0+8],
+; CHECK-DAG: st.param.b32 [func_retval0+9],
+; CHECK-DAG: st.param.b32 [func_retval0+13],
+; CHECK-DAG: st.param.b64 [func_retval0+17],
+
+define %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a) {
+ %r = tail call %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a);
+ ret %s_i8i32x4p %r;
+}
+
+; Check that we can vectorize loads that span multiple aggregate fields.
+; CHECK:.visible .func (.param .align 16 .b8 func_retval0[80])
+; CHECK-LABEL: test_s_crossfield(
+; CHECK: .param .align 16 .b8 test_s_crossfield_param_0[80]
+; CHECK: ld.param.u32 [[E15:%r[0-9]+]], [test_s_crossfield_param_0+64];
+; CHECK: ld.param.v4.u32 {[[E11:%r[0-9]+]], [[E12:%r[0-9]+]], [[E13:%r[0-9]+]], [[E14:%r[0-9]+]]}, [test_s_crossfield_param_0+48];
+; CHECK: ld.param.v4.u32 {[[E7:%r[0-9]+]], [[E8:%r[0-9]+]], [[E9:%r[0-9]+]], [[E10:%r[0-9]+]]}, [test_s_crossfield_param_0+32];
+; CHECK: ld.param.v4.u32 {[[E3:%r[0-9]+]], [[E4:%r[0-9]+]], [[E5:%r[0-9]+]], [[E6:%r[0-9]+]]}, [test_s_crossfield_param_0+16];
+; CHECK: ld.param.u32 [[E2:%r[0-9]+]], [test_s_crossfield_param_0+8];
+; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_crossfield_param_0];
+; CHECK: .param .align 16 .b8 param0[80];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b32 [param0+8], [[E2]];
+; CHECK: st.param.v4.b32 [param0+16], {[[E3]], [[E4]], [[E5]], [[E6]]};
+; CHECK: st.param.v4.b32 [param0+32], {[[E7]], [[E8]], [[E9]], [[E10]]};
+; CHECK: st.param.v4.b32 [param0+48], {[[E11]], [[E12]], [[E13]], [[E14]]};
+; CHECK: st.param.b32 [param0+64], [[E15]];
+; CHECK: .param .align 16 .b8 retval0[80];
+; CHECK: call.uni (retval0),
+; CHECK: test_s_crossfield,
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
+; CHECK: ld.param.v4.b32 {[[RE3:%r[0-9]+]], [[RE4:%r[0-9]+]], [[RE5:%r[0-9]+]], [[RE6:%r[0-9]+]]}, [retval0+16];
+; CHECK: ld.param.v4.b32 {[[RE7:%r[0-9]+]], [[RE8:%r[0-9]+]], [[RE9:%r[0-9]+]], [[RE10:%r[0-9]+]]}, [retval0+32];
+; CHECK: ld.param.v4.b32 {[[RE11:%r[0-9]+]], [[RE12:%r[0-9]+]], [[RE13:%r[0-9]+]], [[RE14:%r[0-9]+]]}, [retval0+48];
+; CHECK: ld.param.b32 [[RE15:%r[0-9]+]], [retval0+64];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK: st.param.b32 [func_retval0+8], [[RE2]];
+; CHECK: st.param.v4.b32 [func_retval0+16], {[[RE3]], [[RE4]], [[RE5]], [[RE6]]};
+; CHECK: st.param.v4.b32 [func_retval0+32], {[[RE7]], [[RE8]], [[RE9]], [[RE10]]};
+; CHECK: st.param.v4.b32 [func_retval0+48], {[[RE11]], [[RE12]], [[RE13]], [[RE14]]};
+; CHECK: st.param.b32 [func_retval0+64], [[RE15]];
+; CHECK: ret;
+
+define %s_crossfield @test_s_crossfield(%s_crossfield %a) {
+ %r = tail call %s_crossfield @test_s_crossfield(%s_crossfield %a);
+ ret %s_crossfield %r;
+}
diff --git a/test/CodeGen/NVPTX/sched1.ll b/test/CodeGen/NVPTX/sched1.ll
index fb01eb262adc..ecdf55ecdbeb 100644
--- a/test/CodeGen/NVPTX/sched1.ll
+++ b/test/CodeGen/NVPTX/sched1.ll
@@ -6,10 +6,10 @@ define void @foo(i32* %a) {
; CHECK: .func foo
; CHECK: ld.u32
; CHECK-NEXT: ld.u32
-; CHECK-NEXT: ld.u32
-; CHECK-NEXT: ld.u32
; CHECK-NEXT: add.s32
+; CHECK-NEXT: ld.u32
; CHECK-NEXT: add.s32
+; CHECK-NEXT: ld.u32
; CHECK-NEXT: add.s32
%ptr0 = getelementptr i32, i32* %a, i32 0
%val0 = load i32, i32* %ptr0
diff --git a/test/CodeGen/NVPTX/sched2.ll b/test/CodeGen/NVPTX/sched2.ll
index 91ed77878f81..347f77c5682c 100644
--- a/test/CodeGen/NVPTX/sched2.ll
+++ b/test/CodeGen/NVPTX/sched2.ll
@@ -4,12 +4,12 @@ define void @foo(<2 x i32>* %a) {
; CHECK: .func foo
; CHECK: ld.v2.u32
; CHECK-NEXT: ld.v2.u32
-; CHECK-NEXT: ld.v2.u32
-; CHECK-NEXT: ld.v2.u32
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
+; CHECK-NEXT: ld.v2.u32
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
+; CHECK-NEXT: ld.v2.u32
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
%ptr0 = getelementptr <2 x i32>, <2 x i32>* %a, i32 0
diff --git a/test/CodeGen/NVPTX/simple-call.ll b/test/CodeGen/NVPTX/simple-call.ll
index da6568685fe6..8ff0b5da5bcc 100644
--- a/test/CodeGen/NVPTX/simple-call.ll
+++ b/test/CodeGen/NVPTX/simple-call.ll
@@ -1,26 +1,26 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
-
-
-
-; CHECK: .func ({{.*}}) device_func
-define float @device_func(float %a) noinline {
- %ret = fmul float %a, %a
- ret float %ret
-}
-
-; CHECK: .entry kernel_func
-define void @kernel_func(float* %a) {
- %val = load float, float* %a
-; CHECK: call.uni (retval0),
-; CHECK: device_func,
- %mul = call float @device_func(float %val)
- store float %mul, float* %a
- ret void
-}
-
-
-
-!nvvm.annotations = !{!1}
-
-!1 = !{void (float*)* @kernel_func, !"kernel", i32 1}
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
+
+
+
+; CHECK: .func ({{.*}}) device_func
+define float @device_func(float %a) noinline {
+ %ret = fmul float %a, %a
+ ret float %ret
+}
+
+; CHECK: .entry kernel_func
+define void @kernel_func(float* %a) {
+ %val = load float, float* %a
+; CHECK: call.uni (retval0),
+; CHECK: device_func,
+ %mul = call float @device_func(float %val)
+ store float %mul, float* %a
+ ret void
+}
+
+
+
+!nvvm.annotations = !{!1}
+
+!1 = !{void (float*)* @kernel_func, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/vec8.ll b/test/CodeGen/NVPTX/vec8.ll
index a86ba1e29d5c..93b39c1125f8 100644
--- a/test/CodeGen/NVPTX/vec8.ll
+++ b/test/CodeGen/NVPTX/vec8.ll
@@ -7,7 +7,7 @@ define void @foo(<8 x i8> %a, i8* %b) {
; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [foo_param_0]
; CHECK-DAG: ld.param.v4.u8 {[[E4:%rs[0-9]+]], [[E5:%rs[0-9]+]], [[E6:%rs[0-9]+]], [[E7:%rs[0-9]+]]}, [foo_param_0+4]
; CHECK-DAG: ld.param.u32 %[[B:r[0-9+]]], [foo_param_1]
-; CHECK: add.s16 [[T:%rs[0-9+]]], [[E1]], [[E6]];
+; CHECK-DAG: add.s16 [[T:%rs[0-9+]]], [[E1]], [[E6]];
; CHECK: st.u8 [%[[B]]], [[T]];
%t0 = extractelement <8 x i8> %a, i32 1
%t1 = extractelement <8 x i8> %a, i32 6
diff --git a/test/CodeGen/NVPTX/vector-call.ll b/test/CodeGen/NVPTX/vector-call.ll
index bf7b931a5758..d1ec8d25a107 100644
--- a/test/CodeGen/NVPTX/vector-call.ll
+++ b/test/CodeGen/NVPTX/vector-call.ll
@@ -1,30 +1,30 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-
-target triple = "nvptx-unknown-cuda"
-
-declare void @bar(<4 x i32>)
-
-; CHECK-LABEL: .func foo(
-; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [foo_param_0];
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: call.uni
-; CHECK: ret;
-define void @foo(<4 x i32> %a) {
- tail call void @bar(<4 x i32> %a)
- ret void
-}
-
-; CHECK-LABEL: .func foo3(
-; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [foo3_param_0];
-; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [foo3_param_0+8];
-; CHECK: .param .align 16 .b8 param0[16];
-; CHECK-DAG: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
-; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
-; CHECK: call.uni
-; CHECK: ret;
-declare void @bar3(<3 x i32>)
-define void @foo3(<3 x i32> %a) {
- tail call void @bar3(<3 x i32> %a)
- ret void
-}
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | FileCheck %s
+
+target triple = "nvptx-unknown-cuda"
+
+declare void @bar(<4 x i32>)
+
+; CHECK-LABEL: .func foo(
+; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [foo_param_0];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: call.uni
+; CHECK: ret;
+define void @foo(<4 x i32> %a) {
+ tail call void @bar(<4 x i32> %a)
+ ret void
+}
+
+; CHECK-LABEL: .func foo3(
+; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [foo3_param_0];
+; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [foo3_param_0+8];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
+; CHECK: call.uni
+; CHECK: ret;
+declare void @bar3(<3 x i32>)
+define void @foo3(<3 x i32> %a) {
+ tail call void @bar3(<3 x i32> %a)
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/zeroext-32bit.ll b/test/CodeGen/NVPTX/zeroext-32bit.ll
index c2f0ec4b1447..bcfd987b4a66 100644
--- a/test/CodeGen/NVPTX/zeroext-32bit.ll
+++ b/test/CodeGen/NVPTX/zeroext-32bit.ll
@@ -1,26 +1,26 @@
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_30 | FileCheck %s
-
-; The zeroext attribute below should be silently ignored because
-; we can pass a 32-bit integer across a function call without
-; needing to extend it.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-target triple = "nvptx64-unknown-cuda"
-
-; CHECK-LABEL: .visible .func zeroext_test
-; CHECK-NOT: cvt.u32.u16
-define void @zeroext_test() {
- tail call void @call1(i32 zeroext 0)
- ret void
-}
-
-declare void @call1(i32 zeroext)
-
-; CHECK-LABEL: .visible .func signext_test
-; CHECK-NOT: cvt.s32.s16
-define void @signext_test() {
- tail call void @call2(i32 zeroext 0)
- ret void
-}
-
-declare void @call2(i32 zeroext)
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_30 -verify-machineinstrs | FileCheck %s
+
+; The zeroext attribute below should be silently ignored because
+; we can pass a 32-bit integer across a function call without
+; needing to extend it.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-unknown-cuda"
+
+; CHECK-LABEL: .visible .func zeroext_test
+; CHECK-NOT: cvt.u32.u16
+define void @zeroext_test() {
+ tail call void @call1(i32 zeroext 0)
+ ret void
+}
+
+declare void @call1(i32 zeroext)
+
+; CHECK-LABEL: .visible .func signext_test
+; CHECK-NOT: cvt.s32.s16
+define void @signext_test() {
+ tail call void @call2(i32 zeroext 0)
+ ret void
+}
+
+declare void @call2(i32 zeroext)
diff --git a/test/CodeGen/PowerPC/mtvsrdd.ll b/test/CodeGen/PowerPC/mtvsrdd.ll
new file mode 100644
index 000000000000..1d6a3553b2a1
--- /dev/null
+++ b/test/CodeGen/PowerPC/mtvsrdd.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mcpu=pwr9 -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
+; RUN: < %s | FileCheck %s
+
+; This test case checks that r0 is used as constant 0 in the mtvsrdd instruction.
+
+define <2 x i64> @const0(i64 %a) {
+ %vecinit = insertelement <2 x i64> undef, i64 %a, i32 0
+ %vecinit1 = insertelement <2 x i64> %vecinit, i64 0, i32 1
+ ret <2 x i64> %vecinit1
+; CHECK-LABEL: const0
+; CHECK: mtvsrdd v2, 0, r3
+}
+
+define <2 x i64> @noconst0(i64* %a, i64* %b) {
+ %1 = load i64, i64* %a, align 8
+ %2 = load i64, i64* %b, align 8
+ %vecinit = insertelement <2 x i64> undef, i64 %2, i32 0
+ %vecinit1 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
+ ret <2 x i64> %vecinit1
+; CHECK-LABEL: noconst0
+; CHECK: mtvsrdd v2, {{r[0-9]+}}, {{r[0-9]+}}
+}
diff --git a/test/CodeGen/PowerPC/setcc-logic.ll b/test/CodeGen/PowerPC/setcc-logic.ll
index 2ed08e2ae380..a5a86f101a94 100644
--- a/test/CodeGen/PowerPC/setcc-logic.ll
+++ b/test/CodeGen/PowerPC/setcc-logic.ll
@@ -6,7 +6,7 @@ define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) {
; CHECK: # BB#0:
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: cntlzw 3, 3
-; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: srwi 3, 3, 5
; CHECK-NEXT: blr
%a = icmp eq i32 %P, 0
%b = icmp eq i32 %Q, 0
@@ -30,11 +30,11 @@ define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) {
define zeroext i1 @all_bits_set(i32 %P, i32 %Q) {
; CHECK-LABEL: all_bits_set:
; CHECK: # BB#0:
+; CHECK-NEXT: li 5, -1
; CHECK-NEXT: and 3, 3, 4
-; CHECK-NEXT: li 5, 0
-; CHECK-NEXT: li 12, 1
-; CHECK-NEXT: cmpwi 0, 3, -1
-; CHECK-NEXT: isel 3, 12, 5, 2
+; CHECK-NEXT: xor 3, 3, 5
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: srwi 3, 3, 5
; CHECK-NEXT: blr
%a = icmp eq i32 %P, -1
%b = icmp eq i32 %Q, -1
@@ -437,7 +437,7 @@ define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 z
; CHECK-NEXT: xor 3, 3, 4
; CHECK-NEXT: or 3, 3, 5
; CHECK-NEXT: cntlzw 3, 3
-; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: srwi 3, 3, 5
; CHECK-NEXT: blr
%cmp1 = icmp eq i16 %a, %b
%cmp2 = icmp eq i16 %c, %d
diff --git a/test/CodeGen/PowerPC/stackmap-frame-setup.ll b/test/CodeGen/PowerPC/stackmap-frame-setup.ll
index b5f1d4cfe4bc..b677b8be2966 100644
--- a/test/CodeGen/PowerPC/stackmap-frame-setup.ll
+++ b/test/CodeGen/PowerPC/stackmap-frame-setup.ll
@@ -7,11 +7,11 @@ entry:
store i64 11, i64* %metadata
store i64 12, i64* %metadata
store i64 13, i64* %metadata
-; ISEL: ADJCALLSTACKDOWN 0, implicit-def
+; ISEL: ADJCALLSTACKDOWN 0, 0, implicit-def
; ISEL-NEXT: STACKMAP
; ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
-; FAST-ISEL: ADJCALLSTACKDOWN 0, implicit-def
+; FAST-ISEL: ADJCALLSTACKDOWN 0, 0, implicit-def
; FAST-ISEL-NEXT: STACKMAP
; FAST-ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
ret void
diff --git a/test/CodeGen/PowerPC/tail-dup-layout.ll b/test/CodeGen/PowerPC/tail-dup-layout.ll
index c9b5bf8c9eeb..9665901e874f 100644
--- a/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O2 < %s | FileCheck %s
+; RUN: llc -O2 -o - %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-O2 %s
+; RUN: llc -O3 -o - %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-O3 %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-grtev4-linux-gnu"
@@ -99,11 +100,9 @@ exit:
; test1
; test2
; test3
-; test4
; optional1
; optional2
; optional3
-; optional4
; exit
; even for 50/50 branches.
; Tail duplication puts test n+1 at the end of optional n
@@ -163,6 +162,98 @@ exit:
}
; Intended layout:
+; The chain-of-triangles based duplication produces this layout when 3
+; instructions are allowed for tail duplication.
+; test1
+; test2
+; test3
+; optional1
+; optional2
+; optional3
+; exit
+;
+; Otherwise it produces the layout:
+; test1
+; optional1
+; test2
+; optional2
+; test3
+; optional3
+; exit
+
+;CHECK-LABEL: straight_test_3_instr_test:
+; test1 may have been merged with entry
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: clrlwi {{[0-9]+}}, [[TAGREG]], 30
+;CHECK-NEXT: cmplwi {{[0-9]+}}, 2
+
+;CHECK-O3-NEXT: bne 0, .[[OPT1LABEL:[_0-9A-Za-z]+]]
+;CHECK-O3-NEXT: # %test2
+;CHECK-O3-NEXT: rlwinm {{[0-9]+}}, [[TAGREG]], 0, 28, 29
+;CHECK-O3-NEXT: cmplwi {{[0-9]+}}, 8
+;CHECK-O3-NEXT: bne 0, .[[OPT2LABEL:[_0-9A-Za-z]+]]
+;CHECK-O3-NEXT: .[[TEST3LABEL:[_0-9A-Za-z]+]]: # %test3
+;CHECK-O3-NEXT: rlwinm {{[0-9]+}}, [[TAGREG]], 0, 26, 27
+;CHECK-O3-NEXT: cmplwi {{[0-9]+}}, 32
+;CHECK-O3-NEXT: bne 0, .[[OPT3LABEL:[_0-9A-Za-z]+]]
+;CHECK-O3-NEXT: .[[EXITLABEL:[_0-9A-Za-z]+]]: # %exit
+;CHECK-O3: blr
+;CHECK-O3-NEXT: .[[OPT1LABEL]]:
+;CHECK-O3: rlwinm {{[0-9]+}}, [[TAGREG]], 0, 28, 29
+;CHECK-O3-NEXT: cmplwi {{[0-9]+}}, 8
+;CHECK-O3-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-O3-NEXT: .[[OPT2LABEL]]:
+;CHECK-O3: rlwinm {{[0-9]+}}, [[TAGREG]], 0, 26, 27
+;CHECK-O3-NEXT: cmplwi {{[0-9]+}}, 32
+;CHECK-O3-NEXT: beq 0, .[[EXITLABEL]]
+;CHECK-O3-NEXT: .[[OPT3LABEL]]:
+;CHECK-O3: b .[[EXITLABEL]]
+
+;CHECK-O2-NEXT: beq 0, .[[TEST2LABEL:[_0-9A-Za-z]+]]
+;CHECK-O2-NEXT: # %optional1
+;CHECK-O2: .[[TEST2LABEL]]: # %test2
+;CHECK-O2-NEXT: rlwinm {{[0-9]+}}, [[TAGREG]], 0, 28, 29
+;CHECK-O2-NEXT: cmplwi {{[0-9]+}}, 8
+;CHECK-O2-NEXT: beq 0, .[[TEST3LABEL:[_0-9A-Za-z]+]]
+;CHECK-O2-NEXT: # %optional2
+;CHECK-O2: .[[TEST3LABEL]]: # %test3
+;CHECK-O2-NEXT: rlwinm {{[0-9]+}}, [[TAGREG]], 0, 26, 27
+;CHECK-O2-NEXT: cmplwi {{[0-9]+}}, 32
+;CHECK-O2-NEXT: beq 0, .[[EXITLABEL:[_0-9A-Za-z]+]]
+;CHECK-O2-NEXT: # %optional3
+;CHECK-O2: .[[EXITLABEL]]: # %exit
+;CHECK-O2: blr
+
+
+define void @straight_test_3_instr_test(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 3
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 2
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !2
+optional1:
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 12
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 8
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !2
+optional2:
+ call void @b()
+ br label %test3
+test3:
+ %tagbit3 = and i32 %tag, 48
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 32
+ br i1 %tagbit3eq0, label %exit, label %optional3, !prof !1
+optional3:
+ call void @c()
+ br label %exit
+exit:
+ ret void
+}
+
+; Intended layout:
; The chain-based outlining produces the layout
; entry
; --- Begin loop ---
diff --git a/test/CodeGen/PowerPC/testComparesieqsc.ll b/test/CodeGen/PowerPC/testComparesieqsc.ll
new file mode 100644
index 000000000000..71ad5ed34969
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesieqsc.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testComparesieqsc.c'
+
+@glob = common local_unnamed_addr global i8 0, align 1
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsc(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_ieqsc:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsc_sext(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_ieqsc_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsc_z(i8 signext %a) {
+; CHECK-LABEL: test_ieqsc_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv1 = zext i1 %cmp to i32
+ ret i32 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsc_sext_z(i8 signext %a) {
+; CHECK-LABEL: test_ieqsc_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsc_store(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_ieqsc_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = zext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsc_sext_store(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_ieqsc_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = sext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsc_z_store(i8 signext %a) {
+; CHECK-LABEL: test_ieqsc_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = zext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsc_sext_z_store(i8 signext %a) {
+; CHECK-LABEL: test_ieqsc_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = sext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesieqsi.ll b/test/CodeGen/PowerPC/testComparesieqsi.ll
new file mode 100644
index 000000000000..16882dbd0045
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesieqsi.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testComparesieqsi.c'
+
+@glob = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsi(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_ieqsi:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsi_sext(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_ieqsi_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsi_z(i32 signext %a) {
+; CHECK-LABEL: test_ieqsi_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqsi_sext_z(i32 signext %a) {
+; CHECK-LABEL: test_ieqsi_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsi_store(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_ieqsi_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsi_sext_store(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_ieqsi_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsi_z_store(i32 signext %a) {
+; CHECK-LABEL: test_ieqsi_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqsi_sext_z_store(i32 signext %a) {
+; CHECK-LABEL: test_ieqsi_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesieqss.ll b/test/CodeGen/PowerPC/testComparesieqss.ll
new file mode 100644
index 000000000000..110c5a62804e
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesieqss.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testComparesieqss.c'
+
+@glob = common local_unnamed_addr global i16 0, align 2
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqss(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_ieqss:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqss_sext(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_ieqss_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqss_z(i16 signext %a) {
+; CHECK-LABEL: test_ieqss_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv1 = zext i1 %cmp to i32
+ ret i32 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_ieqss_sext_z(i16 signext %a) {
+; CHECK-LABEL: test_ieqss_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqss_store(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_ieqss_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = zext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqss_sext_store(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_ieqss_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = sext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqss_z_store(i16 signext %a) {
+; CHECK-LABEL: test_ieqss_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = zext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_ieqss_sext_z_store(i16 signext %a) {
+; CHECK-LABEL: test_ieqss_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = sext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesiequc.ll b/test/CodeGen/PowerPC/testComparesiequc.ll
new file mode 100644
index 000000000000..e2c975f2c191
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesiequc.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testComparesiequc.c'
+
+@glob = common local_unnamed_addr global i8 0, align 1
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequc(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_iequc:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequc_sext(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_iequc_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequc_z(i8 zeroext %a) {
+; CHECK-LABEL: test_iequc_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv1 = zext i1 %cmp to i32
+ ret i32 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequc_sext_z(i8 zeroext %a) {
+; CHECK-LABEL: test_iequc_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequc_store(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_iequc_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = zext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequc_sext_store(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_iequc_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = sext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequc_z_store(i8 zeroext %a) {
+; CHECK-LABEL: test_iequc_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = zext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequc_sext_z_store(i8 zeroext %a) {
+; CHECK-LABEL: test_iequc_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = sext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesiequi.ll b/test/CodeGen/PowerPC/testComparesiequi.ll
new file mode 100644
index 000000000000..789b176a7700
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesiequi.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testComparesiequi.c'
+
+@glob = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequi(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_iequi:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequi_sext(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_iequi_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequi_z(i32 zeroext %a) {
+; CHECK-LABEL: test_iequi_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequi_sext_z(i32 zeroext %a) {
+; CHECK-LABEL: test_iequi_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequi_store(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_iequi_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequi_sext_store(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_iequi_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequi_z_store(i32 zeroext %a) {
+; CHECK-LABEL: test_iequi_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequi_sext_z_store(i32 zeroext %a) {
+; CHECK-LABEL: test_iequi_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesiequs.ll b/test/CodeGen/PowerPC/testComparesiequs.ll
new file mode 100644
index 000000000000..b72943893e98
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesiequs.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testComparesiequs.c'
+
+@glob = common local_unnamed_addr global i16 0, align 2
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequs(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_iequs:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequs_sext(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_iequs_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequs_z(i16 zeroext %a) {
+; CHECK-LABEL: test_iequs_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv1 = zext i1 %cmp to i32
+ ret i32 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @test_iequs_sext_z(i16 zeroext %a) {
+; CHECK-LABEL: test_iequs_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %sub = sext i1 %cmp to i32
+ ret i32 %sub
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequs_store(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_iequs_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = zext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequs_sext_store(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_iequs_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = sext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequs_z_store(i16 zeroext %a) {
+; CHECK-LABEL: test_iequs_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = zext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_iequs_sext_z_store(i16 zeroext %a) {
+; CHECK-LABEL: test_iequs_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = sext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testCompareslleqsc.ll b/test/CodeGen/PowerPC/testCompareslleqsc.ll
new file mode 100644
index 000000000000..56af12827931
--- /dev/null
+++ b/test/CodeGen/PowerPC/testCompareslleqsc.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; ModuleID = 'ComparisonTestCases/testCompareslleqsc.c'
+
+@glob = common local_unnamed_addr global i8 0, align 1
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsc(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_lleqsc:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = zext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsc_sext(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_lleqsc_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = sext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsc_z(i8 signext %a) {
+; CHECK-LABEL: test_lleqsc_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = zext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsc_sext_z(i8 signext %a) {
+; CHECK-LABEL: test_lleqsc_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = sext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsc_store(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_lleqsc_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = zext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsc_sext_store(i8 signext %a, i8 signext %b) {
+; CHECK-LABEL: test_lleqsc_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = sext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsc_z_store(i8 signext %a) {
+; CHECK-LABEL: test_lleqsc_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = zext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsc_sext_z_store(i8 signext %a) {
+; CHECK-LABEL: test_lleqsc_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = sext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testCompareslleqsi.ll b/test/CodeGen/PowerPC/testCompareslleqsi.ll
new file mode 100644
index 000000000000..90cf2c85888e
--- /dev/null
+++ b/test/CodeGen/PowerPC/testCompareslleqsi.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+
+@glob = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsi(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_lleqsi:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv1 = zext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsi_sext(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_lleqsi_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv1 = sext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsi_z(i32 signext %a) {
+; CHECK-LABEL: test_lleqsi_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv1 = zext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqsi_sext_z(i32 signext %a) {
+; CHECK-LABEL: test_lleqsi_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv1 = sext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsi_store(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_lleqsi_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsi_sext_store(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: test_lleqsi_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsi_z_store(i32 signext %a) {
+; CHECK-LABEL: test_lleqsi_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqsi_sext_z_store(i32 signext %a) {
+; CHECK-LABEL: test_lleqsi_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testCompareslleqss.ll b/test/CodeGen/PowerPC/testCompareslleqss.ll
new file mode 100644
index 000000000000..df60a6ccc00e
--- /dev/null
+++ b/test/CodeGen/PowerPC/testCompareslleqss.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+
+@glob = common local_unnamed_addr global i16 0, align 2
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqss(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_lleqss:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = zext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqss_sext(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_lleqss_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = sext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqss_z(i16 signext %a) {
+; CHECK-LABEL: test_lleqss_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = zext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_lleqss_sext_z(i16 signext %a) {
+; CHECK-LABEL: test_lleqss_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = sext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqss_store(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_lleqss_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = zext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqss_sext_store(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: test_lleqss_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = sext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqss_z_store(i16 signext %a) {
+; CHECK-LABEL: test_lleqss_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = zext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_lleqss_sext_z_store(i16 signext %a) {
+; CHECK-LABEL: test_lleqss_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = sext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesllequc.ll b/test/CodeGen/PowerPC/testComparesllequc.ll
new file mode 100644
index 000000000000..248825761295
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesllequc.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+
+@glob = common local_unnamed_addr global i8 0, align 1
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequc(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_llequc:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = zext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequc_sext(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_llequc_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = sext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequc_z(i8 zeroext %a) {
+; CHECK-LABEL: test_llequc_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = zext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequc_sext_z(i8 zeroext %a) {
+; CHECK-LABEL: test_llequc_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = sext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequc_store(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_llequc_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = zext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequc_sext_store(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: test_llequc_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, %b
+ %conv3 = sext i1 %cmp to i8
+ store i8 %conv3, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequc_z_store(i8 zeroext %a) {
+; CHECK-LABEL: test_llequc_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = zext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequc_sext_z_store(i8 zeroext %a) {
+; CHECK-LABEL: test_llequc_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stb r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %conv2 = sext i1 %cmp to i8
+ store i8 %conv2, i8* @glob, align 1
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesllequi.ll b/test/CodeGen/PowerPC/testComparesllequi.ll
new file mode 100644
index 000000000000..2342d80d94ef
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesllequi.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+
+@glob = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequi(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_llequi:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv1 = zext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequi_sext(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_llequi_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv1 = sext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequi_z(i32 zeroext %a) {
+; CHECK-LABEL: test_llequi_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv1 = zext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequi_sext_z(i32 zeroext %a) {
+; CHECK-LABEL: test_llequi_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv1 = sext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequi_store(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_llequi_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequi_sext_store(i32 zeroext %a, i32 zeroext %b) {
+; CHECK-LABEL: test_llequi_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, %b
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequi_z_store(i32 zeroext %a) {
+; CHECK-LABEL: test_llequi_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ store i32 %conv, i32* @glob, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequi_sext_z_store(i32 zeroext %a) {
+; CHECK-LABEL: test_llequi_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: stw r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %sub = sext i1 %cmp to i32
+ store i32 %sub, i32* @glob, align 4
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/testComparesllequs.ll b/test/CodeGen/PowerPC/testComparesllequs.ll
new file mode 100644
index 000000000000..e79a974c06f5
--- /dev/null
+++ b/test/CodeGen/PowerPC/testComparesllequs.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
+; RUN: --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
+
+@glob = common local_unnamed_addr global i16 0, align 2
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequs(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_llequs:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = zext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequs_sext(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_llequs_sext:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = sext i1 %cmp to i64
+ ret i64 %conv3
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequs_z(i16 zeroext %a) {
+; CHECK-LABEL: test_llequs_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = zext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @test_llequs_sext_z(i16 zeroext %a) {
+; CHECK-LABEL: test_llequs_sext_z:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = sext i1 %cmp to i64
+ ret i64 %conv2
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequs_store(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_llequs_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r12)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = zext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequs_sext_store(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: test_llequs_sext_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xor r3, r3, r4
+; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r5)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, %b
+ %conv3 = sext i1 %cmp to i16
+ store i16 %conv3, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequs_z_store(i16 zeroext %a) {
+; CHECK-LABEL: test_llequs_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: srwi r3, r3, 5
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = zext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+define void @test_llequs_sext_z_store(i16 zeroext %a) {
+; CHECK-LABEL: test_llequs_sext_z_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
+; CHECK-NEXT: cntlzw r3, r3
+; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
+; CHECK-NEXT: rldicr r3, r3, 58, 0
+; CHECK-NEXT: sradi r3, r3, 63
+; CHECK-NEXT: sth r3, 0(r4)
+; CHECK-NEXT: blr
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %conv2 = sext i1 %cmp to i16
+ store i16 %conv2, i16* @glob, align 2
+ ret void
+}
diff --git a/test/CodeGen/SPARC/LeonItinerariesUT.ll b/test/CodeGen/SPARC/LeonItinerariesUT.ll
index 87e0c4621c08..d586fe183a92 100644
--- a/test/CodeGen/SPARC/LeonItinerariesUT.ll
+++ b/test/CodeGen/SPARC/LeonItinerariesUT.ll
@@ -28,9 +28,9 @@
; LEON3_4_ITIN-LABEL: f32_ops:
; LEON3_4_ITIN: ld
; LEON3_4_ITIN-NEXT: ld
-; LEON3_4_ITIN-NEXT: ld
; LEON3_4_ITIN-NEXT: fadds
; LEON3_4_ITIN-NEXT: ld
+; LEON3_4_ITIN-NEXT: ld
; LEON3_4_ITIN-NEXT: fsubs
; LEON3_4_ITIN-NEXT: fmuls
; LEON3_4_ITIN-NEXT: retl
@@ -47,4 +47,4 @@ entry:
%6 = fmul float %5, %3
%7 = fdiv float %6, %4
ret float %7
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/SPARC/inlineasm-v9.ll b/test/CodeGen/SPARC/inlineasm-v9.ll
new file mode 100644
index 000000000000..9c5424c46229
--- /dev/null
+++ b/test/CodeGen/SPARC/inlineasm-v9.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=sparcv9 <%s | FileCheck %s
+
+;; Ensures that inline-asm accepts and uses 'f' and 'e' register constraints.
+; CHECK-LABEL: faddd:
+; CHECK: faddd %f0, %f2, %f0
+define double @faddd(double, double) local_unnamed_addr #2 {
+entry:
+ %2 = tail call double asm sideeffect "faddd $1, $2, $0;", "=f,f,e"(double %0, double %1) #7
+ ret double %2
+}
+
+; CHECK-LABEL: faddq:
+; CHECK: faddq %f0, %f4, %f0
+define fp128 @faddq(fp128, fp128) local_unnamed_addr #2 {
+entry:
+ %2 = tail call fp128 asm sideeffect "faddq $1, $2, $0;", "=f,f,e"(fp128 %0, fp128 %1) #7
+ ret fp128 %2
+}
+
+;; Ensure that 'e' can indeed go in the high area, and 'f' cannot.
+; CHECK-LABEL: faddd_high:
+; CHECK: fmovd %f2, %f32
+; CHECK: fmovd %f0, %f2
+; CHECK: faddd %f2, %f32, %f2
+define double @faddd_high(double, double) local_unnamed_addr #2 {
+entry:
+ %2 = tail call double asm sideeffect "faddd $1, $2, $0;", "=f,f,e,~{d0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7}"(double %0, double %1) #7
+ ret double %2
+}
+
diff --git a/test/CodeGen/SPARC/inlineasm.ll b/test/CodeGen/SPARC/inlineasm.ll
index af631f0d29f5..35a62706c1ab 100644
--- a/test/CodeGen/SPARC/inlineasm.ll
+++ b/test/CodeGen/SPARC/inlineasm.ll
@@ -94,3 +94,21 @@ entry:
%0 = call i64 asm sideeffect "xor $1, %g0, $0", "=r,0,~{i1}"(i64 5);
ret i64 %0
}
+
+
+;; Ensures that inline-asm accepts and uses 'f' and 'e' register constraints.
+; CHECK-LABEL: fadds:
+; CHECK: fadds %f0, %f1, %f0
+define float @fadds(float, float) local_unnamed_addr #2 {
+entry:
+ %2 = tail call float asm sideeffect "fadds $1, $2, $0;", "=f,f,e"(float %0, float %1) #7
+ ret float %2
+}
+
+; CHECK-LABEL: faddd:
+; CHECK: faddd %f0, %f2, %f0
+define double @faddd(double, double) local_unnamed_addr #2 {
+entry:
+ %2 = tail call double asm sideeffect "faddd $1, $2, $0;", "=f,f,e"(double %0, double %1) #7
+ ret double %2
+}
diff --git a/test/CodeGen/SystemZ/list-ilp-crash.ll b/test/CodeGen/SystemZ/list-ilp-crash.ll
new file mode 100644
index 000000000000..c67ed318b93f
--- /dev/null
+++ b/test/CodeGen/SystemZ/list-ilp-crash.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -pre-RA-sched=list-ilp | FileCheck %s
+;
+; Check that the list-ilp scheduler does not crash due to SystemZ's current use
+; of MVT::Untyped.
+
+define void @pr32723(i8) {
+; CHECK: .text
+BB:
+ br label %CF245
+
+CF245: ; preds = %CF245, %BB
+ %Shuff57 = shufflevector <4 x i8> zeroinitializer, <4 x i8> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %Cmp84 = icmp uge i8 %0, undef
+ br i1 %Cmp84, label %CF245, label %CF260
+
+CF260: ; preds = %CF245
+ %B156 = sdiv <4 x i8> %Shuff57, %Shuff57
+ br label %CF255
+
+CF255: ; preds = %CF255, %CF260
+ %I186 = insertelement <4 x i8> %B156, i8 %0, i32 2
+ br label %CF255
+}
diff --git a/test/CodeGen/SystemZ/lower-copy-undef-src.mir b/test/CodeGen/SystemZ/lower-copy-undef-src.mir
new file mode 100644
index 000000000000..322460d79d68
--- /dev/null
+++ b/test/CodeGen/SystemZ/lower-copy-undef-src.mir
@@ -0,0 +1,14 @@
+# RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 -run-pass=postrapseudos -o - %s | FileCheck %s
+#
+# Test that a COPY with an undef source operand gets handled like an identity
+# copy rather than lowered into a target instruction with the undef flag
+# dropped.
+---
+# CHECK-LABEL: name: undef_copy
+# CHECK: %r13d = KILL undef %r0d, implicit killed %r12q, implicit-def %r12q
+name: undef_copy
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %r12q
+ %r13d = COPY undef %r0d, implicit killed %r12q, implicit-def %r12q
diff --git a/test/CodeGen/Thumb2/v8_IT_5.ll b/test/CodeGen/Thumb2/v8_IT_5.ll
index d8d60413cb0e..5e7a40299ed7 100644
--- a/test/CodeGen/Thumb2/v8_IT_5.ll
+++ b/test/CodeGen/Thumb2/v8_IT_5.ll
@@ -9,7 +9,7 @@
; CHECK-NEXT: b
; CHECK: [[JUMPTARGET]]:{{.*}}%if.else173
; CHECK-NEXT: mov.w
-; CHECK-NEXT: bx lr
+; CHECK-NEXT: pop
; CHECK-NEXT: %if.else145
; CHECK-NEXT: mov.w
diff --git a/test/CodeGen/X86/2007-01-08-InstrSched.ll b/test/CodeGen/X86/2007-01-08-InstrSched.ll
index 4ec703921e29..24aa5b98d0bb 100644
--- a/test/CodeGen/X86/2007-01-08-InstrSched.ll
+++ b/test/CodeGen/X86/2007-01-08-InstrSched.ll
@@ -13,10 +13,10 @@ define float @foo(float %x) nounwind {
; CHECK: mulss
; CHECK: mulss
-; CHECK: mulss
-; CHECK: mulss
; CHECK: addss
+; CHECK: mulss
; CHECK: addss
+; CHECK: mulss
; CHECK: addss
; CHECK: ret
}
diff --git a/test/CodeGen/X86/2010-01-18-DbgValue.ll b/test/CodeGen/X86/2010-01-18-DbgValue.ll
index 8b11fd86ef17..ae60d57bbf49 100644
--- a/test/CodeGen/X86/2010-01-18-DbgValue.ll
+++ b/test/CodeGen/X86/2010-01-18-DbgValue.ll
@@ -1,14 +1,19 @@
-; RUN: llc -march=x86 -O0 < %s | FileCheck %s
-; Currently, dbg.declare generates a DEBUG_VALUE comment. Eventually it will
-; generate DWARF and this test will need to be modified or removed.
+; RUN: llc -march=x86 -O0 < %s -filetype=obj | llvm-dwarfdump - | FileCheck %s
+; CHECK-LABEL: .debug_info contents:
+
+; CHECK-LABEL: DW_TAG_subprogram
+; CHECK: DW_AT_name [DW_FORM_strp] ( {{.*}}"foo")
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NEXT: DW_AT_location [DW_FORM_exprloc] (<0x2> 91 {{..}} )
+; DW_OP_fbreg ??
+; CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( {{.*}}"my_r0")
%struct.Pt = type { double, double }
%struct.Rect = type { %struct.Pt, %struct.Pt }
define double @foo(%struct.Rect* byval %my_r0) nounwind ssp !dbg !1 {
entry:
-;CHECK: DEBUG_VALUE
%retval = alloca double ; <double*> [#uses=2]
%0 = alloca double ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
diff --git a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
deleted file mode 100644
index 495ff0304b1b..000000000000
--- a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-macosx -enable-misched \
-; RUN: -verify-machineinstrs | FileCheck %s
-;
-; Test LiveInterval update handling of DBG_VALUE.
-; rdar://12777252.
-;
-; CHECK: %entry
-; CHECK: DEBUG_VALUE: subdivp:hg
-; CHECK: j
-
-%struct.node.0.27 = type { i16, double, [3 x double], i32, i32 }
-%struct.hgstruct.2.29 = type { %struct.bnode.1.28*, [3 x double], double, [3 x double] }
-%struct.bnode.1.28 = type { i16, double, [3 x double], i32, i32, [3 x double], [3 x double], [3 x double], double, %struct.bnode.1.28*, %struct.bnode.1.28* }
-
-declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
-
-define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double %tolsq, %struct.hgstruct.2.29* nocapture byval align 8 %hg) nounwind uwtable readonly ssp !dbg !14 {
-entry:
- call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !DIExpression()), !dbg !DILocation(scope: !14)
- %type = getelementptr inbounds %struct.node.0.27, %struct.node.0.27* %p, i64 0, i32 0
- %0 = load i16, i16* %type, align 2
- %cmp = icmp eq i16 %0, 1
- br i1 %cmp, label %return, label %for.cond.preheader
-
-for.cond.preheader: ; preds = %entry
- %arrayidx6.1 = getelementptr inbounds %struct.hgstruct.2.29, %struct.hgstruct.2.29* %hg, i64 0, i32 1, i64 1
- %cmp22 = fcmp olt double 0.000000e+00, %dsq
- %conv24 = zext i1 %cmp22 to i16
- br label %return
-
-return: ; preds = %for.cond.preheader, %entry
- %retval.0 = phi i16 [ %conv24, %for.cond.preheader ], [ 0, %entry ]
- ret i16 %retval.0
-}
-
-declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnone
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!12}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.3 (trunk 168918) (llvm/trunk 168920)", isOptimized: true, emissionKind: FullDebug, file: !11, enums: !2, retainedTypes: !2, globals: !2)
-!2 = !{}
-!4 = !DILocalVariable(name: "hg", line: 725, arg: 4, scope: !14, file: !5, type: !6)
-!5 = !DIFile(filename: "MultiSource/Benchmarks/Olden/bh/newbh.c", directory: "MultiSource/Benchmarks/Olden/bh")
-!6 = !DIDerivedType(tag: DW_TAG_typedef, name: "hgstruct", line: 492, file: !11, baseType: !7)
-!7 = !DICompositeType(tag: DW_TAG_structure_type, line: 487, size: 512, align: 64, file: !11)
-!11 = !DIFile(filename: "MultiSource/Benchmarks/Olden/bh/newbh.c", directory: "MultiSource/Benchmarks/Olden/bh")
-!12 = !{i32 1, !"Debug Info Version", i32 3}
-!14 = distinct !DISubprogram(name: "subdivp", isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 1, file: !11, scope: !5, type: !15)
-!15 = !DISubroutineType(types: !16)
-!16 = !{null}
diff --git a/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
deleted file mode 100644
index fbe6000d7ace..000000000000
--- a/test/CodeGen/X86/2012-11-30-misched-dbg.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-macosx -enable-misched \
-; RUN: -verify-machineinstrs | FileCheck %s
-;
-; Test MachineScheduler handling of DBG_VALUE.
-; rdar://12776937.
-;
-; CHECK: %if.else581
-; CHECK: DEBUG_VALUE: num1
-; CHECK: call
-
-%union.rec = type {}
-
-@.str15 = external hidden unnamed_addr constant [6 x i8], align 1
-
-declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
-
-define i32 @AttachGalley(%union.rec** nocapture %suspend_pt) nounwind uwtable ssp !dbg !21 {
-entry:
- %num14075 = alloca [20 x i8], align 16
- br label %if.end33
-
-if.end33: ; preds = %entry
- %cmp1733 = icmp eq i32 undef, 0
- br label %if.else581
-
-if.else581: ; preds = %if.end33
- %cmp586 = icmp eq i8 undef, -123
- br i1 %cmp586, label %if.then588, label %if.else594
-
-if.then588: ; preds = %if.else581
- br label %for.cond1710.preheader
-
-if.else594: ; preds = %if.else581
- unreachable
-
-for.cond1710.preheader: ; preds = %if.then588
- br label %for.cond1710
-
-for.cond1710: ; preds = %for.cond1710, %for.cond1710.preheader
- br i1 undef, label %for.cond1710, label %if.then3344
-
-if.then3344:
- br label %if.then4073
-
-if.then4073: ; preds = %if.then3344
- call void @llvm.dbg.declare(metadata [20 x i8]* %num14075, metadata !4, metadata !DIExpression()), !dbg !DILocation(scope: !5)
- %arraydecay4078 = getelementptr inbounds [20 x i8], [20 x i8]* %num14075, i64 0, i64 0
- %0 = load i32, i32* undef, align 4
- %add4093 = add nsw i32 %0, 0
- %conv4094 = sitofp i32 %add4093 to float
- %div4095 = fdiv float %conv4094, 5.670000e+02
- %conv4096 = fpext float %div4095 to double
- %call4097 = call i32 (i8*, i32, i64, i8*, ...) @__sprintf_chk(i8* %arraydecay4078, i32 0, i64 20, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str15, i64 0, i64 0), double %conv4096) nounwind
- br i1 %cmp1733, label %if.then4107, label %if.else4114
-
-if.then4107: ; preds = %if.then4073
- unreachable
-
-if.else4114: ; preds = %if.then4073
- unreachable
-}
-
-declare i32 @__sprintf_chk(i8*, i32, i64, i8*, ...)
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!35}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.3 (trunk 168918) (llvm/trunk 168920)", isOptimized: true, emissionKind: FullDebug, file: !19, enums: !2, retainedTypes: !2, globals: !2)
-!1 = !{!2}
-!2 = !{}
-!4 = !DILocalVariable(name: "num1", line: 815, scope: !5, file: !14, type: !15)
-!5 = distinct !DILexicalBlock(line: 815, column: 0, file: !14, scope: !6)
-!6 = distinct !DILexicalBlock(line: 812, column: 0, file: !14, scope: !7)
-!7 = distinct !DILexicalBlock(line: 807, column: 0, file: !14, scope: !8)
-!8 = distinct !DILexicalBlock(line: 440, column: 0, file: !14, scope: !9)
-!9 = distinct !DILexicalBlock(line: 435, column: 0, file: !14, scope: !10)
-!10 = distinct !DILexicalBlock(line: 434, column: 0, file: !14, scope: !11)
-!11 = distinct !DILexicalBlock(line: 250, column: 0, file: !14, scope: !12)
-!12 = distinct !DILexicalBlock(line: 249, column: 0, file: !14, scope: !13)
-!13 = distinct !DILexicalBlock(line: 221, column: 0, file: !14, scope: !21)
-!14 = !DIFile(filename: "MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c", directory: "MultiSource/Benchmarks/MiBench/consumer-typeset")
-!15 = !DICompositeType(tag: DW_TAG_array_type, size: 160, align: 8, baseType: !16, elements: !17)
-!16 = !DIBasicType(tag: DW_TAG_base_type, name: "char", size: 8, align: 8, encoding: DW_ATE_signed_char)
-!17 = !{!18}
-!18 = !DISubrange(count: 20)
-!19 = !DIFile(filename: "MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c", directory: "MultiSource/Benchmarks/MiBench/consumer-typeset")
-
-!21 = distinct !DISubprogram(name: "AttachGalley", isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 1, file: !19, scope: !14, type: !22)
-!22 = !DISubroutineType(types: !23)
-!23 = !{null}
-
-; Test DebugValue uses visited by RegisterPressureTracker findUseBetween().
-;
-; CHECK: @main
-; CHECK: DEBUG_VALUE: main:X
-; CHECK: call
-
-%"class.__gnu_cxx::hash_map" = type { %"class.__gnu_cxx::hashtable" }
-%"class.__gnu_cxx::hashtable" = type { i64, i64, i64, i64, i64, i64 }
-
-define void @main() uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !37 {
-entry:
- %X = alloca %"class.__gnu_cxx::hash_map", align 8
- br i1 undef, label %cond.true, label %cond.end
-
-cond.true: ; preds = %entry
- unreachable
-
-cond.end: ; preds = %entry
- call void @llvm.dbg.declare(metadata %"class.__gnu_cxx::hash_map"* %X, metadata !31, metadata !DIExpression()), !dbg !DILocation(scope: !37)
- %_M_num_elements.i.i.i.i = getelementptr inbounds %"class.__gnu_cxx::hash_map", %"class.__gnu_cxx::hash_map"* %X, i64 0, i32 0, i32 5
- invoke void @_Znwm()
- to label %exit.i unwind label %lpad2.i.i.i.i
-
-exit.i: ; preds = %cond.end
- unreachable
-
-lpad2.i.i.i.i: ; preds = %cond.end
- %0 = landingpad { i8*, i32 }
- cleanup
- br i1 undef, label %lpad.body.i.i, label %if.then.i.i.i.i.i.i.i.i
-
-if.then.i.i.i.i.i.i.i.i: ; preds = %lpad2.i.i.i.i
- unreachable
-
-lpad.body.i.i: ; preds = %lpad2.i.i.i.i
- resume { i8*, i32 } %0
-}
-
-declare i32 @__gxx_personality_v0(...)
-
-declare void @_Znwm()
-
-!llvm.dbg.cu = !{!30}
-
-!30 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.3 (trunk 169129) (llvm/trunk 169135)", isOptimized: true, emissionKind: FullDebug, file: !34, enums: !2, retainedTypes: !2)
-!31 = !DILocalVariable(name: "X", line: 29, scope: !37, type: !32)
-!32 = !DIDerivedType(tag: DW_TAG_typedef, name: "HM", line: 28, file: !34, baseType: null)
-!33 = !DIFile(filename: "SingleSource/Benchmarks/Shootout-C++/hash.cpp", directory: "SingleSource/Benchmarks/Shootout-C++")
-!34 = !DIFile(filename: "SingleSource/Benchmarks/Shootout-C++/hash.cpp", directory: "SingleSource/Benchmarks/Shootout-C++")
-!35 = !{i32 1, !"Debug Info Version", i32 3}
-!37 = distinct !DISubprogram(name: "main", isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !30, scopeLine: 1, file: !19, scope: !14, type: !22)
diff --git a/test/CodeGen/X86/2012-11-30-regpres-dbg.ll b/test/CodeGen/X86/2012-11-30-regpres-dbg.ll
deleted file mode 100644
index a717202d3574..000000000000
--- a/test/CodeGen/X86/2012-11-30-regpres-dbg.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-macosx -enable-misched \
-; RUN: -verify-machineinstrs | FileCheck %s
-;
-; Test RegisterPressure handling of DBG_VALUE.
-;
-; CHECK: %entry
-; CHECK: DEBUG_VALUE: test:callback
-; CHECK: ret
-
-%struct.btCompoundLeafCallback = type { i32, i32 }
-
-declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
-
-define void @test() unnamed_addr uwtable ssp align 2 !dbg !2 {
-entry:
- %callback = alloca %struct.btCompoundLeafCallback, align 8
- br i1 undef, label %if.end, label %if.then
-
-if.then: ; preds = %entry
- unreachable
-
-if.end: ; preds = %entry
- call void @llvm.dbg.declare(metadata %struct.btCompoundLeafCallback* %callback, metadata !3, metadata !DIExpression()), !dbg !DILocation(scope: !2)
- %m = getelementptr inbounds %struct.btCompoundLeafCallback, %struct.btCompoundLeafCallback* %callback, i64 0, i32 1
- store i32 0, i32* undef, align 8
- %cmp12447 = icmp sgt i32 undef, 0
- br i1 %cmp12447, label %for.body.lr.ph, label %invoke.cont44
-
-for.body.lr.ph: ; preds = %if.end
- unreachable
-
-invoke.cont44: ; preds = %if.end
- ret void
-}
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!8}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.3 (trunk 168984) (llvm/trunk 168983)", isOptimized: true, emissionKind: FullDebug, file: !6)
-!2 = distinct !DISubprogram(name: "test", isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 1, file: !6, scope: !5, type: !7)
-!3 = !DILocalVariable(name: "callback", line: 214, scope: !2, type: !4)
-!4 = !DICompositeType(tag: DW_TAG_structure_type, name: "btCompoundLeafCallback", line: 90, size: 64, align: 64, file: !6)
-!5 = !DIFile(filename: "MultiSource/Benchmarks/Bullet/btCompoundCollisionAlgorithm.cpp", directory: "MultiSource/Benchmarks/Bullet")
-!6 = !DIFile(filename: "MultiSource/Benchmarks/Bullet/btCompoundCollisionAlgorithm.cpp", directory: "MultiSource/Benchmarks/Bullet")
-!7 = !DISubroutineType(types: !9)
-!8 = !{i32 1, !"Debug Info Version", i32 3}
-!9 = !{null}
diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll
new file mode 100644
index 000000000000..553bc2789ff0
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_add_i64:
+; ALL: # BB#0:
+; ALL-NEXT: leaq (%rsi,%rdi), %rax
+; ALL-NEXT: retq
+ %ret = add i64 %arg1, %arg2
+ ret i64 %ret
+}
+
+define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_add_i32:
+; ALL: # BB#0:
+; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ALL-NEXT: leal (%rsi,%rdi), %eax
+; ALL-NEXT: retq
+ %ret = add i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_add_i16:
+; ALL: # BB#0:
+; ALL-NEXT: # kill: %DI<def> %DI<kill> %RDI<def>
+; ALL-NEXT: # kill: %SI<def> %SI<kill> %RSI<def>
+; ALL-NEXT: leal (%rsi,%rdi), %eax
+; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ALL-NEXT: retq
+ %ret = add i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_add_i8:
+; ALL: # BB#0:
+; ALL-NEXT: addb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = add i8 %arg1, %arg2
+ ret i8 %ret
+}
diff --git a/test/CodeGen/X86/GlobalISel/binop.ll b/test/CodeGen/X86/GlobalISel/binop.ll
index bf4c42cb4292..1aae1db8ab07 100644
--- a/test/CodeGen/X86/GlobalISel/binop.ll
+++ b/test/CodeGen/X86/GlobalISel/binop.ll
@@ -4,48 +4,6 @@
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL
-define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
-; ALL-LABEL: test_add_i64:
-; ALL: # BB#0:
-; ALL-NEXT: leaq (%rsi,%rdi), %rax
-; ALL-NEXT: retq
- %ret = add i64 %arg1, %arg2
- ret i64 %ret
-}
-
-define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
-; ALL-LABEL: test_add_i32:
-; ALL: # BB#0:
-; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ALL-NEXT: leal (%rsi,%rdi), %eax
-; ALL-NEXT: retq
- %ret = add i32 %arg1, %arg2
- ret i32 %ret
-}
-
-define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
-; ALL-LABEL: test_add_i16:
-; ALL: # BB#0:
-; ALL-NEXT: # kill: %DI<def> %DI<kill> %RDI<def>
-; ALL-NEXT: # kill: %SI<def> %SI<kill> %RSI<def>
-; ALL-NEXT: leal (%rsi,%rdi), %eax
-; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; ALL-NEXT: retq
- %ret = add i16 %arg1, %arg2
- ret i16 %ret
-}
-
-define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
-; ALL-LABEL: test_add_i8:
-; ALL: # BB#0:
-; ALL-NEXT: addb %dil, %sil
-; ALL-NEXT: movl %esi, %eax
-; ALL-NEXT: retq
- %ret = add i8 %arg1, %arg2
- ret i8 %ret
-}
-
define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_sub_i64:
; ALL: # BB#0:
diff --git a/test/CodeGen/X86/GlobalISel/br.ll b/test/CodeGen/X86/GlobalISel/br.ll
new file mode 100644
index 000000000000..faa6a0350337
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/br.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+
+define void @uncondbr() {
+; CHECK-LABEL: uncondbr:
+; CHECK: # BB#1: # %entry
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .LBB0_2: # %end
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_3: # %bb2
+; CHECK-NEXT: jmp .LBB0_2
+entry:
+ br label %bb2
+end:
+ ret void
+bb2:
+ br label %end
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/cmp.ll b/test/CodeGen/X86/GlobalISel/cmp.ll
new file mode 100644
index 000000000000..03692bb6b1de
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/cmp.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
+; ALL-LABEL: test_icmp_eq_i8:
+; ALL: # BB#0:
+; ALL-NEXT: cmpb %sil, %dil
+; ALL-NEXT: sete %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp eq i8 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_eq_i16(i16 %a, i16 %b) {
+; ALL-LABEL: test_icmp_eq_i16:
+; ALL: # BB#0:
+; ALL-NEXT: cmpw %si, %di
+; ALL-NEXT: sete %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp eq i16 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_eq_i64(i64 %a, i64 %b) {
+; ALL-LABEL: test_icmp_eq_i64:
+; ALL: # BB#0:
+; ALL-NEXT: cmpq %rsi, %rdi
+; ALL-NEXT: sete %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp eq i64 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_eq_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_eq_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: sete %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp eq i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_ne_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_ne_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setne %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp ne i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_ugt_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_ugt_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: seta %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp ugt i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_uge_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_uge_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setae %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp uge i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_ult_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_ult_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setb %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp ult i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_ule_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_ule_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setbe %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp ule i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_sgt_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_sgt_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setg %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp sgt i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_sge_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_sge_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setge %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp sge i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_slt_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_slt_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setl %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp slt i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
+define i32 @test_icmp_sle_i32(i32 %a, i32 %b) {
+; ALL-LABEL: test_icmp_sle_i32:
+; ALL: # BB#0:
+; ALL-NEXT: cmpl %esi, %edi
+; ALL-NEXT: setle %al
+; ALL-NEXT: andl $1, %eax
+; ALL-NEXT: retq
+ %r = icmp sle i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
index c4d3566008b1..64cd0e70a4fd 100644
--- a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -1,7 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64
-; TODO merge with ext.ll after i64 sext suported on 32bit platform
+; TODO merge with ext.ll after i64 sext is supported on 32bit platform
+
+define i64 @test_zext_i1(i8 %a) {
+; X64-LABEL: test_zext_i1:
+; X64: # BB#0:
+; X64-NEXT: # kill: %DIL<def> %DIL<kill> %RDI<def>
+; X64-NEXT: andq $1, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %val = trunc i8 %a to i1
+ %r = zext i1 %val to i64
+ ret i64 %r
+}
define i64 @test_sext_i8(i8 %val) {
; X64-LABEL: test_sext_i8:
diff --git a/test/CodeGen/X86/GlobalISel/ext.ll b/test/CodeGen/X86/GlobalISel/ext.ll
index 3c032686130e..4d4e3b05ca28 100644
--- a/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/test/CodeGen/X86/GlobalISel/ext.ll
@@ -2,6 +2,24 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64
; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X32
+define i32 @test_zext_i1(i32 %a) {
+; X64-LABEL: test_zext_i1:
+; X64: # BB#0:
+; X64-NEXT: andl $1, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test_zext_i1:
+; X32: # BB#0:
+; X32-NEXT: leal 4(%esp), %eax
+; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: andl $1, %eax
+; X32-NEXT: retl
+ %val = trunc i32 %a to i1
+ %r = zext i1 %val to i32
+ ret i32 %r
+}
+
define i32 @test_zext_i8(i8 %val) {
; X64-LABEL: test_zext_i8:
; X64: # BB#0:
diff --git a/test/CodeGen/X86/GlobalISel/legalize-cmp.mir b/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
new file mode 100644
index 000000000000..68ccbbba0a73
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
@@ -0,0 +1,179 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i32 @test_cmp_i8(i8 %a, i8 %b) {
+ %r = icmp ult i8 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_cmp_i16(i16 %a, i16 %b) {
+ %r = icmp ult i16 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_cmp_i32(i32 %a, i32 %b) {
+ %r = icmp ult i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_cmp_i64(i64 %a, i64 %b) {
+ %r = icmp ult i64 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_cmp_p0(i32* %a, i32* %b) {
+ %r = icmp ult i32* %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+...
+---
+name: test_cmp_i8
+# CHECK-LABEL: name: test_cmp_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+# CHECK: %0(s8) = COPY %edi
+# CHECK-NEXT: %1(s8) = COPY %esi
+# CHECK-NEXT: %2(s1) = G_ICMP intpred(ult), %0(s8), %1
+# CHECK-NEXT: %3(s32) = G_ZEXT %2(s1)
+# CHECK-NEXT: %eax = COPY %3(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s1) = G_ICMP intpred(ult), %0(s8), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_cmp_i16
+# CHECK-LABEL: name: test_cmp_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+# CHECK: %0(s16) = COPY %edi
+# CHECK-NEXT: %1(s16) = COPY %esi
+# CHECK-NEXT: %2(s1) = G_ICMP intpred(ult), %0(s16), %1
+# CHECK-NEXT: %3(s32) = G_ZEXT %2(s1)
+# CHECK-NEXT: %eax = COPY %3(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s1) = G_ICMP intpred(ult), %0(s16), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_cmp_i32
+# CHECK-LABEL: name: test_cmp_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+# CHECK: %0(s32) = COPY %edi
+# CHECK-NEXT: %1(s32) = COPY %esi
+# CHECK-NEXT: %2(s1) = G_ICMP intpred(ult), %0(s32), %1
+# CHECK-NEXT: %3(s32) = G_ZEXT %2(s1)
+# CHECK-NEXT: %eax = COPY %3(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(ult), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_cmp_i64
+# CHECK-LABEL: name: test_cmp_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+# CHECK: %0(s64) = COPY %rdi
+# CHECK-NEXT: %1(s64) = COPY %rsi
+# CHECK-NEXT: %2(s1) = G_ICMP intpred(ult), %0(s64), %1
+# CHECK-NEXT: %3(s32) = G_ZEXT %2(s1)
+# CHECK-NEXT: %eax = COPY %3(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s1) = G_ICMP intpred(ult), %0(s64), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_cmp_p0
+# CHECK-LABEL: name: test_cmp_p0
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+# CHECK: %0(p0) = COPY %rdi
+# CHECK-NEXT: %1(p0) = COPY %rsi
+# CHECK-NEXT: %2(s1) = G_ICMP intpred(ult), %0(p0), %1
+# CHECK-NEXT: %3(s32) = G_ZEXT %2(s1)
+# CHECK-NEXT: %eax = COPY %3(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(p0) = COPY %rdi
+ %1(p0) = COPY %rsi
+ %2(s1) = G_ICMP intpred(ult), %0(p0), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir b/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
index 25af600f2299..6f051f1b6ea5 100644
--- a/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
+++ b/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
@@ -1,6 +1,12 @@
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
--- |
+ define i64 @test_sext_i1(i8 %a) {
+ %val = trunc i8 %a to i1
+ %r = sext i1 %val to i64
+ ret i64 %r
+ }
+
define i64 @test_sext_i8(i8 %val) {
%r = sext i8 %val to i64
ret i64 %r
@@ -16,6 +22,12 @@
ret i64 %r
}
+ define i64 @test_zext_i1(i8 %a) {
+ %val = trunc i8 %a to i1
+ %r = zext i1 %val to i64
+ ret i64 %r
+ }
+
define i64 @test_zext_i8(i8 %val) {
%r = zext i8 %val to i64
ret i64 %r
@@ -33,6 +45,32 @@
...
---
+name: test_sext_i1
+# CHECK-LABEL: name: test_sext_i1
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: %0(s8) = COPY %edi
+# CHECK-NEXT: %1(s1) = G_TRUNC %0(s8)
+# CHECK-NEXT: %2(s64) = G_SEXT %1(s1)
+# CHECK-NEXT: %rax = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s1) = G_TRUNC %0(s8)
+ %2(s64) = G_SEXT %1(s1)
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
+---
name: test_sext_i8
# CHECK-LABEL: name: test_sext_i8
alignment: 4
@@ -102,6 +140,32 @@ body: |
...
---
+name: test_zext_i1
+# CHECK-LABEL: name: test_zext_i1
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: %0(s8) = COPY %edi
+# CHECK-NEXT: %1(s1) = G_TRUNC %0(s8)
+# CHECK-NEXT: %2(s64) = G_ZEXT %1(s1)
+# CHECK-NEXT: %rax = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s1) = G_TRUNC %0(s8)
+ %2(s64) = G_ZEXT %1(s1)
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
+---
name: test_zext_i8
# CHECK-LABEL: name: test_zext_i8
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/legalize-ext.mir b/test/CodeGen/X86/GlobalISel/legalize-ext.mir
index 46457e0fff59..c9add0dc4e95 100644
--- a/test/CodeGen/X86/GlobalISel/legalize-ext.mir
+++ b/test/CodeGen/X86/GlobalISel/legalize-ext.mir
@@ -1,6 +1,12 @@
# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
+ define i32 @test_zext_i1(i8 %a) {
+ %val = trunc i8 %a to i1
+ %r = zext i1 %val to i32
+ ret i32 %r
+ }
+
define i32 @test_zext_i8(i8 %val) {
%r = zext i8 %val to i32
ret i32 %r
@@ -11,6 +17,12 @@
ret i32 %r
}
+ define i32 @test_sext_i1(i8 %a) {
+ %val = trunc i8 %a to i1
+ %r = sext i1 %val to i32
+ ret i32 %r
+ }
+
define i32 @test_sext_i8(i8 %val) {
%r = sext i8 %val to i32
ret i32 %r
@@ -23,6 +35,32 @@
...
---
+name: test_zext_i1
+# ALL-LABEL: name: test_zext_i1
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(s8) = COPY %edi
+# ALL-NEXT: %1(s1) = G_TRUNC %0(s8)
+# ALL-NEXT: %2(s32) = G_ZEXT %1(s1)
+# ALL-NEXT: %eax = COPY %2(s32)
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s1) = G_TRUNC %0(s8)
+ %2(s32) = G_ZEXT %1(s1)
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
name: test_zext_i8
# ALL-LABEL: name: test_zext_i8
alignment: 4
@@ -69,6 +107,32 @@ body: |
...
---
+name: test_sext_i1
+# ALL-LABEL: name: test_sext_i1
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(s8) = COPY %edi
+# ALL-NEXT: %1(s1) = G_TRUNC %0(s8)
+# ALL-NEXT: %2(s32) = G_SEXT %1(s1)
+# ALL-NEXT: %eax = COPY %2(s32)
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s1) = G_TRUNC %0(s8)
+ %2(s32) = G_SEXT %1(s1)
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
name: test_sext_i8
# ALL-LABEL: name: test_sext_i8
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/memop-x32.ll b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
index 49a7fd79f8b2..49a7fd79f8b2 100644
--- a/test/CodeGen/X86/GlobalISel/memop-x32.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
diff --git a/test/CodeGen/X86/GlobalISel/memop.ll b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index a7407c0e6b75..3e45a9c9a49d 100644
--- a/test/CodeGen/X86/GlobalISel/memop.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -1,13 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST
-; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_FAST --check-prefix=AVX_FAST
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_GREEDY --check-prefix=AVX_GREEDY
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_FAST --check-prefix=AVX512F_FAST
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_GREEDY --check-prefix=AVX512F_GREEDY
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_FAST --check-prefix=AVX512VL_FAST
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_GREEDY --check-prefix=AVX512VL_GREEDY
-
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
@@ -77,34 +70,6 @@ define double @test_load_double(double * %p1) {
ret double %r
}
-define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
-; SSE-LABEL: test_load_v4i32_noalign:
-; SSE: # BB#0:
-; SSE-NEXT: movups (%rdi), %xmm0
-; SSE-NEXT: retq
-;
-; ALL_AVX-LABEL: test_load_v4i32_noalign:
-; ALL_AVX: # BB#0:
-; ALL_AVX-NEXT: vmovups (%rdi), %xmm0
-; ALL_AVX-NEXT: retq
- %r = load <4 x i32>, <4 x i32>* %p1, align 1
- ret <4 x i32> %r
-}
-
-define <4 x i32> @test_load_v4i32_align(<4 x i32> * %p1) {
-; SSE-LABEL: test_load_v4i32_align:
-; SSE: # BB#0:
-; SSE-NEXT: movaps (%rdi), %xmm0
-; SSE-NEXT: retq
-;
-; ALL_AVX-LABEL: test_load_v4i32_align:
-; ALL_AVX: # BB#0:
-; ALL_AVX-NEXT: vmovaps (%rdi), %xmm0
-; ALL_AVX-NEXT: retq
- %r = load <4 x i32>, <4 x i32>* %p1, align 16
- ret <4 x i32> %r
-}
-
define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
; ALL: # BB#0:
@@ -139,19 +104,6 @@ define float * @test_store_float(float %val, float * %p1) {
; SSE_GREEDY-NEXT: movss %xmm0, (%rdi)
; SSE_GREEDY-NEXT: movq %rdi, %rax
; SSE_GREEDY-NEXT: retq
-;
-; ALL_AVX_FAST-LABEL: test_store_float:
-; ALL_AVX_FAST: # BB#0:
-; ALL_AVX_FAST-NEXT: vmovd %xmm0, %eax
-; ALL_AVX_FAST-NEXT: movl %eax, (%rdi)
-; ALL_AVX_FAST-NEXT: movq %rdi, %rax
-; ALL_AVX_FAST-NEXT: retq
-;
-; ALL_AVX_GREEDY-LABEL: test_store_float:
-; ALL_AVX_GREEDY: # BB#0:
-; ALL_AVX_GREEDY-NEXT: vmovss %xmm0, (%rdi)
-; ALL_AVX_GREEDY-NEXT: movq %rdi, %rax
-; ALL_AVX_GREEDY-NEXT: retq
store float %val, float* %p1
ret float * %p1;
}
@@ -171,18 +123,6 @@ define double * @test_store_double(double %val, double * %p1) {
; SSE_GREEDY-NEXT: movq %rdi, %rax
; SSE_GREEDY-NEXT: retq
;
-; ALL_AVX_FAST-LABEL: test_store_double:
-; ALL_AVX_FAST: # BB#0:
-; ALL_AVX_FAST-NEXT: vmovq %xmm0, %rax
-; ALL_AVX_FAST-NEXT: movq %rax, (%rdi)
-; ALL_AVX_FAST-NEXT: movq %rdi, %rax
-; ALL_AVX_FAST-NEXT: retq
-;
-; ALL_AVX_GREEDY-LABEL: test_store_double:
-; ALL_AVX_GREEDY: # BB#0:
-; ALL_AVX_GREEDY-NEXT: vmovsd %xmm0, (%rdi)
-; ALL_AVX_GREEDY-NEXT: movq %rdi, %rax
-; ALL_AVX_GREEDY-NEXT: retq
store double %val, double* %p1
ret double * %p1;
}
diff --git a/test/CodeGen/X86/GlobalISel/memop-vec.ll b/test/CodeGen/X86/GlobalISel/memop-vec.ll
new file mode 100644
index 000000000000..e218fded4d5f
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/memop-vec.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+
+define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
+; ALL-LABEL: test_load_v4i32_noalign:
+; ALL: # BB#0:
+; ALL-NEXT: vmovups (%rdi), %xmm0
+; ALL-NEXT: retq
+ %r = load <4 x i32>, <4 x i32>* %p1, align 1
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @test_load_v4i32_align(<4 x i32> * %p1) {
+; ALL-LABEL: test_load_v4i32_align:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps (%rdi), %xmm0
+; ALL-NEXT: retq
+ %r = load <4 x i32>, <4 x i32>* %p1, align 16
+ ret <4 x i32> %r
+}
+
+define void @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
+; ALL-LABEL: test_store_v4i32_noalign:
+; ALL: # BB#0:
+; ALL-NEXT: vmovups %xmm0, (%rdi)
+; ALL-NEXT: retq
+ store <4 x i32> %val, <4 x i32>* %p1, align 1
+ ret void
+}
+
+define void @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
+; ALL-LABEL: test_store_v4i32_align:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps %xmm0, (%rdi)
+; ALL-NEXT: retq
+ store <4 x i32> %val, <4 x i32>* %p1, align 16
+ ret void
+}
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index 3a65a9003773..1ea922ee475a 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -2,11 +2,6 @@
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
--- |
- ; ModuleID = 'tmp.ll'
- source_filename = "tmp.ll"
- target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
- target triple = "x86_64--linux-gnu"
-
define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
%ret = add i8 %arg1, %arg2
ret i8 %ret
@@ -120,6 +115,26 @@
ret void
}
+ define i1 @test_icmp_eq_i8(i8 %a, i8 %b) {
+ %r = icmp eq i8 %a, %b
+ ret i1 %r
+ }
+
+ define i1 @test_icmp_eq_i16(i16 %a, i16 %b) {
+ %r = icmp eq i16 %a, %b
+ ret i1 %r
+ }
+
+ define i1 @test_icmp_eq_i32(i32 %a, i32 %b) {
+ %r = icmp eq i32 %a, %b
+ ret i1 %r
+ }
+
+ define i1 @test_icmp_eq_i64(i64 %a, i64 %b) {
+ %r = icmp eq i64 %a, %b
+ ret i1 %r
+ }
+
...
---
name: test_add_i8
@@ -735,3 +750,103 @@ body: |
RET 0
...
+---
+name: test_icmp_eq_i8
+# CHECK-LABEL: name: test_icmp_eq_i8
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s1) = G_ICMP intpred(eq), %0(s8), %1
+ %al = COPY %2(s1)
+ RET 0, implicit %al
+
+...
+---
+name: test_icmp_eq_i16
+# CHECK-LABEL: name: test_icmp_eq_i16
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s1) = G_ICMP intpred(eq), %0(s16), %1
+ %al = COPY %2(s1)
+ RET 0, implicit %al
+
+...
+---
+name: test_icmp_eq_i32
+# CHECK-LABEL: name: test_icmp_eq_i32
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(eq), %0(s32), %1
+ %al = COPY %2(s1)
+ RET 0, implicit %al
+
+...
+---
+name: test_icmp_eq_i64
+# CHECK-LABEL: name: test_icmp_eq_i64
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s1) = G_ICMP intpred(eq), %0(s64), %1
+ %al = COPY %2(s1)
+ RET 0, implicit %al
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-br.mir b/test/CodeGen/X86/GlobalISel/select-br.mir
new file mode 100644
index 000000000000..6d8cd2b1367d
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-br.mir
@@ -0,0 +1,39 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+
+--- |
+ define void @uncondbr() {
+ entry:
+ br label %bb2
+
+ end: ; preds = %bb2
+ ret void
+
+ bb2: ; preds = %entry
+ br label %end
+ }
+
+...
+---
+name: uncondbr
+# CHECK-LABEL: name: uncondbr
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: JMP_1 %bb.2.bb2
+# CHECK: JMP_1 %bb.1.end
+body: |
+ bb.1.entry:
+ successors: %bb.3.bb2(0x80000000)
+
+ G_BR %bb.3.bb2
+
+ bb.2.end:
+ RET 0
+
+ bb.3.bb2:
+ successors: %bb.2.end(0x80000000)
+
+ G_BR %bb.2.end
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-cmp.mir b/test/CodeGen/X86/GlobalISel/select-cmp.mir
new file mode 100644
index 000000000000..1d3da6cb88b9
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-cmp.mir
@@ -0,0 +1,563 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+
+--- |
+ define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
+ %r = icmp eq i8 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_eq_i16(i16 %a, i16 %b) {
+ %r = icmp eq i16 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_eq_i64(i64 %a, i64 %b) {
+ %r = icmp eq i64 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_eq_i32(i32 %a, i32 %b) {
+ %r = icmp eq i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_ne_i32(i32 %a, i32 %b) {
+ %r = icmp ne i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_ugt_i32(i32 %a, i32 %b) {
+ %r = icmp ugt i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_uge_i32(i32 %a, i32 %b) {
+ %r = icmp uge i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_ult_i32(i32 %a, i32 %b) {
+ %r = icmp ult i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_ule_i32(i32 %a, i32 %b) {
+ %r = icmp ule i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_sgt_i32(i32 %a, i32 %b) {
+ %r = icmp sgt i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_sge_i32(i32 %a, i32 %b) {
+ %r = icmp sge i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_slt_i32(i32 %a, i32 %b) {
+ %r = icmp slt i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+ define i32 @test_icmp_sle_i32(i32 %a, i32 %b) {
+ %r = icmp sle i32 %a, %b
+ %res = zext i1 %r to i32
+ ret i32 %res
+ }
+
+...
+---
+name: test_icmp_eq_i8
+# CHECK-LABEL: name: test_icmp_eq_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr8 }
+# CHECK-NEXT: - { id: 1, class: gr8 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %dil
+# CHECK-NEXT: %1 = COPY %sil
+# CHECK-NEXT: CMP8rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s1) = G_ICMP intpred(eq), %0(s8), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_eq_i16
+# CHECK-LABEL: name: test_icmp_eq_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr16 }
+# CHECK-NEXT: - { id: 1, class: gr16 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %di
+# CHECK-NEXT: %1 = COPY %si
+# CHECK-NEXT: CMP16rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s1) = G_ICMP intpred(eq), %0(s16), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_eq_i64
+# CHECK-LABEL: name: test_icmp_eq_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+# CHECK-NEXT: - { id: 1, class: gr64 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %rdi
+# CHECK-NEXT: %1 = COPY %rsi
+# CHECK-NEXT: CMP64rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s1) = G_ICMP intpred(eq), %0(s64), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_eq_i32
+# CHECK-LABEL: name: test_icmp_eq_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(eq), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_ne_i32
+# CHECK-LABEL: name: test_icmp_ne_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETNEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(ne), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_ugt_i32
+# CHECK-LABEL: name: test_icmp_ugt_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETAr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(ugt), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_uge_i32
+# CHECK-LABEL: name: test_icmp_uge_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETAEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(uge), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_ult_i32
+# CHECK-LABEL: name: test_icmp_ult_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETBr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(ult), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_ule_i32
+# CHECK-LABEL: name: test_icmp_ule_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETBEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(ule), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_sgt_i32
+# CHECK-LABEL: name: test_icmp_sgt_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETGr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(sgt), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_sge_i32
+# CHECK-LABEL: name: test_icmp_sge_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETGEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(sge), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_slt_i32
+# CHECK-LABEL: name: test_icmp_slt_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETLr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(slt), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_icmp_sle_i32
+# CHECK-LABEL: name: test_icmp_sle_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK-NEXT: - { id: 2, class: gr8 }
+# CHECK-NEXT: - { id: 3, class: gr32 }
+# CHECK-NEXT: - { id: 4, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %esi
+# CHECK-NEXT: CMP32rr %0, %1, implicit-def %eflags
+# CHECK-NEXT: %2 = SETLEr implicit %eflags
+# CHECK-NEXT: %4 = SUBREG_TO_REG 0, %2, 1
+# CHECK-NEXT: %3 = AND32ri8 %4, 1, implicit-def %eflags
+# CHECK-NEXT: %eax = COPY %3
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s1) = G_ICMP intpred(sle), %0(s32), %1
+ %3(s32) = G_ZEXT %2(s1)
+ %eax = COPY %3(s32)
+ RET 0, implicit %eax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir b/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
index 85b3f61a9e44..0844701487bc 100644
--- a/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
+++ b/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
@@ -1,6 +1,12 @@
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
+ define i64 @test_zext_i1(i8 %a) {
+ %val = trunc i8 %a to i1
+ %r = zext i1 %val to i64
+ ret i64 %r
+ }
+
define i64 @test_sext_i8(i8 %val) {
%r = sext i8 %val to i64
ret i64 %r
@@ -13,6 +19,38 @@
...
---
+name: test_zext_i1
+# ALL-LABEL: name: test_zext_i1
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8 }
+# ALL-NEXT: - { id: 1, class: gr8 }
+# ALL-NEXT: - { id: 2, class: gr64 }
+# ALL-NEXT: - { id: 3, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %0
+# ALL-NEXT: %3 = SUBREG_TO_REG 0, %1, 1
+# ALL-NEXT: %2 = AND64ri8 %3, 1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s8) = COPY %edi
+ %1(s1) = G_TRUNC %0(s8)
+ %2(s64) = G_ZEXT %1(s1)
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
+---
name: test_sext_i8
# ALL-LABEL: name: test_sext_i8
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/select-ext.mir b/test/CodeGen/X86/GlobalISel/select-ext.mir
index 63aeae89bd1a..831d6efb75f1 100644
--- a/test/CodeGen/X86/GlobalISel/select-ext.mir
+++ b/test/CodeGen/X86/GlobalISel/select-ext.mir
@@ -2,6 +2,11 @@
# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
+ define i32 @test_zext_i1(i1 %a) {
+ %r = zext i1 %a to i32
+ ret i32 %r
+ }
+
define i32 @test_zext_i8(i8 %val) {
%r = zext i8 %val to i32
ret i32 %r
@@ -24,6 +29,34 @@
...
---
+name: test_zext_i1
+# ALL-LABEL: name: test_zext_i1
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8 }
+# ALL-NEXT: - { id: 1, class: gr32 }
+# ALL-NEXT: - { id: 2, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %2 = SUBREG_TO_REG 0, %0, 1
+# ALL-NEXT: %1 = AND32ri8 %2, 1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi
+
+ %0(s1) = COPY %edi
+ %1(s32) = G_ZEXT %0(s1)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
name: test_zext_i8
# ALL-LABEL: name: test_zext_i8
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-x32.mir b/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index 8e6a2771db6e..8e6a2771db6e 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop-x32.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
diff --git a/test/CodeGen/X86/GlobalISel/select-memop.mir b/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
index 817dc3cc9764..b57c9b0cca98 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
@@ -34,7 +34,6 @@
ret float %r
}
-
define double @test_load_double(double* %p1) {
%r = load double, double* %p1
ret double %r
@@ -45,16 +44,6 @@
ret double %r
}
- define <4 x i32> @test_load_v4i32_noalign(<4 x i32>* %p1) {
- %r = load <4 x i32>, <4 x i32>* %p1, align 1
- ret <4 x i32> %r
- }
-
- define <4 x i32> @test_load_v4i32_align(<4 x i32>* %p1) {
- %r = load <4 x i32>, <4 x i32>* %p1, align 16
- ret <4 x i32> %r
- }
-
define i32* @test_store_i32(i32 %val, i32* %p1) {
store i32 %val, i32* %p1
ret i32* %p1
@@ -85,16 +74,6 @@
ret double* %p1
}
- define <4 x i32>* @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
- store <4 x i32> %val, <4 x i32>* %p1, align 16
- ret <4 x i32>* %p1
- }
-
- define <4 x i32>* @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
- store <4 x i32> %val, <4 x i32>* %p1, align 1
- ret <4 x i32>* %p1
- }
-
define i32* @test_load_ptr(i32** %ptr1) {
%p = load i32*, i32** %ptr1
ret i32* %p
@@ -304,62 +283,6 @@ body: |
...
---
-# ALL-LABEL: name: test_load_v4i32_noalign
-name: test_load_v4i32_noalign
-alignment: 4
-legalized: true
-regBankSelected: true
-registers:
-# ALL: - { id: 0, class: gr64 }
-# NO_AVX512F: - { id: 1, class: vr128 }
-# AVX512ALL: - { id: 1, class: vr128x }
- - { id: 0, class: gpr }
- - { id: 1, class: vecr }
-# ALL: %0 = COPY %rdi
-# SSE: %1 = MOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# AVX: %1 = VMOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# AVX512F: %1 = VMOVUPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# AVX512VL: %1 = VMOVUPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# ALL: %xmm0 = COPY %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %rdi
-
- %0(p0) = COPY %rdi
- %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
-
-...
----
-# ALL-LABEL: name: test_load_v4i32_align
-name: test_load_v4i32_align
-alignment: 4
-legalized: true
-regBankSelected: true
-registers:
-# ALL: - { id: 0, class: gr64 }
-# NO_AVX512F: - { id: 1, class: vr128 }
-# AVX512ALL: - { id: 1, class: vr128x }
- - { id: 0, class: gpr }
- - { id: 1, class: vecr }
-# ALL: %0 = COPY %rdi
-# SSE: %1 = MOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# AVX: %1 = VMOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# AVX512F: %1 = VMOVAPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# AVX512VL: %1 = VMOVAPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# ALL: %xmm0 = COPY %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %rdi
-
- %0(p0) = COPY %rdi
- %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
-
-...
----
# ALL-LABEL: name: test_store_i32
name: test_store_i32
alignment: 4
@@ -530,66 +453,6 @@ body: |
...
---
-# ALL-LABEL: name: test_store_v4i32_align
-name: test_store_v4i32_align
-alignment: 4
-legalized: true
-regBankSelected: true
-registers:
-# NO_AVX512F: - { id: 0, class: vr128 }
-# AVX512ALL: - { id: 0, class: vr128x }
-# ALL: - { id: 1, class: gr64 }
- - { id: 0, class: vecr }
- - { id: 1, class: gpr }
-# ALL: %0 = COPY %xmm0
-# ALL: %1 = COPY %rdi
-# SSE: MOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# AVX: VMOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# AVX512VL: VMOVAPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# ALL: %rax = COPY %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
-
- %0(<4 x s32>) = COPY %xmm0
- %1(p0) = COPY %rdi
- G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 16)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
-
-...
----
-# ALL-LABEL: name: test_store_v4i32_noalign
-name: test_store_v4i32_noalign
-alignment: 4
-legalized: true
-regBankSelected: true
-registers:
-# NO_AVX512F: - { id: 0, class: vr128 }
-# AVX512ALL: - { id: 0, class: vr128x }
-# ALL: - { id: 1, class: gr64 }
- - { id: 0, class: vecr }
- - { id: 1, class: gpr }
-# ALL: %0 = COPY %xmm0
-# ALL: %1 = COPY %rdi
-# SSE: MOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# AVX: VMOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512VL: VMOVUPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# ALL: %rax = COPY %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
-
- %0(<4 x s32>) = COPY %xmm0
- %1(p0) = COPY %rdi
- G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
-
-...
----
# ALL-LABEL: name: test_load_ptr
name: test_load_ptr
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-v128.mir b/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
new file mode 100644
index 000000000000..ce3f6b91dcf6
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
@@ -0,0 +1,143 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+
+--- |
+ define <4 x i32> @test_load_v4i32_noalign(<4 x i32>* %p1) {
+ %r = load <4 x i32>, <4 x i32>* %p1, align 1
+ ret <4 x i32> %r
+ }
+
+ define <4 x i32> @test_load_v4i32_align(<4 x i32>* %p1) {
+ %r = load <4 x i32>, <4 x i32>* %p1, align 16
+ ret <4 x i32> %r
+ }
+
+ define <4 x i32>* @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
+ store <4 x i32> %val, <4 x i32>* %p1, align 16
+ ret <4 x i32>* %p1
+ }
+
+ define <4 x i32>* @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
+ store <4 x i32> %val, <4 x i32>* %p1, align 1
+ ret <4 x i32>* %p1
+ }
+
+...
+---
+# ALL-LABEL: name: test_load_v4i32_noalign
+name: test_load_v4i32_noalign
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# NO_AVX512F: - { id: 1, class: vr128 }
+# AVX512ALL: - { id: 1, class: vr128x }
+ - { id: 0, class: gpr }
+ - { id: 1, class: vecr }
+# ALL: %0 = COPY %rdi
+# SSE: %1 = MOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# AVX: %1 = VMOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# AVX512F: %1 = VMOVUPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# AVX512VL: %1 = VMOVUPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
+ %xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_v4i32_align
+name: test_load_v4i32_align
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# NO_AVX512F: - { id: 1, class: vr128 }
+# AVX512ALL: - { id: 1, class: vr128x }
+ - { id: 0, class: gpr }
+ - { id: 1, class: vecr }
+# ALL: %0 = COPY %rdi
+# SSE: %1 = MOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# AVX: %1 = VMOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# AVX512F: %1 = VMOVAPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# AVX512VL: %1 = VMOVAPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1)
+ %xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_store_v4i32_align
+name: test_store_v4i32_align
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# NO_AVX512F: - { id: 0, class: vr128 }
+# AVX512ALL: - { id: 0, class: vr128x }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# SSE: MOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# AVX: VMOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# AVX512VL: VMOVAPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 16)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_v4i32_noalign
+name: test_store_v4i32_noalign
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# NO_AVX512F: - { id: 0, class: vr128 }
+# AVX512ALL: - { id: 0, class: vr128x }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# SSE: MOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# AVX: VMOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512VL: VMOVUPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/O0-pipeline.ll b/test/CodeGen/X86/O0-pipeline.ll
new file mode 100644
index 000000000000..262cb96ca6d8
--- /dev/null
+++ b/test/CodeGen/X86/O0-pipeline.ll
@@ -0,0 +1,67 @@
+; RUN: llc -mtriple=x86_64-- -O0 -debug-pass=Structure < %s -o /dev/null 2>&1 | FileCheck %s
+
+; REQUIRES: asserts
+
+; CHECK-LABEL: Pass Arguments:
+; CHECK-NEXT: Target Library Information
+; CHECK-NEXT: Target Transform Information
+; CHECK-NEXT: Target Pass Configuration
+; CHECK-NEXT: Type-Based Alias Analysis
+; CHECK-NEXT: Scoped NoAlias Alias Analysis
+; CHECK-NEXT: Assumption Cache Tracker
+; CHECK-NEXT: Create Garbage Collector Module Metadata
+; CHECK-NEXT: Machine Module Information
+; CHECK-NEXT: Machine Branch Probability Analysis
+; CHECK-NEXT: ModulePass Manager
+; CHECK-NEXT: Pre-ISel Intrinsic Lowering
+; CHECK-NEXT: FunctionPass Manager
+; CHECK-NEXT: Expand Atomic instructions
+; CHECK-NEXT: Dominator Tree Construction
+; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT: Module Verifier
+; CHECK-NEXT: Lower Garbage Collection Instructions
+; CHECK-NEXT: Shadow Stack GC Lowering
+; CHECK-NEXT: Remove unreachable blocks from the CFG
+; CHECK-NEXT: Inserts calls to mcount-like functions
+; CHECK-NEXT: Scalarize Masked Memory Intrinsics
+; CHECK-NEXT: Expand reduction intrinsics
+; CHECK-NEXT: Rewrite Symbols
+; CHECK-NEXT: FunctionPass Manager
+; CHECK-NEXT: Dominator Tree Construction
+; CHECK-NEXT: Exception handling preparation
+; CHECK-NEXT: Safe Stack instrumentation pass
+; CHECK-NEXT: Insert stack protectors
+; CHECK-NEXT: Module Verifier
+; CHECK-NEXT: X86 DAG->DAG Instruction Selection
+; CHECK-NEXT: X86 PIC Global Base Reg Initialization
+; CHECK-NEXT: Expand ISel Pseudo-instructions
+; CHECK-NEXT: Local Stack Slot Allocation
+; CHECK-NEXT: X86 WinAlloca Expander
+; CHECK-NEXT: Eliminate PHI nodes for register allocation
+; CHECK-NEXT: Two-Address instruction pass
+; CHECK-NEXT: Fast Register Allocator
+; CHECK-NEXT: Bundle Machine CFG Edges
+; CHECK-NEXT: X86 FP Stackifier
+; CHECK-NEXT: Prologue/Epilogue Insertion & Frame Finalization
+; CHECK-NEXT: Post-RA pseudo instruction expansion pass
+; CHECK-NEXT: X86 pseudo instruction expansion pass
+; CHECK-NEXT: Analyze Machine Code For Garbage Collection
+; CHECK-NEXT: X86 vzeroupper inserter
+; CHECK-NEXT: Contiguously Lay Out Funclets
+; CHECK-NEXT: StackMap Liveness Analysis
+; CHECK-NEXT: Live DEBUG_VALUE analysis
+; CHECK-NEXT: Insert fentry calls
+; CHECK-NEXT: MachineDominator Tree Construction
+; CHECK-NEXT: Machine Natural Loop Construction
+; CHECK-NEXT: Insert XRay ops
+; CHECK-NEXT: Implement the 'patchable-function' attribute
+; CHECK-NEXT: Lazy Machine Block Frequency Analysis
+; CHECK-NEXT: Machine Optimization Remark Emitter
+; CHECK-NEXT: MachineDominator Tree Construction
+; CHECK-NEXT: Machine Natural Loop Construction
+; CHECK-NEXT: X86 Assembly Printer
+; CHECK-NEXT: Free MachineFunction
+
+define void @f() {
+ ret void
+}
diff --git a/test/CodeGen/X86/all-ones-vector.ll b/test/CodeGen/X86/all-ones-vector.ll
index 35f488ea448c..d0160a5b84df 100644
--- a/test/CodeGen/X86/all-ones-vector.ll
+++ b/test/CodeGen/X86/all-ones-vector.ll
@@ -157,8 +157,8 @@ define <32 x i8> @allones_v32i8() nounwind {
;
; X32-AVX1-LABEL: allones_v32i8:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v32i8:
@@ -174,8 +174,8 @@ define <32 x i8> @allones_v32i8() nounwind {
;
; X64-AVX1-LABEL: allones_v32i8:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v32i8:
@@ -194,8 +194,8 @@ define <16 x i16> @allones_v16i16() nounwind {
;
; X32-AVX1-LABEL: allones_v16i16:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v16i16:
@@ -211,8 +211,8 @@ define <16 x i16> @allones_v16i16() nounwind {
;
; X64-AVX1-LABEL: allones_v16i16:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v16i16:
@@ -231,8 +231,8 @@ define <8 x i32> @allones_v8i32() nounwind {
;
; X32-AVX1-LABEL: allones_v8i32:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v8i32:
@@ -248,8 +248,8 @@ define <8 x i32> @allones_v8i32() nounwind {
;
; X64-AVX1-LABEL: allones_v8i32:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v8i32:
@@ -268,8 +268,8 @@ define <4 x i64> @allones_v4i64() nounwind {
;
; X32-AVX1-LABEL: allones_v4i64:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v4i64:
@@ -285,8 +285,8 @@ define <4 x i64> @allones_v4i64() nounwind {
;
; X64-AVX1-LABEL: allones_v4i64:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v4i64:
@@ -305,8 +305,8 @@ define <4 x double> @allones_v4f64() nounwind {
;
; X32-AVX1-LABEL: allones_v4f64:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v4f64:
@@ -322,8 +322,8 @@ define <4 x double> @allones_v4f64() nounwind {
;
; X64-AVX1-LABEL: allones_v4f64:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v4f64:
@@ -342,8 +342,8 @@ define <4 x double> @allones_v4f64_optsize() nounwind optsize {
;
; X32-AVX1-LABEL: allones_v4f64_optsize:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v4f64_optsize:
@@ -359,8 +359,8 @@ define <4 x double> @allones_v4f64_optsize() nounwind optsize {
;
; X64-AVX1-LABEL: allones_v4f64_optsize:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v4f64_optsize:
@@ -379,8 +379,8 @@ define <8 x float> @allones_v8f32() nounwind {
;
; X32-AVX1-LABEL: allones_v8f32:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v8f32:
@@ -396,8 +396,8 @@ define <8 x float> @allones_v8f32() nounwind {
;
; X64-AVX1-LABEL: allones_v8f32:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v8f32:
@@ -416,8 +416,8 @@ define <8 x float> @allones_v8f32_optsize() nounwind optsize {
;
; X32-AVX1-LABEL: allones_v8f32_optsize:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v8f32_optsize:
@@ -433,8 +433,8 @@ define <8 x float> @allones_v8f32_optsize() nounwind optsize {
;
; X64-AVX1-LABEL: allones_v8f32_optsize:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v8f32_optsize:
@@ -455,8 +455,8 @@ define <64 x i8> @allones_v64i8() nounwind {
;
; X32-AVX1-LABEL: allones_v64i8:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
@@ -487,8 +487,8 @@ define <64 x i8> @allones_v64i8() nounwind {
;
; X64-AVX1-LABEL: allones_v64i8:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
@@ -522,8 +522,8 @@ define <32 x i16> @allones_v32i16() nounwind {
;
; X32-AVX1-LABEL: allones_v32i16:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
@@ -554,8 +554,8 @@ define <32 x i16> @allones_v32i16() nounwind {
;
; X64-AVX1-LABEL: allones_v32i16:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
@@ -589,8 +589,8 @@ define <16 x i32> @allones_v16i32() nounwind {
;
; X32-AVX1-LABEL: allones_v16i32:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
@@ -615,8 +615,8 @@ define <16 x i32> @allones_v16i32() nounwind {
;
; X64-AVX1-LABEL: allones_v16i32:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
@@ -644,8 +644,8 @@ define <8 x i64> @allones_v8i64() nounwind {
;
; X32-AVX1-LABEL: allones_v8i64:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
@@ -670,8 +670,8 @@ define <8 x i64> @allones_v8i64() nounwind {
;
; X64-AVX1-LABEL: allones_v8i64:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
@@ -699,8 +699,8 @@ define <8 x double> @allones_v8f64() nounwind {
;
; X32-AVX1-LABEL: allones_v8f64:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
@@ -725,8 +725,8 @@ define <8 x double> @allones_v8f64() nounwind {
;
; X64-AVX1-LABEL: allones_v8f64:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
@@ -754,8 +754,8 @@ define <16 x float> @allones_v16f32() nounwind {
;
; X32-AVX1-LABEL: allones_v16f32:
; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
@@ -780,8 +780,8 @@ define <16 x float> @allones_v16f32() nounwind {
;
; X64-AVX1-LABEL: allones_v16f32:
; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index 2aaf14001758..aa28ef5175ed 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -135,88 +135,87 @@ define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) {
define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm8
-; SSE2-NEXT: movdqa 16(%rdi), %xmm11
+; SSE2-NEXT: movdqa (%rdi), %xmm3
+; SSE2-NEXT: movdqa 16(%rdi), %xmm8
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm8, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm15, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm6, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm5, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm7, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm10, %xmm7
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm11, %xmm1
-; SSE2-NEXT: paddd %xmm9, %xmm13
-; SSE2-NEXT: paddd %xmm15, %xmm2
-; SSE2-NEXT: paddd %xmm14, %xmm5
-; SSE2-NEXT: paddd %xmm8, %xmm0
-; SSE2-NEXT: paddd %xmm12, %xmm6
-; SSE2-NEXT: paddd %xmm10, %xmm3
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm4, %xmm7
-; SSE2-NEXT: paddd %xmm4, %xmm3
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm9
; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm13
+; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm7
; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm4, %xmm7
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm7, %xmm3
-; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm6, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm9
; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm13
-; SSE2-NEXT: pand %xmm4, %xmm13
+; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm7
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm13, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -259,198 +258,183 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-LABEL: avg_v64i8:
; SSE2: # BB#0:
-; SSE2-NEXT: subq $152, %rsp
-; SSE2-NEXT: .Lcfi0:
-; SSE2-NEXT: .cfi_def_cfa_offset 160
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm4
-; SSE2-NEXT: movdqa 32(%rdi), %xmm5
-; SSE2-NEXT: movdqa 48(%rdi), %xmm6
+; SSE2-NEXT: movdqa (%rdi), %xmm6
+; SSE2-NEXT: movdqa 16(%rdi), %xmm2
+; SSE2-NEXT: movdqa 32(%rdi), %xmm1
+; SSE2-NEXT: movdqa 48(%rdi), %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsi), %xmm5
+; SSE2-NEXT: movdqa 16(%rsi), %xmm13
+; SSE2-NEXT: movdqa 32(%rsi), %xmm11
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm5, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm5, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm4, %xmm10
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm12, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm6, %xmm5
; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm6, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm8, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsi), %xmm14
-; SSE2-NEXT: movdqa %xmm14, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm13, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm14, %xmm12
+; SSE2-NEXT: movdqa %xmm7, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm14, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
-; SSE2-NEXT: movdqa 16(%rsi), %xmm12
-; SSE2-NEXT: movdqa %xmm12, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm15, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm13, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm8, %xmm15
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm2, %xmm13
+; SSE2-NEXT: movdqa %xmm11, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm5, %xmm9
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm12, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm7, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm2, %xmm14
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm1, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa 48(%rsi), %xmm7
+; SSE2-NEXT: movdqa %xmm7, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm1, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm7, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm1, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa 48(%rsi), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: paddd %xmm8, %xmm4
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
-; SSE2-NEXT: paddd (%rsp), %xmm11 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm12 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm14 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm15 # 16-byte Folded Reload
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm2, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm0, %xmm10
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: paddd %xmm0, %xmm12
+; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm9
-; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd %xmm0, %xmm10
-; SSE2-NEXT: paddd %xmm0, %xmm12
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm11
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: psrld $1, %xmm7
-; SSE2-NEXT: psrld $1, %xmm15
+; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: psrld $1, %xmm10
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm10
+; SSE2-NEXT: packuswb %xmm1, %xmm10
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm12
+; SSE2-NEXT: pand %xmm0, %xmm12
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: packuswb %xmm12, %xmm4
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: psrld $1, %xmm15
; SSE2-NEXT: pand %xmm0, %xmm15
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: packuswb %xmm15, %xmm7
-; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: pand %xmm0, %xmm13
+; SSE2-NEXT: packuswb %xmm15, %xmm13
+; SSE2-NEXT: packuswb %xmm4, %xmm13
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: psrld $1, %xmm9
; SSE2-NEXT: pand %xmm0, %xmm9
-; SSE2-NEXT: pand %xmm0, %xmm14
-; SSE2-NEXT: packuswb %xmm9, %xmm14
-; SSE2-NEXT: packuswb %xmm7, %xmm14
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: psrld $1, %xmm13
-; SSE2-NEXT: pand %xmm0, %xmm13
; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: packuswb %xmm13, %xmm6
-; SSE2-NEXT: psrld $1, %xmm12
-; SSE2-NEXT: psrld $1, %xmm10
-; SSE2-NEXT: pand %xmm0, %xmm10
-; SSE2-NEXT: pand %xmm0, %xmm12
-; SSE2-NEXT: packuswb %xmm10, %xmm12
-; SSE2-NEXT: packuswb %xmm6, %xmm12
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: packuswb %xmm9, %xmm6
; SSE2-NEXT: psrld $1, %xmm11
+; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: pand %xmm0, %xmm14
; SSE2-NEXT: pand %xmm0, %xmm11
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: packuswb %xmm11, %xmm5
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: movdqa %xmm8, %xmm5
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: packuswb %xmm5, %xmm4
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: packuswb %xmm14, %xmm11
+; SSE2-NEXT: packuswb %xmm6, %xmm11
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm8
+; SSE2-NEXT: pand %xmm0, %xmm8
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: packuswb %xmm8, %xmm3
+; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: packuswb %xmm5, %xmm7
+; SSE2-NEXT: packuswb %xmm3, %xmm7
+; SSE2-NEXT: movdqu %xmm7, (%rax)
+; SSE2-NEXT: movdqu %xmm11, (%rax)
+; SSE2-NEXT: movdqu %xmm13, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
-; SSE2-NEXT: movdqu %xmm12, (%rax)
-; SSE2-NEXT: movdqu %xmm14, (%rax)
-; SSE2-NEXT: addq $152, %rsp
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v64i8:
@@ -464,21 +448,21 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm15, %ymm7, %ymm7
-; AVX2-NEXT: vpaddd %ymm14, %ymm6, %ymm6
-; AVX2-NEXT: vpaddd %ymm13, %ymm5, %ymm5
-; AVX2-NEXT: vpaddd %ymm12, %ymm4, %ymm4
-; AVX2-NEXT: vpaddd %ymm11, %ymm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm10, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm9, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm1
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm3
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm5
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm6
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm7
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8
; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm9
; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm10
@@ -540,13 +524,13 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm7, %zmm3, %zmm3
-; AVX512F-NEXT: vpaddd %zmm6, %zmm2, %zmm2
-; AVX512F-NEXT: vpaddd %zmm5, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
@@ -673,27 +657,27 @@ define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) {
define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm5
+; SSE2-NEXT: movdqa (%rdi), %xmm2
+; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE2-NEXT: paddd %xmm6, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: paddd %xmm4, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -755,80 +739,79 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm10
-; SSE2-NEXT: movdqa 16(%rdi), %xmm9
-; SSE2-NEXT: movdqa 32(%rdi), %xmm11
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
+; SSE2-NEXT: movdqa 32(%rdi), %xmm10
; SSE2-NEXT: movdqa 48(%rdi), %xmm8
-; SSE2-NEXT: movdqa (%rsi), %xmm14
+; SSE2-NEXT: movdqa (%rsi), %xmm9
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm10, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm10, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: movdqa %xmm9, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm4, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: paddd %xmm8, %xmm3
-; SSE2-NEXT: paddd %xmm13, %xmm4
-; SSE2-NEXT: paddd %xmm11, %xmm2
-; SSE2-NEXT: paddd %xmm15, %xmm5
-; SSE2-NEXT: paddd %xmm9, %xmm1
-; SSE2-NEXT: paddd %xmm12, %xmm6
-; SSE2-NEXT: paddd %xmm10, %xmm14
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm14
+; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm5
; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm9
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm14
-; SSE2-NEXT: psrad $16, %xmm14
-; SSE2-NEXT: packssdw %xmm7, %xmm14
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pslld $16, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: packssdw %xmm7, %xmm9
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -837,7 +820,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm14, (%rax)
+; SSE2-NEXT: movdqu %xmm9, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i16:
@@ -847,13 +830,13 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
@@ -884,9 +867,9 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
@@ -1047,88 +1030,87 @@ define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) {
define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm8
-; SSE2-NEXT: movdqa 16(%rdi), %xmm11
+; SSE2-NEXT: movdqa (%rdi), %xmm3
+; SSE2-NEXT: movdqa 16(%rdi), %xmm8
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm8, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm15, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm9
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm6, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm5, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm7, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm10, %xmm7
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm11, %xmm1
-; SSE2-NEXT: paddd %xmm9, %xmm13
-; SSE2-NEXT: paddd %xmm15, %xmm2
-; SSE2-NEXT: paddd %xmm14, %xmm5
-; SSE2-NEXT: paddd %xmm8, %xmm0
-; SSE2-NEXT: paddd %xmm12, %xmm6
-; SSE2-NEXT: paddd %xmm10, %xmm3
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm4, %xmm7
-; SSE2-NEXT: paddd %xmm4, %xmm3
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm9
; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm13
+; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm7
; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm4, %xmm7
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm7, %xmm3
-; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm6, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm9
; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm13
-; SSE2-NEXT: pand %xmm4, %xmm13
+; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm7
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm13, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -1512,27 +1494,27 @@ define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) {
define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm5
+; SSE2-NEXT: movdqa (%rdi), %xmm2
+; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE2-NEXT: paddd %xmm6, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE2-NEXT: paddd %xmm4, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -1594,80 +1576,79 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm10
-; SSE2-NEXT: movdqa 16(%rdi), %xmm9
-; SSE2-NEXT: movdqa 32(%rdi), %xmm11
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
+; SSE2-NEXT: movdqa 32(%rdi), %xmm10
; SSE2-NEXT: movdqa 48(%rdi), %xmm8
-; SSE2-NEXT: movdqa (%rsi), %xmm14
+; SSE2-NEXT: movdqa (%rsi), %xmm9
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm10, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm10, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: movdqa %xmm9, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm4, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: paddd %xmm8, %xmm3
-; SSE2-NEXT: paddd %xmm13, %xmm4
-; SSE2-NEXT: paddd %xmm11, %xmm2
-; SSE2-NEXT: paddd %xmm15, %xmm5
-; SSE2-NEXT: paddd %xmm9, %xmm1
-; SSE2-NEXT: paddd %xmm12, %xmm6
-; SSE2-NEXT: paddd %xmm10, %xmm14
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm14
+; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm5
; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm9
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm14
-; SSE2-NEXT: psrad $16, %xmm14
-; SSE2-NEXT: packssdw %xmm7, %xmm14
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pslld $16, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: packssdw %xmm7, %xmm9
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -1676,7 +1657,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm14, (%rax)
+; SSE2-NEXT: movdqu %xmm9, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i16_2:
@@ -1686,13 +1667,13 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
@@ -1723,9 +1704,9 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll
index e6cc95fcdb23..6869d088e7cd 100644
--- a/test/CodeGen/X86/avx-basic.ll
+++ b/test/CodeGen/X86/avx-basic.ll
@@ -34,8 +34,8 @@ define void @zero256() nounwind ssp {
define void @ones([0 x float]* nocapture %RET, [0 x float]* nocapture %aFOO) nounwind {
; CHECK-LABEL: ones:
; CHECK: ## BB#0: ## %allocas
-; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; CHECK-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -51,8 +51,8 @@ float>* %ptr2vec615, align 32
define void @ones2([0 x i32]* nocapture %RET, [0 x i32]* nocapture %aFOO) nounwind {
; CHECK-LABEL: ones2:
; CHECK: ## BB#0: ## %allocas
-; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; CHECK-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx-cvt-3.ll b/test/CodeGen/X86/avx-cvt-3.ll
index 066719b3bfe8..231334ddcb85 100644
--- a/test/CodeGen/X86/avx-cvt-3.ll
+++ b/test/CodeGen/X86/avx-cvt-3.ll
@@ -48,16 +48,16 @@ define <8 x float> @sitofp_shuffle_zero_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_insert_allbits_v8i32:
; X86: # BB#0:
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: sitofp_insert_allbits_v8i32:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
; X64-NEXT: retq
@@ -72,16 +72,16 @@ define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_shuffle_allbits_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_shuffle_allbits_v8i32:
; X86: # BB#0:
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: sitofp_shuffle_allbits_v8i32:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
; X64-NEXT: retq
@@ -95,8 +95,7 @@ define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
; X86: # BB#0:
; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-NEXT: movl $2, %eax
@@ -111,8 +110,7 @@ define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
; X64: # BB#0:
; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-NEXT: movl $2, %eax
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 1d925ff8e9bd..3cadbe2a8db3 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -99,16 +99,16 @@ define <8 x float> @test_mm256_and_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define <4 x double> @test_mm256_andnot_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_andnot_pd:
; X32: # BB#0:
-; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT: vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; X32-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; X32-NEXT: vcmptrueps %ymm2, %ymm2, %ymm2
; X32-NEXT: vxorps %ymm2, %ymm0, %ymm0
; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_andnot_pd:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; X64-NEXT: vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; X64-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; X64-NEXT: vcmptrueps %ymm2, %ymm2, %ymm2
; X64-NEXT: vxorps %ymm2, %ymm0, %ymm0
; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
@@ -2244,11 +2244,11 @@ define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, doubl
; X32: # BB#0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_pd:
@@ -2269,19 +2269,19 @@ define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3
; X32: # BB#0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
-; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
-; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
-; X32-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_ps:
@@ -2881,10 +2881,10 @@ define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, doub
; X32: # BB#0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm2[0]
+; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
@@ -2908,16 +2908,16 @@ define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
; X32-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm6[0],xmm7[2,3]
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
diff --git a/test/CodeGen/X86/avx-schedule.ll b/test/CodeGen/X86/avx-schedule.ll
index 052cacfea4dc..bb05481e313d 100644
--- a/test/CodeGen/X86/avx-schedule.ll
+++ b/test/CodeGen/X86/avx-schedule.ll
@@ -2837,4 +2837,54 @@ define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
ret <8 x float> %8
}
 
+define void @test_zeroall() {
+; SANDY-LABEL: test_zeroall:
+; SANDY: # BB#0:
+; SANDY-NEXT: vzeroall # sched: [?:0.000000e+00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: test_zeroall:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vzeroall # sched: [1:0.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_zeroall:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vzeroall # sched: [?:0.000000e+00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_zeroall:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vzeroall # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ call void @llvm.x86.avx.vzeroall()
+ ret void
+}
+declare void @llvm.x86.avx.vzeroall() nounwind
+
+define void @test_zeroupper() {
+; SANDY-LABEL: test_zeroupper:
+; SANDY: # BB#0:
+; SANDY-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: test_zeroupper:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_zeroupper:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_zeroupper:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ call void @llvm.x86.avx.vzeroupper()
+ ret void
+}
+declare void @llvm.x86.avx.vzeroupper() nounwind
+
!0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx.ll b/test/CodeGen/X86/avx.ll
index 341dd867e4ff..647b7a8f4dfc 100644
--- a/test/CodeGen/X86/avx.ll
+++ b/test/CodeGen/X86/avx.ll
@@ -113,11 +113,11 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK: insertps $48
+; CHECK: vaddps
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: vaddps
; CHECK: vaddps
-; CHECK: vaddps
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float, float* %1, align 4
diff --git a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index 63b0281a7339..e29cf09718ad 100644
--- a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -13,10 +13,10 @@ define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vcmpgeps %zmm4, %zmm0, %k0
; CHECK-NEXT: vcmpgeps %zmm4, %zmm1, %k1
-; CHECK-NEXT: vcmpgeps %zmm4, %zmm2, %k2
-; CHECK-NEXT: vcmpgeps %zmm4, %zmm3, %k3
; CHECK-NEXT: korw %k1, %k0, %k0
-; CHECK-NEXT: korw %k3, %k2, %k1
+; CHECK-NEXT: vcmpgeps %zmm4, %zmm2, %k1
+; CHECK-NEXT: vcmpgeps %zmm4, %zmm3, %k2
+; CHECK-NEXT: korw %k2, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 4890afec2164..c03623a2f035 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -852,16 +852,16 @@ define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %b
; CHECK-NEXT: kxorw %k0, %k0, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
+; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2
; CHECK-NEXT: movw $1, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vmovaps %zmm1, %zmm4
-; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm4 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm3
+; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
; CHECK-NEXT: movw $220, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
-; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm0
-; CHECK-NEXT: vaddps %zmm4, %zmm1, %zmm1
-; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vaddps %zmm3, %zmm1, %zmm0
+; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 -1, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 0, i32 4)
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 0e7a8d25c56f..56962ca2671d 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <16 x float> @test_x86_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x float>
; CHECK-NEXT: vbroadcastss %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
+; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -30,8 +30,8 @@ define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x double
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %zmm1, %zmm2, %zmm1
+; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -51,8 +51,8 @@ define <16 x i32>@test_int_x86_avx512_pbroadcastd_512(<4 x i32> %x0, <16 x i32>
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpbroadcastd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm1, %zmm2, %zmm1
+; CHECK-NEXT: vpbroadcastd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %x0, <16 x i32> %x1, i16 -1)
@@ -71,8 +71,8 @@ define <8 x i64>@test_int_x86_avx512_pbroadcastq_512(<2 x i64> %x0, <8 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpbroadcastq %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm1, %zmm2, %zmm1
+; CHECK-NEXT: vpbroadcastq %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %x0, <8 x i64> %x1,i8 -1)
@@ -91,8 +91,8 @@ define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16
; CHECK-NEXT: vmovsldup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2)
@@ -111,8 +111,8 @@ define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16
; CHECK-NEXT: vmovshdup {{.*#+}} zmm2 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2)
@@ -131,8 +131,8 @@ define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x
; CHECK-NEXT: vmovddup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double> %x0, <8 x double> %x1, i8 %x2)
@@ -671,9 +671,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i6
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
@@ -1616,9 +1616,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x
; CHECK-NEXT: vshufpd {{.*#+}} zmm3 = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
+; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
-; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 %x4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 -1)
@@ -2031,8 +2031,8 @@ define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 %x3)
@@ -2051,8 +2051,8 @@ define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1,
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpsrld $4, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vpsrld $4, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 %x3)
@@ -2651,8 +2651,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
-; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
+; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>, <16 x float> %x2, i16 %x3)
@@ -2989,9 +2989,9 @@ define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 %x4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 -1)
@@ -3010,9 +3010,9 @@ define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 -1)
@@ -3030,9 +3030,9 @@ define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vaddpd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 %x4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 -1)
@@ -3050,9 +3050,9 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 -1)
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index cc5e9e038e0b..f800d01064ba 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -274,11 +274,11 @@ define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm1
-; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res0 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 1)
@@ -301,11 +301,11 @@ define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm1
-; CHECK-NEXT: vaddpd %xmm0, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res0 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 1)
@@ -477,11 +477,11 @@ declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2usi %xmm0, %rcx
-; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rax
-; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %rdx
+; CHECK-NEXT: vcvtsd2usi %xmm0, %rax
+; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT: addq %rax, %rcx
+; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
-; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 4)
@@ -496,11 +496,11 @@ declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2si %xmm0, %rcx
-; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rax
-; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %rdx
+; CHECK-NEXT: vcvtsd2si %xmm0, %rax
+; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT: addq %rax, %rcx
+; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
-; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 4)
@@ -515,11 +515,11 @@ declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2usi %xmm0, %rcx
-; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rax
-; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %rdx
+; CHECK-NEXT: vcvtss2usi %xmm0, %rax
+; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT: addq %rax, %rcx
+; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
-; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 4)
@@ -534,11 +534,11 @@ declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2si %xmm0, %rcx
-; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rax
-; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %rdx
+; CHECK-NEXT: vcvtss2si %xmm0, %rax
+; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT: addq %rax, %rcx
+; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
-; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 4)
@@ -553,11 +553,11 @@ declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2usi %xmm0, %ecx
-; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %eax
-; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %edx
+; CHECK-NEXT: vcvtsd2usi %xmm0, %eax
+; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %ecx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 4)
@@ -572,11 +572,11 @@ declare i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2si %xmm0, %ecx
-; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %eax
-; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %edx
+; CHECK-NEXT: vcvtsd2si %xmm0, %eax
+; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %ecx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 4)
@@ -591,11 +591,11 @@ declare i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2usi %xmm0, %ecx
-; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %eax
-; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %edx
+; CHECK-NEXT: vcvtss2usi %xmm0, %eax
+; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %ecx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 4)
@@ -610,11 +610,11 @@ declare i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2si %xmm0, %ecx
-; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %eax
-; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %edx
+; CHECK-NEXT: vcvtss2si %xmm0, %eax
+; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %ecx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 4)
@@ -683,8 +683,9 @@ define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; CHECK-NEXT: vcvtps2ph $2, %zmm0, (%rsi)
-; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm0
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1)
%res2 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 %mask)
@@ -3656,11 +3657,11 @@ define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float>
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm1
-; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res0 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
@@ -3684,10 +3685,10 @@ define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x dou
; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4
; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm2
; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res0 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
@@ -3903,11 +3904,11 @@ define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x d
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm3
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4
; CHECK-NEXT: vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> zeroinitializer, i8 %x3, i32 4)
@@ -3928,11 +3929,11 @@ define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x flo
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 {%k1} {z}
-; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
+; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 %x3, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> zeroinitializer, i8 %x3, i32 4)
@@ -4434,8 +4435,8 @@ define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprold $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vprold $3, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vprold $3, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vprold $3, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
@@ -4454,8 +4455,8 @@ define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprolq $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vprolq $3, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vprolq $3, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vprolq $3, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
@@ -4556,9 +4557,9 @@ define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
+; CHECK-NEXT: vaddpd %zmm4, %zmm3, %zmm3
; CHECK-NEXT: vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vaddpd %zmm4, %zmm3, %zmm1
-; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 4, i8 %x4, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> zeroinitializer, <8 x double> %x1, <8 x i64> %x2, i32 5, i8 %x4, i32 4)
@@ -4579,9 +4580,9 @@ define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0,
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vmovapd %zmm0, %zmm5
; CHECK-NEXT: vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
+; CHECK-NEXT: vaddpd %zmm5, %zmm3, %zmm3
; CHECK-NEXT: vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vaddpd %zmm5, %zmm3, %zmm1
-; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 %x4, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> zeroinitializer, i32 5, i8 %x4, i32 4)
@@ -4603,9 +4604,9 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x fl
; CHECK-NEXT: vxorps %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vmovaps %xmm0, %xmm5
; CHECK-NEXT: vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
+; CHECK-NEXT: vaddps %xmm5, %xmm3, %xmm3
; CHECK-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm3, %xmm1
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 4)
@@ -4650,9 +4651,9 @@ define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vmovaps %zmm0, %zmm5
; CHECK-NEXT: vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
+; CHECK-NEXT: vaddps %zmm5, %zmm3, %zmm3
; CHECK-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vaddps %zmm5, %zmm3, %zmm1
-; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vaddps %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 4)
@@ -4721,9 +4722,9 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x
; CHECK-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vmovapd %xmm0, %xmm5
; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
+; CHECK-NEXT: vaddpd %xmm5, %xmm3, %xmm3
; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm5, %xmm3, %xmm1
-; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
@@ -4821,12 +4822,12 @@ define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x do
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovapd %xmm0, %xmm4
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4
-; CHECK-NEXT: vmovapd %xmm0, %xmm5
-; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5 {%k1}
+; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovapd %xmm0, %xmm4
+; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm1
-; CHECK-NEXT: vaddpd %xmm5, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -4849,12 +4850,12 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x floa
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovaps %xmm0, %xmm4
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4
-; CHECK-NEXT: vmovaps %xmm0, %xmm5
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm5 {%k1}
+; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovaps %xmm0, %xmm4
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm1
-; CHECK-NEXT: vaddps %xmm5, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -4909,12 +4910,12 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x d
; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm4
; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovapd %xmm2, %xmm5
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
+; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -4937,12 +4938,12 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x flo
; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm4
; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovaps %xmm2, %xmm5
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
+; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovaps %xmm2, %xmm4
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -5069,12 +5070,12 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x d
; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm4
; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovapd %xmm2, %xmm5
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
+; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -5097,12 +5098,12 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x flo
; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm4
; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovaps %xmm2, %xmm5
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
+; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovaps %xmm2, %xmm4
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -5125,12 +5126,12 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x
; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm4
; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovapd %xmm2, %xmm5
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
+; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -5153,12 +5154,12 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm4
; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovaps %xmm2, %xmm5
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
+; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vmovaps %xmm2, %xmm4
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm2, %xmm1
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
diff --git a/test/CodeGen/X86/avx512-mask-spills.ll b/test/CodeGen/X86/avx512-mask-spills.ll
index 4ef88ac495c3..96aefdb10584 100644
--- a/test/CodeGen/X86/avx512-mask-spills.ll
+++ b/test/CodeGen/X86/avx512-mask-spills.ll
@@ -9,13 +9,11 @@ define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
+; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
-; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
-; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2d %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -34,14 +32,12 @@ define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
; CHECK-NEXT: Lcfi1:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
-; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
-; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -60,14 +56,12 @@ define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
; CHECK-NEXT: Lcfi2:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
+; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
-; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
-; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -85,14 +79,12 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
; CHECK-NEXT: Lcfi3:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k0
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
+; CHECK-NEXT: kord %k1, %k0, %k0
; CHECK-NEXT: kmovd %k0, {{[0-9]+}}(%rsp) ## 4-byte Spill
-; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovd %k0, (%rsp) ## 4-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovd {{[0-9]+}}(%rsp), %k0 ## 4-byte Reload
-; CHECK-NEXT: kmovd (%rsp), %k1 ## 4-byte Reload
-; CHECK-NEXT: kord %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %ymm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -106,20 +98,18 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
; CHECK-LABEL: test_64i1:
; CHECK: ## BB#0:
-; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: pushq %rax
; CHECK-NEXT: Lcfi4:
-; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
-; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
+; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
+; CHECK-NEXT: korq %k1, %k0, %k0
+; CHECK-NEXT: kmovq %k0, (%rsp) ## 8-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
-; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k0 ## 8-byte Reload
-; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k1 ## 8-byte Reload
-; CHECK-NEXT: korq %k1, %k0, %k0
+; CHECK-NEXT: kmovq (%rsp), %k0 ## 8-byte Reload
; CHECK-NEXT: vpmovm2b %k0, %zmm0
-; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
%cmp_res = icmp ugt <64 x i8> %a, %b
diff --git a/test/CodeGen/X86/avx512-scalar_mask.ll b/test/CodeGen/X86/avx512-scalar_mask.ll
new file mode 100644
index 000000000000..47c6813fa8dc
--- /dev/null
+++ b/test/CodeGen/X86/avx512-scalar_mask.ll
@@ -0,0 +1,107 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32)
+declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32)
+
+define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
+; CHECK-LABEL: test_var_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 %mask, i32 4)
+ ret < 4 x float> %res
+}
+
+define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
+; CHECK-LABEL: test_var_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 %mask, i32 4)
+ ret < 4 x float> %res
+}
+
+; FIXME: we should just return %xmm0 here.
+define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
+ ret < 4 x float> %res
+}
+
+; FIXME: we should zero the lower element of xmm0 and return it.
+define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
+ ret < 4 x float> %res
+}
+
+; FIXME: we should just return %xmm0 here.
+define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
+ ret < 4 x float> %res
+}
+
+; FIXME: we should zero the lower element of xmm0 and return it.
+define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
+ ret < 4 x float> %res
+}
+
+define <4 x float>@test_const_allone_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const_allone_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 -1, i32 4)
+ ret < 4 x float> %res
+}
+
+define <4 x float>@test_const_allone_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const_allone_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 -1, i32 4)
+ ret < 4 x float> %res
+}
+
+define <4 x float>@test_const_3_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const_3_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 3, i32 4)
+ ret < 4 x float> %res
+}
+
+define <4 x float>@test_const_3_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_const_3_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 3, i32 4)
+ ret < 4 x float> %res
+}
diff --git a/test/CodeGen/X86/avx512-vselect.ll b/test/CodeGen/X86/avx512-vselect.ll
new file mode 100644
index 000000000000..1940864824ff
--- /dev/null
+++ b/test/CodeGen/X86/avx512-vselect.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mcpu=skx | FileCheck %s --check-prefixes=CHECK,CHECK-SKX
+; RUN: llc < %s -mcpu=knl | FileCheck %s --check-prefixes=CHECK,CHECK-KNL
+
+target triple = "x86_64-unknown-unknown"
+
+define <8 x i64> @test1(<8 x i64> %m, <8 x i64> %a, <8 x i64> %b) {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: vpsllq $63, %zmm0, %zmm0
+; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k1
+; CHECK-NEXT: vpblendmq %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: retq
+entry:
+ %m.trunc = trunc <8 x i64> %m to <8 x i1>
+ %ret = select <8 x i1> %m.trunc, <8 x i64> %a, <8 x i64> %b
+ ret <8 x i64> %ret
+}
+
+; This is a very contrived test case to trick the legalizer into splitting the
+; v16i1 masks in the select during type legalization, and in so doing extending them
+; into two v8i64 types. This lets us ensure that the lowering code can handle
+; both formulations of vselect. All of this trickery is because we can't
+; directly form an SDAG input to the lowering.
+define <16 x double> @test2(<16 x float> %x, <16 x float> %y, <16 x double> %a, <16 x double> %b) {
+; CHECK-SKX-LABEL: test2:
+; CHECK-SKX: # BB#0: # %entry
+; CHECK-SKX-NEXT: vxorps %zmm6, %zmm6, %zmm6
+; CHECK-SKX-NEXT: vcmpltps %zmm0, %zmm6, %k0
+; CHECK-SKX-NEXT: vcmpltps %zmm6, %zmm1, %k1
+; CHECK-SKX-NEXT: korw %k1, %k0, %k0
+; CHECK-SKX-NEXT: kshiftrw $8, %k0, %k1
+; CHECK-SKX-NEXT: vpmovm2q %k1, %zmm1
+; CHECK-SKX-NEXT: vpmovm2q %k0, %zmm0
+; CHECK-SKX-NEXT: vptestmq %zmm0, %zmm0, %k1
+; CHECK-SKX-NEXT: vblendmpd %zmm2, %zmm4, %zmm0 {%k1}
+; CHECK-SKX-NEXT: vptestmq %zmm1, %zmm1, %k1
+; CHECK-SKX-NEXT: vblendmpd %zmm3, %zmm5, %zmm1 {%k1}
+; CHECK-SKX-NEXT: retq
+;
+; CHECK-KNL-LABEL: test2:
+; CHECK-KNL: # BB#0: # %entry
+; CHECK-KNL-NEXT: vpxord %zmm6, %zmm6, %zmm6
+; CHECK-KNL-NEXT: vcmpltps %zmm0, %zmm6, %k0
+; CHECK-KNL-NEXT: vcmpltps %zmm6, %zmm1, %k1
+; CHECK-KNL-NEXT: korw %k1, %k0, %k1
+; CHECK-KNL-NEXT: kshiftrw $8, %k1, %k2
+; CHECK-KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; CHECK-KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; CHECK-KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; CHECK-KNL-NEXT: vblendmpd %zmm2, %zmm4, %zmm0 {%k1}
+; CHECK-KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; CHECK-KNL-NEXT: vblendmpd %zmm3, %zmm5, %zmm1 {%k1}
+; CHECK-KNL-NEXT: retq
+entry:
+ %gt.m = fcmp ogt <16 x float> %x, zeroinitializer
+ %lt.m = fcmp olt <16 x float> %y, zeroinitializer
+ %m.or = or <16 x i1> %gt.m, %lt.m
+ %ret = select <16 x i1> %m.or, <16 x double> %a, <16 x double> %b
+ ret <16 x double> %ret
+}
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index 9b4e73a18fc2..faa055dfbbf3 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -796,9 +796,9 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_w_512:
@@ -806,9 +806,9 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1)
@@ -826,8 +826,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
@@ -836,8 +836,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
-; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1
+; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3)
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index 3337f42eb142..13b850ccc3b6 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -2159,9 +2159,9 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_dbpsadbw_512:
@@ -2169,9 +2169,9 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> %x3, i32 %x4)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> zeroinitializer, i32 %x4)
@@ -2411,9 +2411,9 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_permvar_hi_512:
@@ -2421,9 +2421,9 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 7df07b0413ed..571f345d4616 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <32 x i8>@test_int_x86_avx512_pbroadcastb_256(<16 x i8> %x0, <32 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x78,0xc8]
-; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x78,0xc0]
; CHECK-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc9]
+; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x78,0xc0]
; CHECK-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.pbroadcastb.256(<16 x i8> %x0, <32 x i8> %x1, i32 -1)
@@ -29,8 +29,8 @@ define <16 x i8>@test_int_x86_avx512_pbroadcastb_128(<16 x i8> %x0, <16 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x78,0xc8]
-; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x78,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc9]
+; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x78,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.pbroadcastb.128(<16 x i8> %x0, <16 x i8> %x1, i16 -1)
@@ -49,8 +49,8 @@ define <16 x i16>@test_int_x86_avx512_pbroadcastw_256(<8 x i16> %x0, <16 x i16>
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x79,0xc8]
-; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x79,0xc0]
; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc9]
+; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x79,0xc0]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.pbroadcastw.256(<8 x i16> %x0, <16 x i16> %x1, i16 -1)
@@ -69,8 +69,8 @@ define <8 x i16>@test_int_x86_avx512_pbroadcastw_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x79,0xc8]
-; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x79,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc9]
+; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x79,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.pbroadcastw.128(<8 x i16> %x0, <8 x i16> %x1, i8 -1)
@@ -89,8 +89,8 @@ define <64 x i8>@test_int_x86_avx512_pbroadcastb_512(<16 x i8> %x0, <64 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x78,0xd0]
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x78,0xc8]
-; CHECK-NEXT: vpbroadcastb %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x78,0xc0]
; CHECK-NEXT: vpaddb %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc9]
+; CHECK-NEXT: vpbroadcastb %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x78,0xc0]
; CHECK-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <64 x i8> @llvm.x86.avx512.pbroadcastb.512(<16 x i8> %x0, <64 x i8> %x1, i64 -1)
@@ -109,8 +109,8 @@ define <32 x i16>@test_int_x86_avx512_pbroadcastw_512(<8 x i16> %x0, <32 x i16>
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x79,0xc8]
-; CHECK-NEXT: vpbroadcastw %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x79,0xc0]
; CHECK-NEXT: vpaddw %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc9]
+; CHECK-NEXT: vpbroadcastw %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x79,0xc0]
; CHECK-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i16> @llvm.x86.avx512.pbroadcastw.512(<8 x i16> %x0, <32 x i16> %x1, i32 -1)
@@ -1476,9 +1476,9 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_w_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd1,0xd1]
+; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xd3]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd1,0xc1]
-; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xcb]
-; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
+; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
@@ -1496,9 +1496,9 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_w_256(<16 x i16> %x0, <8 x i16>
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd1,0xd1]
+; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xd3]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd1,0xc1]
-; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xcb]
-; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 -1)
@@ -1596,8 +1596,8 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_wi_128(<8 x i16> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xd0,0x03]
-; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xd0,0x03]
; CHECK-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
+; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xd0,0x03]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psrl.wi.128(<8 x i16> %x0, i32 3, <8 x i16> %x2, i8 %x3)
@@ -1616,8 +1616,8 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_wi_256(<16 x i16> %x0, i32 %x1,
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xd0,0x03]
-; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xd0,0x03]
; CHECK-NEXT: vpaddw %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xca]
+; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xd0,0x03]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psrl.wi.256(<16 x i16> %x0, i32 3, <16 x i16> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
index 8f528394f5bd..f8f47c87100a 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <4 x i32>@test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32>
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics.ll b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
index 37aea45e6107..96254f7c95b0 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
@@ -7,8 +7,8 @@ define <4 x i32> @test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
diff --git a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index c5478dad4224..1377733739fe 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -40,8 +40,8 @@ define <8 x float>@test_int_x86_avx512_mask_vextractf32x8(<16 x float> %x0, <8 x
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float> %x0,i32 1, <8 x float> %x2, i8 %x3)
diff --git a/test/CodeGen/X86/avx512dq-intrinsics.ll b/test/CodeGen/X86/avx512dq-intrinsics.ll
index 000390404b54..97ac0fde10ec 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -414,8 +414,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x2.512(<4 x float> %x0, <16 x float> %x2, i16 %x3)
@@ -434,8 +434,8 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x2.512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index 52a84deebf51..595b3e0ebb86 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -1568,8 +1568,8 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0,
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x19,0xc1,0x01]
-; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x19,0xc0,0x01]
; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
+; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x19,0xc0,0x01]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double> %x0,i32 1, <2 x double> %x2, i8 %x3)
@@ -1588,9 +1588,9 @@ define <4 x double>@test_int_x86_avx512_mask_insertf64x2_256(<4 x double> %x0, <
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xd1,0x01]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xd3]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x18,0xc1,0x01]
-; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xcb]
-; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
+; CHECK-NEXT: vaddpd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double> %x0, <2 x double> %x1, i32 1, <4 x double> %x3, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double> %x0, <2 x double> %x1, i32 1, <4 x double> %x3, i8 -1)
@@ -1608,9 +1608,9 @@ define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i6
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xd1,0x01]
+; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xd3]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x38,0xc1,0x01]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64> %x0, <2 x i64> %x1, i32 1, <4 x i64> %x3, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64> %x0, <2 x i64> %x1, i32 1, <4 x i64> %x3, i8 -1)
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index ad9ea93c2031..1bfdfd0e634d 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -635,8 +635,8 @@ define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0,
; CHECK-NEXT: ## ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x19,0xd0]
; CHECK-NEXT: ## ymm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xc0]
; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
+; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xc0]
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float> %x0, <8 x float> %x2, i8 %x3)
@@ -680,8 +680,8 @@ define <4 x i32>@test_int_x86_avx512_mask_broadcasti32x2_128(<4 x i32> %x0, <4 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x59,0xc8]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x59,0xd0]
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
+; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.broadcasti32x2.128(<4 x i32> %x0, <4 x i32> %x2, i8 %x3)
diff --git a/test/CodeGen/X86/avx512er-intrinsics.ll b/test/CodeGen/X86/avx512er-intrinsics.ll
index ca130bd2b676..b8531e25bfa1 100644
--- a/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -118,78 +118,78 @@ define <4 x float> @test_rcp28_ss(<4 x float> %a0) {
}
declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
-define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0) {
+define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
-; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 7, i32 8) ;
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 %mask, i32 8) ;
ret <4 x float> %res
}
-define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0) {
+define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
-; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 7, i32 8) ;
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask, i32 8) ;
ret <4 x float> %res
}
-define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0) {
+define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
-; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 7, i32 8) ;
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 %mask, i32 8) ;
ret <2 x double> %res
}
-define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0) {
+define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
-; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
; CHECK-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 7, i32 8) ;
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask, i32 8) ;
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
-define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr ) {
+define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem:
; CHECK: # BB#0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
-; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
+; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
%mem = load double , double * %ptr, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 %mask, i32 4) ;
ret <2 x double> %res
}
-define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr ) {
+define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem_offset:
; CHECK: # BB#0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
-; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
+; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
; CHECK-NEXT: retq # encoding: [0xc3]
%ptr1 = getelementptr double, double* %ptr, i32 18
%mem = load double , double * %ptr1, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 %mask, i32 4) ;
ret <2 x double> %res
}
diff --git a/test/CodeGen/X86/avx512ifma-intrinsics.ll b/test/CodeGen/X86/avx512ifma-intrinsics.ll
index 30ecc0d2e49e..9659dc6d455a 100644
--- a/test/CodeGen/X86/avx512ifma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifma-intrinsics.ll
@@ -13,8 +13,8 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -41,8 +41,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512(<8 x i64> %x0, <8 x
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -69,8 +69,8 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -97,8 +97,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
index 3ca686cef3bf..b2fe6eba88ab 100644
--- a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
@@ -14,8 +14,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -42,8 +42,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -70,8 +70,8 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_128(<2 x i64> %x0, <2 x
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -98,8 +98,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_256(<4 x i64> %x0, <4 x
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1} {z}
-; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -126,8 +126,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -154,8 +154,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -182,8 +182,8 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_128(<2 x i64> %x0, <2 x
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -210,8 +210,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_256(<4 x i64> %x0, <4 x
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1} {z}
-; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 4d906a4fd29a..c2d8df6476b3 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -30,8 +30,8 @@ define <4 x i32>@test_int_x86_avx512_pbroadcastd_128(<4 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x58,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x58,0xc8]
-; CHECK-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x58,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc9]
+; CHECK-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x58,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.pbroadcastd.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
@@ -50,8 +50,8 @@ define <4 x i64>@test_int_x86_avx512_pbroadcastq_256(<2 x i64> %x0, <4 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x59,0xc8]
-; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x59,0xc0]
; CHECK-NEXT: vpaddq %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc9]
+; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x59,0xc0]
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.pbroadcastq.256(<2 x i64> %x0, <4 x i64> %x1,i8 -1)
@@ -70,8 +70,8 @@ define <2 x i64>@test_int_x86_avx512_pbroadcastq_128(<2 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x59,0xc8]
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x59,0xc0]
; CHECK-NEXT: vpaddq %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc9]
+; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x59,0xc0]
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.pbroadcastq.128(<2 x i64> %x0, <2 x i64> %x1,i8 -1)
@@ -90,8 +90,8 @@ define <4 x double> @test_x86_vbroadcast_sd_pd_256(<2 x double> %a0, <4 x double
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x19,0xc8]
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x19,0xc0]
; CHECK-NEXT: vaddpd %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc9]
+; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x19,0xc0]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.256(<2 x double> %a0, <4 x double> zeroinitializer, i8 -1)
@@ -110,8 +110,8 @@ define <8 x float> @test_x86_vbroadcast_ss_ps_256(<4 x float> %a0, <8 x float> %
; CHECK-NEXT: vbroadcastss %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x18,0xc8]
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x18,0xc0]
; CHECK-NEXT: vaddps %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc9]
+; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x18,0xc0]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.256(<4 x float> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -130,8 +130,8 @@ define <4 x float> @test_x86_vbroadcast_ss_ps_128(<4 x float> %a0, <4 x float> %
; CHECK-NEXT: vbroadcastss %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x18,0xc8]
-; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x18,0xc0]
; CHECK-NEXT: vaddps %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc9]
+; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x18,0xc0]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.128(<4 x float> %a0, <4 x float> zeroinitializer, i8 -1)
@@ -152,9 +152,9 @@ define <4 x float>@test_int_x86_avx512_mask_movsldup_128(<4 x float> %x0, <4 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovsldup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x12,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,0,2,2]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vmovsldup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x12,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[0,0,2,2]
-; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.movsldup.128(<4 x float> %x0, <4 x float> %x1, i8 %x2)
@@ -175,9 +175,9 @@ define <8 x float>@test_int_x86_avx512_mask_movsldup_256(<8 x float> %x0, <8 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovsldup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x12,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vmovsldup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x12,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.movsldup.256(<8 x float> %x0, <8 x float> %x1, i8 %x2)
@@ -198,9 +198,9 @@ define <4 x float>@test_int_x86_avx512_mask_movshdup_128(<4 x float> %x0, <4 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovshdup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x16,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[1,1,3,3]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vmovshdup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x16,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[1,1,3,3]
-; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.movshdup.128(<4 x float> %x0, <4 x float> %x1, i8 %x2)
@@ -221,9 +221,9 @@ define <8 x float>@test_int_x86_avx512_mask_movshdup_256(<8 x float> %x0, <8 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovshdup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x16,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vmovshdup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x16,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.movshdup.256(<8 x float> %x0, <8 x float> %x1, i8 %x2)
@@ -243,9 +243,9 @@ define <2 x double>@test_int_x86_avx512_mask_movddup_128(<2 x double> %x0, <2 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovddup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x12,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,0]
+; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vmovddup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x12,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[0,0]
-; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.movddup.128(<2 x double> %x0, <2 x double> %x1, i8 %x2)
@@ -266,9 +266,9 @@ define <4 x double>@test_int_x86_avx512_mask_movddup_256(<4 x double> %x0, <4 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovddup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x12,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,0,2,2]
+; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xca]
; CHECK-NEXT: vmovddup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0xa9,0x12,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[0,0,2,2]
-; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xca]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.movddup.256(<4 x double> %x0, <4 x double> %x1, i8 %x2)
@@ -3209,10 +3209,10 @@ define <2 x double>@test_int_x86_avx512_mask_shuf_pd_128(<2 x double> %x0, <2 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xc6,0xd1,0x01]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[1],xmm1[0]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xd3]
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xc6,0xc1,0x01]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[1],xmm1[0]
-; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xcb]
-; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
+; CHECK-NEXT: vaddpd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double> %x0, <2 x double> %x1, i32 1, <2 x double> %x3, i8 %x4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double> %x0, <2 x double> %x1, i32 1, <2 x double> %x3, i8 -1)
@@ -3540,9 +3540,9 @@ define <2 x i64>@test_int_x86_avx512_mask_psrl_q_128(<2 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xd3,0xd1]
+; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xd3]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xd3,0xc1]
-; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
@@ -3560,9 +3560,9 @@ define <4 x i64>@test_int_x86_avx512_mask_psrl_q_256(<4 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xd3,0xd1]
+; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xd3]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xd3,0xc1]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 -1)
@@ -3580,9 +3580,9 @@ define <4 x i32>@test_int_x86_avx512_mask_psrl_d_128(<4 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd2,0xd1]
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xd3]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd2,0xc1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
@@ -3600,9 +3600,9 @@ define <8 x i32>@test_int_x86_avx512_mask_psrl_d_256(<8 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd2,0xd1]
+; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xd3]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd2,0xc1]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
+; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 -1)
@@ -3720,8 +3720,8 @@ define <2 x i64>@test_int_x86_avx512_mask_psrl_qi_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x73,0xd0,0x03]
-; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x73,0xd0,0x03]
; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
+; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x73,0xd0,0x03]
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrl.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -3740,8 +3740,8 @@ define <4 x i64>@test_int_x86_avx512_mask_psrl_qi_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x73,0xd0,0x03]
-; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x73,0xd0,0x03]
; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
+; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x73,0xd0,0x03]
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.psrl.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -3760,8 +3760,8 @@ define <4 x i32>@test_int_x86_avx512_mask_psrl_di_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xd0,0x03]
-; CHECK-NEXT: vpsrld $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x72,0xd0,0x03]
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
+; CHECK-NEXT: vpsrld $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x72,0xd0,0x03]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psrl.di.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -3780,8 +3780,8 @@ define <8 x i32>@test_int_x86_avx512_mask_psrl_di_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xd0,0x03]
-; CHECK-NEXT: vpsrld $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xd0,0x03]
; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
+; CHECK-NEXT: vpsrld $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xd0,0x03]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrl.di.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4642,10 +4642,10 @@ define <4 x i32>@test_int_x86_avx512_mask_valign_d_128(<4 x i32> %x0, <4 x i32>
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignd $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x03,0xd1,0x02]
; CHECK-NEXT: ## xmm2 {%k1} = xmm1[2,3],xmm0[0,1]
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xd3]
; CHECK-NEXT: valignd $2, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x03,0xc1,0x02]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm1[2,3],xmm0[0,1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 2, <4 x i32> %x3, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 2, <4 x i32> %x3, i8 -1)
@@ -4817,9 +4817,9 @@ define <8 x float>@test_int_x86_avx512_mask_insertf32x4_256(<8 x float> %x0, <4
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xd1,0x01]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xd3]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x18,0xc1,0x01]
-; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xcb]
-; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
+; CHECK-NEXT: vaddps %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float> %x0, <4 x float> %x1, i32 1, <8 x float> %x3, i8 %x4)
%res1 = call <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float> %x0, <4 x float> %x1, i32 1, <8 x float> %x3, i8 -1)
@@ -4837,9 +4837,9 @@ define <8 x i32>@test_int_x86_avx512_mask_inserti32x4_256(<8 x i32> %x0, <4 x i3
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xd1,0x01]
+; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xd3]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x38,0xc1,0x01]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
+; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.inserti32x4.256(<8 x i32> %x0, <4 x i32> %x1, i32 1, <8 x i32> %x3, i8 %x4)
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 1f324d679564..684b0468cf51 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -4368,8 +4368,8 @@ define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc8,0x03]
-; CHECK-NEXT: vprold $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
+; CHECK-NEXT: vprold $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -4388,8 +4388,8 @@ define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc8,0x03]
-; CHECK-NEXT: vprold $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
+; CHECK-NEXT: vprold $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4408,8 +4408,8 @@ define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc8,0x03]
-; CHECK-NEXT: vprolq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
+; CHECK-NEXT: vprolq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -4428,8 +4428,8 @@ define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc8,0x03]
-; CHECK-NEXT: vprolq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
+; CHECK-NEXT: vprolq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc8,0x03]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -4528,8 +4528,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc0,0x03]
-; CHECK-NEXT: vprord $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
+; CHECK-NEXT: vprord $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -4548,8 +4548,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc0,0x03]
-; CHECK-NEXT: vprord $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
+; CHECK-NEXT: vprord $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4568,8 +4568,8 @@ define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc0,0x03]
-; CHECK-NEXT: vprorq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
+; CHECK-NEXT: vprorq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -4588,8 +4588,8 @@ define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc0,0x03]
-; CHECK-NEXT: vprorq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
+; CHECK-NEXT: vprorq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc0,0x03]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -4690,9 +4690,9 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x54,0xda,0x05]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vfixupimmpd $4, %xmm2, %xmm1, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0x54,0xe2,0x04]
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xdc]
; CHECK-NEXT: vfixupimmpd $3, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0xf5,0x08,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xcc]
-; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
+; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double> %x0, <2 x double> %x1,<2 x i64> %x2, i32 5, i8 %x4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double> zeroinitializer, <2 x double> %x1, <2 x i64> %x2, i32 4, i8 %x4)
@@ -4732,9 +4732,9 @@ define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $4, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x54,0xda,0x04]
; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
; CHECK-NEXT: vfixupimmpd $5, %ymm2, %ymm1, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xe2,0x05]
+; CHECK-NEXT: vaddpd %ymm4, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdc]
; CHECK-NEXT: vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %ymm4, %ymm3, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xcc]
-; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i32 4, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double> zeroinitializer, <4 x double> %x1, <4 x i64> %x2 , i32 5, i8 %x4)
@@ -4755,9 +4755,9 @@ define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0,
; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
; CHECK-NEXT: vmovapd %ymm0, %ymm5 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xe8]
; CHECK-NEXT: vfixupimmpd $4, %ymm4, %ymm1, %ymm5 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xec,0x04]
+; CHECK-NEXT: vaddpd %ymm5, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdd]
; CHECK-NEXT: vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %ymm5, %ymm3, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xcd]
-; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i32 5, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> zeroinitializer, i32 4, i8 %x4)
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index afeba4ef2d99..94e2ee7a0aa9 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -454,6 +454,30 @@ entry:
ret i32 %and
}
+define i32 @bzhi32d(i32 %a, i32 %b) {
+; CHECK-LABEL: bzhi32d:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: bzhil %esi, %edi, %eax
+; CHECK-NEXT: retq
+entry:
+ %sub = sub i32 32, %b
+ %shr = lshr i32 -1, %sub
+ %and = and i32 %shr, %a
+ ret i32 %and
+}
+
+define i32 @bzhi32e(i32 %a, i32 %b) {
+; CHECK-LABEL: bzhi32e:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: bzhil %esi, %edi, %eax
+; CHECK-NEXT: retq
+entry:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %shl, %sub
+ ret i32 %shr
+}
+
define i64 @bzhi64b(i64 %x, i8 zeroext %index) {
; CHECK-LABEL: bzhi64b:
; CHECK: # BB#0: # %entry
@@ -468,6 +492,58 @@ entry:
ret i64 %and
}
+define i64 @bzhi64c(i64 %a, i64 %b) {
+; CHECK-LABEL: bzhi64c:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: bzhiq %rsi, %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %sub = sub i64 64, %b
+ %shr = lshr i64 -1, %sub
+ %and = and i64 %shr, %a
+ ret i64 %and
+}
+
+define i64 @bzhi64d(i64 %a, i32 %b) {
+; CHECK-LABEL: bzhi64d:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; CHECK-NEXT: bzhiq %rsi, %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %sub = sub i32 64, %b
+ %sh_prom = zext i32 %sub to i64
+ %shr = lshr i64 -1, %sh_prom
+ %and = and i64 %shr, %a
+ ret i64 %and
+}
+
+define i64 @bzhi64e(i64 %a, i64 %b) {
+; CHECK-LABEL: bzhi64e:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: bzhiq %rsi, %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %sub = sub i64 64, %b
+ %shl = shl i64 %a, %sub
+ %shr = lshr i64 %shl, %sub
+ ret i64 %shr
+}
+
+define i64 @bzhi64f(i64 %a, i32 %b) {
+; CHECK-LABEL: bzhi64f:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; CHECK-NEXT: bzhiq %rsi, %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %sub = sub i32 64, %b
+ %sh_prom = zext i32 %sub to i64
+ %shl = shl i64 %a, %sh_prom
+ %shr = lshr i64 %shl, %sh_prom
+ ret i64 %shr
+}
+
define i64 @bzhi64_constant_mask(i64 %x) {
; CHECK-LABEL: bzhi64_constant_mask:
; CHECK: # BB#0: # %entry
diff --git a/test/CodeGen/X86/bswap_tree2.ll b/test/CodeGen/X86/bswap_tree2.ll
index a9c74df9d0d9..1340b7662a7a 100644
--- a/test/CodeGen/X86/bswap_tree2.ll
+++ b/test/CodeGen/X86/bswap_tree2.ll
@@ -9,31 +9,32 @@
define i32 @test1(i32 %x) nounwind {
; CHECK-LABEL: test1:
; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: andl $16711680, %edx # imm = 0xFF0000
-; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: orl $-16777216, %eax # imm = 0xFF000000
-; CHECK-NEXT: shll $8, %edx
-; CHECK-NEXT: shrl $8, %eax
-; CHECK-NEXT: bswapl %ecx
-; CHECK-NEXT: shrl $16, %ecx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: andl $16711680, %ecx # imm = 0xFF0000
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: orl $-16777216, %edx # imm = 0xFF000000
+; CHECK-NEXT: shll $8, %ecx
+; CHECK-NEXT: shrl $8, %edx
+; CHECK-NEXT: orl %ecx, %edx
+; CHECK-NEXT: bswapl %eax
+; CHECK-NEXT: shrl $16, %eax
; CHECK-NEXT: orl %edx, %eax
-; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test1:
; CHECK64: # BB#0:
-; CHECK64-NEXT: movl %edi, %ecx
-; CHECK64-NEXT: andl $16711680, %ecx # imm = 0xFF0000
; CHECK64-NEXT: movl %edi, %eax
-; CHECK64-NEXT: orl $-16777216, %eax # imm = 0xFF000000
-; CHECK64-NEXT: shll $8, %ecx
-; CHECK64-NEXT: shrl $8, %eax
+; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000
+; CHECK64-NEXT: movl %edi, %ecx
+; CHECK64-NEXT: orl $-16777216, %ecx # imm = 0xFF000000
+; CHECK64-NEXT: shll $8, %eax
+; CHECK64-NEXT: shrl $8, %ecx
+; CHECK64-NEXT: orl %eax, %ecx
; CHECK64-NEXT: bswapl %edi
; CHECK64-NEXT: shrl $16, %edi
-; CHECK64-NEXT: orl %ecx, %eax
-; CHECK64-NEXT: orl %edi, %eax
+; CHECK64-NEXT: orl %ecx, %edi
+; CHECK64-NEXT: movl %edi, %eax
; CHECK64-NEXT: retq
%byte0 = and i32 %x, 255 ; 0x000000ff
%byte1 = and i32 %x, 65280 ; 0x0000ff00
diff --git a/test/CodeGen/X86/cast-vsel.ll b/test/CodeGen/X86/cast-vsel.ll
index 1e44aec99fc5..83ab2fac2f16 100644
--- a/test/CodeGen/X86/cast-vsel.ll
+++ b/test/CodeGen/X86/cast-vsel.ll
@@ -200,32 +200,29 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
; SSE41: # BB#0:
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE41-NEXT: pshufb %xmm1, %xmm5
-; SSE41-NEXT: pshufb %xmm1, %xmm4
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSE41-NEXT: pshufb %xmm1, %xmm3
; SSE41-NEXT: pshufb %xmm1, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: pandn %xmm4, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: pshufb %xmm1, %xmm5
+; SSE41-NEXT: pshufb %xmm1, %xmm4
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc:
; AVX1: # BB#0:
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
-; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -233,13 +230,11 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vpandn %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT: vpshufb %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpblendvb %xmm0, %xmm2, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%cmp = icmp eq <8 x i16> %a, %b
diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll
index 887abe99f6ed..37beb438d737 100644
--- a/test/CodeGen/X86/combine-abs.ll
+++ b/test/CodeGen/X86/combine-abs.ll
@@ -50,12 +50,11 @@ define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
; AVX2-LABEL: combine_v4i64_abs_abs:
; AVX2: # BB#0:
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index 3ad38f2717d9..3dbff2680c22 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -11,8 +11,7 @@ define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_shl_zero:
; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpsllvd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> zeroinitializer, %x
ret <4 x i32> %1
diff --git a/test/CodeGen/X86/combine-srl.ll b/test/CodeGen/X86/combine-srl.ll
index 706e89051a3d..21564cdd7353 100644
--- a/test/CodeGen/X86/combine-srl.ll
+++ b/test/CodeGen/X86/combine-srl.ll
@@ -6,30 +6,12 @@
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: psrld %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm2, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpsrlvd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i32> zeroinitializer, %x
ret <4 x i32> %1
diff --git a/test/CodeGen/X86/constructor.ll b/test/CodeGen/X86/constructor.ll
index 7b82125dc372..2f3c343afac0 100644
--- a/test/CodeGen/X86/constructor.ll
+++ b/test/CodeGen/X86/constructor.ll
@@ -3,6 +3,8 @@
; RUN: llc -mtriple x86_64-pc-linux < %s | FileCheck --check-prefix=INIT-ARRAY %s
; RUN: llc -mtriple x86_64-unknown-freebsd < %s | FileCheck --check-prefix=INIT-ARRAY %s
; RUN: llc -mtriple x86_64-unknown-nacl < %s | FileCheck --check-prefix=NACL %s
+; RUN: llc -mtriple i586-intel-elfiamcu -use-ctors < %s | FileCheck %s --check-prefix=MCU-CTORS
+; RUN: llc -mtriple i586-intel-elfiamcu < %s | FileCheck %s --check-prefix=MCU-INIT-ARRAY
@llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @f, i8* null}, { i32, void ()*, i8* } { i32 15, void ()* @g, i8* @v }]
@v = weak_odr global i8 0
@@ -37,3 +39,6 @@ entry:
; NACL-NEXT: .section .init_array,"aw",@init_array
; NACL-NEXT: .p2align 2
; NACL-NEXT: .long f
+
+; MCU-CTORS: .section .ctors,"aw",@progbits
+; MCU-INIT-ARRAY: .section .init_array,"aw",@init_array
diff --git a/test/CodeGen/X86/dbg-baseptr.ll b/test/CodeGen/X86/dbg-baseptr.ll
index fb0da1b50d11..893ca93a9944 100644
--- a/test/CodeGen/X86/dbg-baseptr.ll
+++ b/test/CodeGen/X86/dbg-baseptr.ll
@@ -1,4 +1,5 @@
; RUN: llc -o - %s | FileCheck %s
+; RUN: llc -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s --check-prefix=DWARF
; This test checks that parameters on the stack pointer are correctly
; referenced by debug info.
target triple = "x86_64--"
@@ -7,24 +8,54 @@ target triple = "x86_64--"
@ptr = external global i32*
%struct.s = type { i32, i32, i32, i32, i32 }
+; Simple case: no FP, use offset from RSP.
+
; CHECK-LABEL: f0:
-; CHECK: DEBUG_VALUE: f:input <- [%RSP+8]
+; CHECK-NOT: pushq
+; CHECK: movl $42, %eax
+; CHECK: retq
define i32 @f0(%struct.s* byval align 8 %input) !dbg !8 {
call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18
- ret i32 42
+ ret i32 42, !dbg !18
}
+; DWARF-LABEL: .debug_info contents:
+
+; DWARF-LABEL: DW_TAG_subprogram
+; DWARF: DW_AT_frame_base [DW_FORM_exprloc] (<0x1> 57 )
+; 0x57 -> RSP
+; DWARF: DW_AT_name [DW_FORM_strp] ( {{.*}}"f0")
+; DWARF: DW_TAG_formal_parameter
+; DWARF-NEXT: DW_AT_location [DW_FORM_exprloc] (<0x2> 91 08 )
+; DW_OP_fbreg (0x91) 0x08
+; DWARF-NEXT: DW_AT_name [DW_FORM_strp] ( {{.*}}"input")
+
+
+; Dynamic alloca forces the use of RBP as the base pointer
+
; CHECK-LABEL: f1:
-; CHECK: DEBUG_VALUE: f:input <- [%RBP+16]
+; CHECK: pushq %rbp
+; CHECK: movl $42, %eax
+; CHECK: popq %rbp
+; CHECK: retq
define i32 @f1(%struct.s* byval align 8 %input) !dbg !19 {
%val = load i64, i64* @glob
; this alloca should force FP usage.
%stackspace = alloca i32, i64 %val, align 1
store i32* %stackspace, i32** @ptr
call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !20, metadata !17), !dbg !21
- ret i32 42
+ ret i32 42, !dbg !21
}
+; DWARF-LABEL: DW_TAG_subprogram
+; DWARF: DW_AT_frame_base [DW_FORM_exprloc] (<0x1> 56 )
+; 0x56 -> RBP
+; DWARF: DW_AT_name [DW_FORM_strp] ( {{.*}}"f1")
+; DWARF: DW_TAG_formal_parameter
+; DWARF-NEXT: DW_AT_location [DW_FORM_exprloc] (<0x2> 91 10 )
+; DW_OP_fbreg (0x91) 0x10
+; DWARF-NEXT: DW_AT_name [DW_FORM_strp] ( {{.*}}"input")
+
; CHECK-LABEL: f2:
; Just check that we are indeed aligning the stack and setting up a base pointer
; in RBX.
@@ -34,17 +65,24 @@ define i32 @f1(%struct.s* byval align 8 %input) !dbg !19 {
; CHECK: andq $-64, %rsp
; CHECK: subq $64, %rsp
; CHECK: movq %rsp, %rbx
-; The parameter should still be referenced through RBP though.
-; CHECK-NOT: DEBUG_VALUE: f:input <- [%RBX
-; CHECK: DEBUG_VALUE: f:input <- [%RBP+16]
define i32 @f2(%struct.s* byval align 8 %input) !dbg !22 {
%val = load i64, i64* @glob
%stackspace = alloca i32, i64 %val, align 64
store i32* %stackspace, i32** @ptr
call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !23, metadata !17), !dbg !24
- ret i32 42
+ ret i32 42, !dbg !24
}
+; "input" should still be referred to through RBP.
+; DWARF-LABEL: DW_TAG_subprogram
+; DWARF: DW_AT_frame_base [DW_FORM_exprloc] (<0x1> 56 )
+; 0x56 -> RBP
+; DWARF: DW_AT_name [DW_FORM_strp] ( {{.*}}"f2")
+; DWARF: DW_TAG_formal_parameter
+; DWARF-NEXT: DW_AT_location [DW_FORM_exprloc] (<0x2> 91 10 )
+; DW_OP_fbreg (0x91) 0x10
+; DWARF-NEXT: DW_AT_name [DW_FORM_strp] ( {{.*}}"input")
+
declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!2}
@@ -52,7 +90,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!0 = !{i32 2, !"Dwarf Version", i32 4}
!1 = !{i32 2, !"Debug Info Version", i32 3}
-!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, emissionKind: FullDebug)
!3 = !DIFile(filename: "dbg-baseptr.ll", directory: "/")
!4 = !DILocalVariable(name: "input", arg: 1, scope: !8, file: !3, line: 5, type: !9)
!5 = !{}
@@ -60,7 +98,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!6 = !DISubroutineType(types: !7)
!7 = !{!10, !9}
-!8 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!8 = distinct !DISubprogram(name: "f0", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, unit: !2, variables: !5)
!9 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s", elements: !11)
!10 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
@@ -74,9 +112,9 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!17 = !DIExpression()
!18 = !DILocation(line: 5, scope: !8)
-!19 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!19 = distinct !DISubprogram(name: "f1", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
!20 = !DILocalVariable(name: "input", arg: 1, scope: !19, file: !3, line: 5, type: !9)
!21 = !DILocation(line: 5, scope: !19)
-!22 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!22 = distinct !DISubprogram(name: "f2", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
!23 = !DILocalVariable(name: "input", arg: 1, scope: !22, file: !3, line: 5, type: !9)
!24 = !DILocation(line: 5, scope: !22)
diff --git a/test/CodeGen/X86/elf-associated.ll b/test/CodeGen/X86/elf-associated.ll
index 361cf66cce72..7d58c3437025 100644
--- a/test/CodeGen/X86/elf-associated.ll
+++ b/test/CodeGen/X86/elf-associated.ll
@@ -37,3 +37,8 @@
@l = global i32 1, section "ccc", !associated !5
!5 = !{i32* null}
; CHECK-DAG: .section ccc,"aw",@progbits
+
+; Null metadata.
+@m = global i32 1, section "ddd", !associated !6
+!6 = distinct !{null}
+; CHECK-DAG: .section ddd,"aw",@progbits
diff --git a/test/CodeGen/X86/fold-tied-op.ll b/test/CodeGen/X86/fold-tied-op.ll
index d68236e9d250..eb06eb75a4d7 100644
--- a/test/CodeGen/X86/fold-tied-op.ll
+++ b/test/CodeGen/X86/fold-tied-op.ll
@@ -6,9 +6,10 @@ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386--netbsd"
; CHECK-LABEL: fn1
-; CHECK: addl {{.*#+}} 4-byte Folded Reload
-; CHECK: imull {{.*#+}} 4-byte Folded Reload
-; CHECK: orl {{.*#+}} 4-byte Folded Reload
+; CHECK: orl {{.*#+}} 4-byte Folded Reload
+; CHECK: addl {{.*#+}} 4-byte Folded Reload
+; CHECK: xorl {{.*#+}} 4-byte Folded Reload
+; CHECK: xorl {{.*#+}} 4-byte Folded Reload
; CHECK: retl
%struct.XXH_state64_t = type { i32, i32, i64, i64, i64 }
diff --git a/test/CodeGen/X86/fp128-i128.ll b/test/CodeGen/X86/fp128-i128.ll
index 98082ec611d4..6c6bc8bdc1d1 100644
--- a/test/CodeGen/X86/fp128-i128.ll
+++ b/test/CodeGen/X86/fp128-i128.ll
@@ -50,8 +50,8 @@ define void @TestUnionLD1(fp128 %s, i64 %n) #0 {
; CHECK-NEXT: andq %rdi, %rcx
; CHECK-NEXT: movabsq $-281474976710656, %rdx # imm = 0xFFFF000000000000
; CHECK-NEXT: andq -{{[0-9]+}}(%rsp), %rdx
-; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: orq %rcx, %rdx
+; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: jmp foo # TAILCALL
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
index 4596b83f7bc2..b5507523a75a 100644
--- a/test/CodeGen/X86/haddsub-2.ll
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -933,14 +933,14 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
-; AVX-NEXT: vsubss %xmm4, %xmm3, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-NEXT: vsubss %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX-NEXT: vsubss %xmm3, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 2
%vecext1 = extractelement <4 x float> %A, i32 3
diff --git a/test/CodeGen/X86/known-signbits-vector.ll b/test/CodeGen/X86/known-signbits-vector.ll
index cea9ac26edbc..ec620b8ce877 100644
--- a/test/CodeGen/X86/known-signbits-vector.ll
+++ b/test/CodeGen/X86/known-signbits-vector.ll
@@ -137,3 +137,64 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin
%6 = sitofp i64 %5 to float
ret float %6
}
+
+define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) nounwind {
+; X32-LABEL: signbits_sext_shuffle_sitofp:
+; X32: # BB#0:
+; X32-NEXT: vpmovsxdq %xmm0, %xmm1
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X32-NEXT: vpmovsxdq %xmm0, %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X32-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X32-NEXT: vcvtdq2pd %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: signbits_sext_shuffle_sitofp:
+; X64: # BB#0:
+; X64-NEXT: vpmovsxdq %xmm0, %xmm1
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: vpmovsxdq %xmm0, %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X64-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-NEXT: vcvtdq2pd %xmm0, %ymm0
+; X64-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = shufflevector <4 x i64> %1, <4 x i64>%a1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %3 = sitofp <4 x i64> %2 to <4 x double>
+ ret <4 x double> %3
+}
+
+define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind {
+; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
+; X32: # BB#0:
+; X32-NEXT: vpsrad $16, %xmm0, %xmm1
+; X32-NEXT: vpsrlq $16, %xmm0, %xmm0
+; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X32-NEXT: vpsrlq $16, %xmm0, %xmm0
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
+; X64: # BB#0:
+; X64-NEXT: vpsrad $16, %xmm0, %xmm1
+; X64-NEXT: vpsrlq $16, %xmm0, %xmm0
+; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-NEXT: vpsrlq $16, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = ashr <2 x i64> %a0, <i64 16, i64 16>
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %3 = shufflevector <4 x i64> %a1, <4 x i64> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ %4 = ashr <4 x i64> %3, <i64 16, i64 16, i64 16, i64 16>
+ %5 = shufflevector <4 x i64> %4, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ %6 = sitofp <2 x i64> %5 to <2 x double>
+ ret <2 x double> %6
+}
diff --git a/test/CodeGen/X86/leaFixup32.mir b/test/CodeGen/X86/leaFixup32.mir
new file mode 100644
index 000000000000..70aac21c7ff2
--- /dev/null
+++ b/test/CodeGen/X86/leaFixup32.mir
@@ -0,0 +1,508 @@
+# RUN: llc -run-pass x86-fixup-LEAs -mcpu=corei7-avx -o - %s | FileCheck %s
+--- |
+ ; ModuleID = 'test/CodeGen/X86/fixup-lea.ll'
+ source_filename = "test/CodeGen/X86/fixup-lea.ll"
+ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+ target triple = "i386"
+  ;generated using: llc -stop-after x86-pad-short-functions fixup-lea.ll > leaFixup32.mir
+
+ ;test2add_32: 3 operands LEA32r that can be replaced with 2 add instructions
+ ; where ADD32ri8 is chosen
+ define i32 @test2add_32() {
+ ret i32 0
+ }
+
+ ;test2add_ebp_32: 3 operands LEA32r that can be replaced with 2 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test2add_ebp_32() {
+ ret i32 0
+ }
+
+ ;test1add_ebp_32: 2 operands LEA32r where base register is ebp and can be replaced
+ ; with an add instruction
+ define i32 @test1add_ebp_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ define i32 @testleaadd_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_ebp_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ ; where the base is ebp register
+ define i32 @testleaadd_ebp_32() {
+ ret i32 0
+ }
+
+  ;test1lea_ebp_32: 2 operands LEA32r where base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_ebp_32() {
+ ret i32 0
+ }
+
+ ;test2addi32_32: 3 operands LEA32r that can be replaced with 2 add instructions where ADD32ri32
+ ; is chosen
+ define i32 @test2addi32_32() {
+ ret i32 0
+ }
+
+ ;test1mov1add_ebp_32: 2 operands LEA32r that can be replaced with 1 add 1 mov instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test1mov1add_ebp_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_ebp_index_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is offset
+ define i32 @testleaadd_ebp_index_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_ebp_index2_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is scale
+ define i32 @testleaadd_ebp_index2_32() {
+ ret i32 0
+ }
+
+ ;test_skip_opt_32: 3 operands LEA32r that can not be replaced with 2 instructions
+ define i32 @test_skip_opt_32() {
+ ret i32 0
+ }
+
+  ;test_skip_eflags_32: LEA32r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_32() {
+ ret i32 0
+ }
+
+...
+---
+name: test2add_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %eax = ADD32rr %eax, killed %ebp
+ ; CHECK: %eax = ADD32ri8 %eax, -5
+
+ %eax = LEA32r killed %eax, 1, killed %ebp, -5, _
+ RETQ %eax
+
+...
+---
+name: test2add_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebp = ADD32rr %ebp, killed %eax
+ ; CHECK: %ebp = ADD32ri8 %ebp, -5
+
+ %ebp = LEA32r killed %ebp, 1, killed %eax, -5, _
+ RETQ %ebp
+
+...
+---
+name: test1add_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebp = ADD32rr %ebp, killed %eax
+
+ %ebp = LEA32r killed %ebp, 1, killed %eax, 0, _
+ RETQ %ebp
+
+...
+---
+name: testleaadd_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+ - { reg: '%ebx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %esi
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA32r killed %eax, 1, killed %ebp, -5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+ - { reg: '%ebx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA32r killed %ebp, 1, killed %eax, -5, _
+ RETQ %ebx
+
+...
+---
+name: test1lea_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+ - { reg: '%ebx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+
+ %ebx = LEA32r killed %ebp, 1, killed %eax, 0, _
+ RETQ %ebx
+
+...
+---
+name: test2addi32_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %eax = ADD32rr %eax, killed %ebp
+ ; CHECK: %eax = ADD32ri %eax, 129
+
+ %eax = LEA32r killed %eax, 1, killed %ebp, 129, _
+ RETQ %eax
+
+...
+---
+name: test1mov1add_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = MOV32rr killed %ebp
+ ; CHECK: %ebx = ADD32rr %ebx, killed %ebp
+
+ %ebx = LEA32r killed %ebp, 1, killed %ebp, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_ebp_index_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA32r _, 1, killed %ebp, 5, _
+ ; CHECK: %ebx = ADD32rr %ebx, killed %ebp
+
+ %ebx = LEA32r killed %ebp, 1, killed %ebp, 5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_ebp_index2_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA32r _, 4, killed %ebp, 5, _
+ ; CHECK: %ebx = ADD32rr %ebx, killed %ebp
+
+ %ebx = LEA32r killed %ebp, 4, killed %ebp, 5, _
+ RETQ %ebx
+
+...
+---
+name: test_skip_opt_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+
+ %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_eflags_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebp' }
+ - { reg: '%eax' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
+ ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, _
+ ; CHECK: %ebp = ADD32ri8 %ebp, 5
+
+ CMP32rr %eax, killed %ebx, implicit-def %eflags
+ %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
+ JE_1 %bb.1, implicit %eflags
+ RETQ %ebx
+ bb.1:
+ liveins: %eax, %ebp, %ebx
+ %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, _
+ RETQ %ebp
+
+...
+
+
+
diff --git a/test/CodeGen/X86/leaFixup64.mir b/test/CodeGen/X86/leaFixup64.mir
new file mode 100644
index 000000000000..9b0058750598
--- /dev/null
+++ b/test/CodeGen/X86/leaFixup64.mir
@@ -0,0 +1,1041 @@
+# RUN: llc -run-pass x86-fixup-LEAs -mcpu=corei7-avx -o - %s | FileCheck %s
+--- |
+ ; ModuleID = 'lea-2.ll'
+ source_filename = "lea-2.ll"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+  ;generated using: llc -stop-after x86-pad-short-functions lea-2.ll > leaFixup64.mir
+
+ ;testleaadd_64_32_1: 3 operands LEA64_32r cannot be replaced with 2 add instructions
+ ; but can be replaced with 1 lea + 1 add
+ define i32 @testleaadd_64_32_1() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64_32_1: 3 operands LEA64_32r cannot be replaced with 2 add instructions
+ ; where the base is rbp/r13/ebp register but it can be replaced with 1 lea + 1 add
+ define i32 @testleaadd_rbp_64_32_1() {
+ ret i32 0
+ }
+
+ ;test1lea_rbp_64_32_1: 2 operands LEA64_32r where base register is rbp/r13/ebp and can not
+ ; be replaced with an add instruction but can be replaced with 1 lea instruction
+ define i32 @test1lea_rbp_64_32_1() {
+ ret i32 0
+ }
+
+ ;test2add_64: 3 operands LEA64r that can be replaced with 2 add instructions
+ define i32 @test2add_64() {
+ ret i32 0
+ }
+
+ ;test2add_rbp_64: 3 operands LEA64r that can be replaced with 2 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test2add_rbp_64() {
+ ret i32 0
+ }
+
+ ;test1add_rbp_64: 2 operands LEA64r where base register is rbp/r13/ebp and can be replaced
+ ; with an add instruction
+ define i32 @test1add_rbp_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_64_32: 3 operands LEA64_32r that can be replaced with 1 lea 1 add instructions
+ define i32 @testleaadd_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64_32: 3 operands LEA64_32r that can be replaced with 1 lea 1 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @testleaadd_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;test1lea_rbp_64_32: 2 operands LEA64_32r where base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ define i32 @testleaadd_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @testleaadd_rbp_64() {
+ ret i32 0
+ }
+
+  ;test1lea_rbp_64: 2 operands LEA64r where base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_rbp_64() {
+ ret i32 0
+ }
+
+ ;test8: dst = base & scale!=1, can't optimize
+ define i32 @test8() {
+ ret i32 0
+ }
+
+ ;testleaaddi32_64_32: 3 operands LEA64_32r that can be replaced with 1 lea + 1 add instructions where
+ ; ADD64ri32 is chosen
+ define i32 @testleaaddi32_64_32() {
+ ret i32 0
+ }
+
+ ;test1mov1add_rbp_64_32: 2 operands LEA64_32r cannot be replaced with 1 add 1 mov instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test1mov1add_rbp_64_32() {
+ ret i32 0
+ }
+
+  ;testleaadd_rbp_index_64_32: 3 operands LEA64_32r that cannot be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is offset
+ define i32 @testleaadd_rbp_index_64_32() {
+ ret i32 0
+ }
+
+  ;testleaadd_rbp_index2_64_32: 3 operands LEA64_32r that cannot be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is scale
+ define i32 @testleaadd_rbp_index2_64_32() {
+ ret i32 0
+ }
+
+ ;test2addi32_64: 3 operands LEA64r that can be replaced with 2 add instructions where ADD64ri32
+ ; is chosen
+ define i32 @test2addi32_64() {
+ ret i32 0
+ }
+
+ ;test1mov1add_rbp_64: 2 operands LEA64r that can be replaced with 1 add 1 mov instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test1mov1add_rbp_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is offset
+ define i32 @testleaadd_rbp_index_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index2_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is scale
+ define i32 @testleaadd_rbp_index2_64() {
+ ret i32 0
+ }
+
+ ;test_skip_opt_64: 3 operands LEA64r that can not be replaced with 2 instructions
+ define i32 @test_skip_opt_64() {
+ ret i32 0
+ }
+
+  ;test_skip_eflags_64: LEA64r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_64() {
+ ret i32 0
+ }
+
+ ;test_skip_opt_64_32: 3 operands LEA64_32r that can not be replaced with 2 instructions
+ define i32 @test_skip_opt_64_32() {
+ ret i32 0
+ }
+
+  ;test_skip_eflags_64_32: LEA64_32r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_64_32() {
+ ret i32 0
+ }
+
+
+...
+---
+name: testleaadd_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %eax = ADD32ri8 %eax, -5
+
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ RETQ %eax
+
+...
+---
+name: testleaadd_rbp_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %ebp = ADD32ri8 %ebp, -5
+
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebp
+
+...
+---
+name: test1lea_rbp_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
+
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebp
+
+...
+---
+name: test2add_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rax = ADD64rr %rax, killed %rbp
+ ; CHECK: %rax = ADD64ri8 %rax, -5
+
+ %rax = LEA64r killed %rax, 1, killed %rbp, -5, _
+ RETQ %eax
+
+...
+---
+name: test2add_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbp = ADD64rr %rbp, killed %rax
+ ; CHECK: %rbp = ADD64ri8 %rbp, -5
+
+ %rbp = LEA64r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebp
+
+...
+---
+name: test1add_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbp = ADD64rr %rbp, killed %rax
+
+ %rbp = LEA64r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebp
+
+...
+---
+name: testleaadd_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebx
+
+...
+---
+name: test1lea_rbp_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = ADD64ri8 %rbx, -5
+
+ %rbx = LEA64r killed %rax, 1, killed %rbp, -5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = ADD64ri8 %rbx, -5
+
+ %rbx = LEA64r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebx
+
+...
+---
+name: test1lea_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+
+ %rbx = LEA64r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebx
+
+...
+---
+name: test8
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rdi, %rbp
+ ; CHECK: %r12 = LEA64r _, 2, killed %r13, 5, _
+ ; CHECK: %r12 = ADD64rr %r12, killed %rbp
+ %rbp = KILL %rbp, implicit-def %rbp
+ %r13 = KILL %rdi, implicit-def %r13
+ %r12 = LEA64r killed %rbp, 2, killed %r13, 5, _
+ RETQ %r12
+
+...
+---
+name: testleaaddi32_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %eax = ADD32ri %eax, 129
+
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, _
+ RETQ %eax
+
+...
+---
+name: test1mov1add_rbp_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index2_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+
+ %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: test2addi32_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rax = ADD64rr %rax, killed %rbp
+ ; CHECK: %rax = ADD64ri32 %rax, 129
+
+ %rax = LEA64r killed %rax, 1, killed %rbp, 129, _
+ RETQ %eax
+
+...
+---
+name: test1mov1add_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = MOV64rr killed %rbp
+ ; CHECK: %rbx = ADD64rr %rbx, killed %rbp
+
+ %rbx = LEA64r killed %rbp, 1, killed %rbp, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = LEA64r _, 1, killed %rbp, 5, _
+ ; CHECK: %rbx = ADD64rr %rbx, killed %rbp
+
+ %rbx = LEA64r killed %rbp, 1, killed %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index2_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = LEA64r _, 4, killed %rbp, 5, _
+ ; CHECK: %rbx = ADD64rr %rbx, killed %rbp
+
+ %rbx = LEA64r killed %rbp, 4, killed %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: test_skip_opt_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+
+ %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_eflags_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbp' }
+ - { reg: '%rax' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
+ ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %rbp = ADD64ri8 %rbp, 5
+
+ CMP64rr %rax, killed %rbx, implicit-def %eflags
+ %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
+ JE_1 %bb.1, implicit %eflags
+ RETQ %ebx
+ bb.1:
+ liveins: %rax, %rbp, %rbx
+ %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_opt_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+
+ %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_eflags_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbp' }
+ - { reg: '%rax' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
+ ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %ebp = ADD32ri8 %ebp, 5
+
+ CMP64rr %rax, killed %rbx, implicit-def %eflags
+ %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
+ JE_1 %bb.1, implicit %eflags
+ RETQ %ebx
+ bb.1:
+ liveins: %rax, %rbp, %rbx
+ %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, _
+ RETQ %ebp
+
+...
+
+
+
diff --git a/test/CodeGen/X86/lrshrink.ll b/test/CodeGen/X86/lrshrink.ll
new file mode 100644
index 000000000000..a9cf086dbd90
--- /dev/null
+++ b/test/CodeGen/X86/lrshrink.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+; Checks that "%7 = add nuw nsw i64 %0, %r" is moved before the last call
+; to minimize the live range.
+
+define i64 @test(i1 %a, i64 %r1, i64 %r2, i64 %s1, i64 %s2, i64 %t1, i64 %t2) {
+entry:
+ br i1 %a, label %then, label %else
+
+then:
+ br label %else
+
+else:
+ %0 = phi i64 [ 4, %entry ], [ 10, %then ]
+ %r = phi i64 [ %r1, %entry ], [ %r2, %then ]
+ %s = phi i64 [ %s1, %entry ], [ %s2, %then ]
+ %t = phi i64 [ %t1, %entry ], [ %t2, %then ]
+; CHECK-LABEL: test:
+; CHECK: add
+; CHECK: add
+; CHECK: call
+; CHECK: add
+; CHECK: call
+; CHECK: add
+; CHECK: call
+; CHECK: add
+ %1 = tail call i32 @_Z3foov()
+ %2 = zext i32 %1 to i64
+ %3 = tail call i32 @_Z3foov()
+ %4 = zext i32 %3 to i64
+ %5 = tail call i32 @_Z3foov()
+ %6 = zext i32 %5 to i64
+ %7 = add nuw nsw i64 %0, %r
+ tail call void @llvm.dbg.value(metadata i64 %7, i64 0, metadata !5, metadata !DIExpression()), !dbg !6
+ %8 = add nuw nsw i64 %2, %7
+ %9 = add nuw nsw i64 %4, %8
+ %10 = add nuw nsw i64 %6, %9
+ %11 = add nuw nsw i64 %s, %t
+ tail call void @llvm.dbg.value(metadata i64 %11, i64 0, metadata !5, metadata !DIExpression()), !dbg !6
+ %12 = add nuw nsw i64 %10, %11
+ ret i64 %12
+}
+
+declare i32 @_Z3foov()
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!1, !2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, emissionKind: FullDebug)
+!1 = !{i32 2, !"Dwarf Version", i32 4}
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = !DIFile(filename: "a.c", directory: "./")
+!4 = distinct !DISubprogram(name: "test", scope: !3, unit: !0)
+!5 = !DILocalVariable(name: "x", scope: !4)
+!6 = !DILocation(line: 4, scope: !4)
diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll
index d332b2f3169f..af86df510016 100644
--- a/test/CodeGen/X86/madd.ll
+++ b/test/CodeGen/X86/madd.ll
@@ -129,9 +129,9 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: addq $16, %rsi
; SSE2-NEXT: addq $16, %rdi
; SSE2-NEXT: addq $-8, %rax
@@ -246,23 +246,23 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: movq {{.*#+}} xmm6 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm6
-; SSE2-NEXT: movq {{.*#+}} xmm7 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm7
-; SSE2-NEXT: pmullw %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: paddd %xmm5, %xmm1
; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: movq {{.*#+}} xmm5 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: pmullw %xmm4, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: paddd %xmm4, %xmm2
; SSE2-NEXT: addq $16, %rsi
; SSE2-NEXT: addq $16, %rdi
; SSE2-NEXT: addq $-16, %rax
diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll
index 29a662fb217e..c5de8dd96cbc 100644
--- a/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/test/CodeGen/X86/masked_gather_scatter.ll
@@ -3,7 +3,7 @@
; RUN: llc -mtriple=i386-unknown-linux-gnu -mattr=+avx512f < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL_32
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl -mattr=+avx512dq < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
; RUN: llc -mtriple=i386-unknown-linux-gnu -mattr=+avx512vl -mattr=+avx512dq < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX_32
-; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=SCALAR
+; RUN: opt -mtriple=x86_64-apple-darwin -scalarize-masked-mem-intrin -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=SCALAR
; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu -mcpu=skx < %s -o /dev/null
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/CodeGen/X86/merge-consecutive-loads-128.ll b/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 71417694b0d4..2f7714e63886 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -270,9 +270,9 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_012u:
@@ -292,9 +292,9 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_012u:
@@ -321,9 +321,9 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_019u:
@@ -343,9 +343,9 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_019u:
diff --git a/test/CodeGen/X86/misched-matrix.ll b/test/CodeGen/X86/misched-matrix.ll
index e62a1d04dad6..94bbe75702cb 100644
--- a/test/CodeGen/X86/misched-matrix.ll
+++ b/test/CodeGen/X86/misched-matrix.ll
@@ -17,9 +17,9 @@
;
; TOPDOWN-LABEL: %for.body
; TOPDOWN: movl %{{.*}}, (
-; TOPDOWN: imull {{[0-9]*}}(
+; TOPDOWN-NOT: imull {{[0-9]*}}(
; TOPDOWN: movl %{{.*}}, 4(
-; TOPDOWN: imull {{[0-9]*}}(
+; TOPDOWN-NOT: imull {{[0-9]*}}(
; TOPDOWN: movl %{{.*}}, 8(
; TOPDOWN: movl %{{.*}}, 12(
; TOPDOWN-LABEL: %for.end
diff --git a/test/CodeGen/X86/not-and-simplify.ll b/test/CodeGen/X86/not-and-simplify.ll
index dfce6c681500..83b2be83d552 100644
--- a/test/CodeGen/X86/not-and-simplify.ll
+++ b/test/CodeGen/X86/not-and-simplify.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-bmi | FileCheck %s --check-prefix=ALL --check-prefix=NO_BMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefix=ALL --check-prefix=BMI
@@ -11,13 +12,24 @@ define i32 @shrink_xor_constant1(i32 %x) {
; ALL-NEXT: xorl $1, %edi
; ALL-NEXT: movl %edi, %eax
; ALL-NEXT: retq
-;
%sh = lshr i32 %x, 31
%not = xor i32 %sh, -1
%and = and i32 %not, 1
ret i32 %and
}
+define <4 x i32> @shrink_xor_constant1_splat(<4 x i32> %x) {
+; ALL-LABEL: shrink_xor_constant1_splat:
+; ALL: # BB#0:
+; ALL-NEXT: psrld $31, %xmm0
+; ALL-NEXT: pandn {{.*}}(%rip), %xmm0
+; ALL-NEXT: retq
+ %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
+ %not = xor <4 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and = and <4 x i32> %not, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %and
+}
+
; Clear low bits via shift, set them with xor (not), then mask them off.
define i8 @shrink_xor_constant2(i8 %x) {
@@ -27,10 +39,22 @@ define i8 @shrink_xor_constant2(i8 %x) {
; ALL-NEXT: xorb $-32, %dil
; ALL-NEXT: movl %edi, %eax
; ALL-NEXT: retq
-;
%sh = shl i8 %x, 5
%not = xor i8 %sh, -1
%and = and i8 %not, 224 ; 0xE0
ret i8 %and
}
+define <16 x i8> @shrink_xor_constant2_splat(<16 x i8> %x) {
+; ALL-LABEL: shrink_xor_constant2_splat:
+; ALL: # BB#0:
+; ALL-NEXT: psllw $5, %xmm0
+; ALL-NEXT: pand {{.*}}(%rip), %xmm0
+; ALL-NEXT: pandn {{.*}}(%rip), %xmm0
+; ALL-NEXT: retq
+ %sh = shl <16 x i8> %x, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ %not = xor <16 x i8> %sh, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %and = and <16 x i8> %not, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %and
+}
+
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index d26cf02dd942..0bda41a30c69 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -746,9 +746,9 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE2-LABEL: interleave_24i8_in:
; SSE2: # BB#0:
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -791,17 +791,17 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE42: # BB#0:
; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE42-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE42-NEXT: movdqa %xmm0, %xmm1
-; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
-; SSE42-NEXT: movdqa %xmm2, %xmm3
+; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,8],zero,xmm2[1,9],zero,xmm2[2,10],zero,xmm2[3,11],zero,xmm2[4,12],zero,xmm2[5]
+; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = zero,zero,xmm3[0],zero,zero,xmm3[1],zero,zero,xmm3[2],zero,zero,xmm3[3],zero,zero,xmm3[4],zero
-; SSE42-NEXT: por %xmm1, %xmm3
+; SSE42-NEXT: por %xmm2, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
-; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,xmm2[5],zero,zero,xmm2[6],zero,zero,xmm2[7,u,u,u,u,u,u,u,u]
-; SSE42-NEXT: por %xmm0, %xmm2
-; SSE42-NEXT: movq %xmm2, 16(%rdi)
+; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
+; SSE42-NEXT: por %xmm0, %xmm1
+; SSE42-NEXT: movq %xmm1, 16(%rdi)
; SSE42-NEXT: movdqu %xmm3, (%rdi)
; SSE42-NEXT: retq
;
@@ -809,16 +809,16 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; AVX: # BB#0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
-; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm2[0],zero,zero,xmm2[1],zero,zero,xmm2[2],zero,zero,xmm2[3],zero,zero,xmm2[4],zero
-; AVX-NEXT: vpor %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
+; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero
+; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[5],zero,zero,xmm2[6],zero,zero,xmm2[7,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, 16(%rdi)
-; AVX-NEXT: vmovdqu %xmm1, (%rdi)
+; AVX-NEXT: vmovdqu %xmm2, (%rdi)
; AVX-NEXT: retq
%s1 = load <8 x i8>, <8 x i8>* %q1, align 4
%s2 = load <8 x i8>, <8 x i8>* %q2, align 4
diff --git a/test/CodeGen/X86/packss.ll b/test/CodeGen/X86/packss.ll
index 5cd649bb3902..24db6ba9ca2f 100644
--- a/test/CodeGen/X86/packss.ll
+++ b/test/CodeGen/X86/packss.ll
@@ -26,18 +26,17 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; X64-AVX1-LABEL: trunc_ashr_v4i64:
; X64-AVX1: # BB#0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X64-AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
+; X64-AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X64-AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: trunc_ashr_v4i64:
; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
-; X64-AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X64-AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X64-AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 88cb7a6d5825..50a661fcca11 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -1152,9 +1152,9 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
@@ -1166,9 +1166,9 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pmuludq %xmm3, %xmm0
-; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE41-NEXT: retq
;
@@ -1312,17 +1312,17 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: pmuludq %xmm7, %xmm5
+; SSE2-NEXT: pmuludq %xmm7, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
; SSE2-NEXT: pmuludq %xmm0, %xmm2
-; SSE2-NEXT: pmuludq %xmm8, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; SSE2-NEXT: pmuludq %xmm0, %xmm5
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
; SSE2-NEXT: movaps %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm5, %xmm1
@@ -1331,22 +1331,22 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE41-LABEL: mul_v8i64_zero_upper:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm4[0],zero,xmm4[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT: pmuludq %xmm5, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pmuludq %xmm6, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmuludq %xmm7, %xmm1
-; SSE41-NEXT: pmuludq %xmm6, %xmm2
-; SSE41-NEXT: pmuludq %xmm5, %xmm0
-; SSE41-NEXT: pmuludq %xmm8, %xmm4
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
@@ -1356,11 +1356,11 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm1
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,3],ymm0[1,3],ymm1[5,7],ymm0[5,7]
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,3],ymm0[1,3],ymm2[5,7],ymm0[5,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
@@ -1467,22 +1467,22 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE41-LABEL: mul_v8i64_sext:
; SSE41: # BB#0:
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; SSE41-NEXT: pmovsxwq %xmm3, %xmm8
+; SSE41-NEXT: pmovsxwq %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwq %xmm3, %xmm6
+; SSE41-NEXT: pmovsxwq %xmm3, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovsxwq %xmm3, %xmm7
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm5
+; SSE41-NEXT: pmovsxwq %xmm3, %xmm6
+; SSE41-NEXT: pmovsxwq %xmm0, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE41-NEXT: pmuldq %xmm4, %xmm3
; SSE41-NEXT: pmovsxdq %xmm2, %xmm2
+; SSE41-NEXT: pmuldq %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm4
+; SSE41-NEXT: pmuldq %xmm6, %xmm4
; SSE41-NEXT: pmovsxdq %xmm1, %xmm0
-; SSE41-NEXT: pmuldq %xmm5, %xmm0
-; SSE41-NEXT: pmuldq %xmm7, %xmm4
-; SSE41-NEXT: pmuldq %xmm6, %xmm2
-; SSE41-NEXT: pmuldq %xmm8, %xmm3
+; SSE41-NEXT: pmuldq %xmm7, %xmm0
; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
;
@@ -1493,9 +1493,10 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm1
+; AVX2-NEXT: vmovdqa %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_sext:
diff --git a/test/CodeGen/X86/pr28129.ll b/test/CodeGen/X86/pr28129.ll
index a155f71f79c3..15bffffa207f 100644
--- a/test/CodeGen/X86/pr28129.ll
+++ b/test/CodeGen/X86/pr28129.ll
@@ -5,15 +5,15 @@
define <4 x double> @cmp4f64_domain(<4 x double> %a) {
; X86-LABEL: cmp4f64_domain:
; X86: # BB#0:
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp4f64_domain:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%cmp = fcmp oeq <4 x double> zeroinitializer, zeroinitializer
@@ -26,15 +26,15 @@ define <4 x double> @cmp4f64_domain(<4 x double> %a) {
define <4 x double> @cmp4f64_domain_optsize(<4 x double> %a) optsize {
; X86-LABEL: cmp4f64_domain_optsize:
; X86: # BB#0:
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp4f64_domain_optsize:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%cmp = fcmp oeq <4 x double> zeroinitializer, zeroinitializer
@@ -47,15 +47,15 @@ define <4 x double> @cmp4f64_domain_optsize(<4 x double> %a) optsize {
define <8 x float> @cmp8f32_domain(<8 x float> %a) {
; X86-LABEL: cmp8f32_domain:
; X86: # BB#0:
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp8f32_domain:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%cmp = fcmp oeq <8 x float> zeroinitializer, zeroinitializer
@@ -68,15 +68,15 @@ define <8 x float> @cmp8f32_domain(<8 x float> %a) {
define <8 x float> @cmp8f32_domain_optsize(<8 x float> %a) optsize {
; X86-LABEL: cmp8f32_domain_optsize:
; X86: # BB#0:
-; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp8f32_domain_optsize:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%cmp = fcmp oeq <8 x float> zeroinitializer, zeroinitializer
diff --git a/test/CodeGen/X86/pr29112.ll b/test/CodeGen/X86/pr29112.ll
index 8c970b3d4771..94904018872b 100644
--- a/test/CodeGen/X86/pr29112.ll
+++ b/test/CodeGen/X86/pr29112.ll
@@ -38,7 +38,8 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm8[0],xmm0[0],xmm8[2,3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[1],xmm1[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm14 = xmm1[0,1,2],xmm3[1]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm10 = xmm10[0,1,2],xmm3[1]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm10[0,1,2],xmm3[1]
+; CHECK-NEXT: vaddps %xmm14, %xmm1, %xmm10
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[2,3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[1],xmm0[3]
@@ -52,10 +53,9 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
-; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
-; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm10, %xmm0, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
diff --git a/test/CodeGen/X86/pr30562.ll b/test/CodeGen/X86/pr30562.ll
index dda736a1a183..a8e648074194 100644
--- a/test/CodeGen/X86/pr30562.ll
+++ b/test/CodeGen/X86/pr30562.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
define i32 @foo(i64* nocapture %perm, i32 %n) {
entry:
br label %body
diff --git a/test/CodeGen/X86/pr31088.ll b/test/CodeGen/X86/pr31088.ll
index 0dd8eb0ece85..d7a546c7396d 100644
--- a/test/CodeGen/X86/pr31088.ll
+++ b/test/CodeGen/X86/pr31088.ll
@@ -150,12 +150,12 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
; F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1
; F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; F16C-NEXT: vcvtph2ps %xmm2, %xmm2
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vaddss %xmm2, %xmm0, %xmm0
-; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1
; F16C-NEXT: retq
%retval = fadd <2 x half> %arg0, %arg1
ret <2 x half> %retval
diff --git a/test/CodeGen/X86/pr32284.ll b/test/CodeGen/X86/pr32284.ll
index e05fc926b080..143e3af82eb7 100644
--- a/test/CodeGen/X86/pr32284.ll
+++ b/test/CodeGen/X86/pr32284.ll
@@ -30,25 +30,24 @@ define void @foo() {
; X86-O0-NEXT: subl $12, %esp
; X86-O0-NEXT: .Lcfi0:
; X86-O0-NEXT: .cfi_def_cfa_offset 16
-; X86-O0-NEXT: movzbl c, %eax
-; X86-O0-NEXT: testl %eax, %eax
-; X86-O0-NEXT: setne %cl
-; X86-O0-NEXT: movl %eax, %edx
-; X86-O0-NEXT: movb %dl, %ch
-; X86-O0-NEXT: testb %ch, %ch
+; X86-O0-NEXT: movb c, %al
+; X86-O0-NEXT: testb %al, %al
; X86-O0-NEXT: setne {{[0-9]+}}(%esp)
-; X86-O0-NEXT: movzbl %cl, %edx
-; X86-O0-NEXT: subl %eax, %edx
-; X86-O0-NEXT: setle %cl
-; X86-O0-NEXT: # implicit-def: %EAX
-; X86-O0-NEXT: movb %cl, %al
-; X86-O0-NEXT: andl $1, %eax
-; X86-O0-NEXT: kmovd %eax, %k0
-; X86-O0-NEXT: kmovd %k0, %eax
+; X86-O0-NEXT: movzbl c, %ecx
+; X86-O0-NEXT: testl %ecx, %ecx
+; X86-O0-NEXT: setne %al
+; X86-O0-NEXT: movzbl %al, %edx
+; X86-O0-NEXT: subl %ecx, %edx
+; X86-O0-NEXT: setle %al
+; X86-O0-NEXT: # implicit-def: %ECX
; X86-O0-NEXT: movb %al, %cl
-; X86-O0-NEXT: andb $1, %cl
-; X86-O0-NEXT: movzbl %cl, %eax
-; X86-O0-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-O0-NEXT: andl $1, %ecx
+; X86-O0-NEXT: kmovd %ecx, %k0
+; X86-O0-NEXT: kmovd %k0, %ecx
+; X86-O0-NEXT: movb %cl, %al
+; X86-O0-NEXT: andb $1, %al
+; X86-O0-NEXT: movzbl %al, %ecx
+; X86-O0-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-O0-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-O0-NEXT: addl $12, %esp
; X86-O0-NEXT: retl
@@ -69,27 +68,25 @@ define void @foo() {
;
; X64-O0-LABEL: foo:
; X64-O0: # BB#0: # %entry
-; X64-O0-NEXT: movzbl {{.*}}(%rip), %eax
-; X64-O0-NEXT: movl %eax, %ecx
-; X64-O0-NEXT: movb %cl, %dl
-; X64-O0-NEXT: movl %ecx, %eax
-; X64-O0-NEXT: testq %rcx, %rcx
-; X64-O0-NEXT: setne %sil
-; X64-O0-NEXT: testb %dl, %dl
+; X64-O0-NEXT: movb {{.*}}(%rip), %al
+; X64-O0-NEXT: testb %al, %al
; X64-O0-NEXT: setne -{{[0-9]+}}(%rsp)
-; X64-O0-NEXT: movzbl %sil, %edi
-; X64-O0-NEXT: subl %eax, %edi
-; X64-O0-NEXT: setle %dl
-; X64-O0-NEXT: # implicit-def: %EAX
-; X64-O0-NEXT: movb %dl, %al
-; X64-O0-NEXT: andl $1, %eax
-; X64-O0-NEXT: kmovd %eax, %k0
-; X64-O0-NEXT: kmovd %k0, %eax
-; X64-O0-NEXT: movb %al, %dl
-; X64-O0-NEXT: andb $1, %dl
-; X64-O0-NEXT: movzbl %dl, %eax
-; X64-O0-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
-; X64-O0-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-O0-NEXT: movzbl {{.*}}(%rip), %ecx
+; X64-O0-NEXT: testl %ecx, %ecx
+; X64-O0-NEXT: setne %al
+; X64-O0-NEXT: movzbl %al, %edx
+; X64-O0-NEXT: subl %ecx, %edx
+; X64-O0-NEXT: setle %al
+; X64-O0-NEXT: # implicit-def: %ECX
+; X64-O0-NEXT: movb %al, %cl
+; X64-O0-NEXT: andl $1, %ecx
+; X64-O0-NEXT: kmovd %ecx, %k0
+; X64-O0-NEXT: kmovd %k0, %ecx
+; X64-O0-NEXT: movb %cl, %al
+; X64-O0-NEXT: andb $1, %al
+; X64-O0-NEXT: movzbl %al, %ecx
+; X64-O0-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; X64-O0-NEXT: movl %edx, -{{[0-9]+}}(%rsp) # 4-byte Spill
; X64-O0-NEXT: retq
entry:
%a = alloca i8, align 1
diff --git a/test/CodeGen/X86/pr32907.ll b/test/CodeGen/X86/pr32907.ll
index bc03fbe06843..8057b31c961c 100644
--- a/test/CodeGen/X86/pr32907.ll
+++ b/test/CodeGen/X86/pr32907.ll
@@ -5,41 +5,44 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
-; SSE-LABEL: PR32907:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: psubq %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: psubq %xmm0, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: PR32907:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psubq %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: PR32907:
+; SSE42: # BB#0: # %entry
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm1, %xmm1
+; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
+; SSE42-NEXT: pxor %xmm1, %xmm0
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: retq
;
; AVX2-LABEL: PR32907:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm2
-; AVX2-NEXT: vpandn %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32907:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsraq $63, %zmm0, %zmm1
-; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm2
-; AVX512-NEXT: vpandn %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/replace_unsupported_masked_mem_intrin.ll b/test/CodeGen/X86/replace_unsupported_masked_mem_intrin.ll
new file mode 100644
index 000000000000..9a5da33223ba
--- /dev/null
+++ b/test/CodeGen/X86/replace_unsupported_masked_mem_intrin.ll
@@ -0,0 +1,37 @@
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,+sse2 < %s -o /dev/null
+; pr33001 - Check that llc doesn't crash when run with the -O0 option.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define <4 x i32> @test_masked_load(<4 x i32>* %base, <4 x i1> %mask) {
+ %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %base, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+
+
+define void @test_masked_store(<4 x i32>* %base, <4 x i32> %value, <4 x i1> %mask) {
+ call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %value, <4 x i32>* %base, i32 4, <4 x i1> %mask)
+ ret void
+}
+
+declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+
+
+define <4 x i32> @llvm_masked_gather(<4 x i32*> %ptrs, <4 x i1> %mask) {
+ %res = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> undef)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+
+
+define void @llvm_masked_scatter(<4 x i32*> %ptrs, <4 x i32> %value, <4 x i1> %mask) {
+ call void @llvm.masked.scatter.v4i32(<4 x i32> %value, <4 x i32*> %ptrs, i32 4, <4 x i1> %mask)
+ ret void
+}
+
+declare void @llvm.masked.scatter.v4i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
+
diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll
index 5d5150ad62d6..4be3a4c2391b 100644
--- a/test/CodeGen/X86/rotate.ll
+++ b/test/CodeGen/X86/rotate.ll
@@ -33,8 +33,8 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB0_4:
-; 32-NEXT: orl %esi, %eax
; 32-NEXT: orl %ebx, %edx
+; 32-NEXT: orl %esi, %eax
; 32-NEXT: popl %esi
; 32-NEXT: popl %edi
; 32-NEXT: popl %ebx
@@ -86,8 +86,8 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB1_4:
-; 32-NEXT: orl %ebx, %eax
; 32-NEXT: orl %esi, %edx
+; 32-NEXT: orl %ebx, %eax
; 32-NEXT: popl %esi
; 32-NEXT: popl %edi
; 32-NEXT: popl %ebx
@@ -546,7 +546,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-LABEL: rotr1_64_mem:
; 32: # BB#0:
; 32-NEXT: pushl %esi
-; 32-NEXT: movl 8(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: movl (%eax), %ecx
; 32-NEXT: movl 4(%eax), %edx
; 32-NEXT: movl %edx, %esi
@@ -555,11 +555,13 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-NEXT: movl %ecx, 4(%eax)
; 32-NEXT: movl %esi, (%eax)
; 32-NEXT: popl %esi
-
+; 32-NEXT: retl
+;
; 64-LABEL: rotr1_64_mem:
; 64: # BB#0:
; 64-NEXT: rorq (%rdi)
; 64-NEXT: retq
+
%A = load i64, i64 *%Aptr
%B = shl i64 %A, 63
%C = lshr i64 %A, 1
@@ -571,7 +573,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
define void @rotr1_32_mem(i32* %Aptr) nounwind {
; 32-LABEL: rotr1_32_mem:
; 32: # BB#0:
-; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorl (%eax)
; 32-NEXT: retl
;
@@ -590,7 +592,7 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind {
define void @rotr1_16_mem(i16* %Aptr) nounwind {
; 32-LABEL: rotr1_16_mem:
; 32: # BB#0:
-; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorw (%eax)
; 32-NEXT: retl
;
@@ -609,7 +611,7 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind {
define void @rotr1_8_mem(i8* %Aptr) nounwind {
; 32-LABEL: rotr1_8_mem:
; 32: # BB#0:
-; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorb (%eax)
; 32-NEXT: retl
;
diff --git a/test/CodeGen/X86/sad.ll b/test/CodeGen/X86/sad.ll
index b8a8b8afd14f..6a565a5c76f0 100644
--- a/test/CodeGen/X86/sad.ll
+++ b/test/CodeGen/X86/sad.ll
@@ -149,127 +149,131 @@ middle.block:
define i32 @sad_32i8() nounwind {
; SSE2-LABEL: sad_32i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm11, %xmm11
-; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm12, %xmm12
-; SSE2-NEXT: pxor %xmm15, %xmm15
+; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm14, %xmm14
+; SSE2-NEXT: pxor %xmm15, %xmm15
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB1_1: # %vector.body
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa a+1040(%rax), %xmm6
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa a+1040(%rax), %xmm8
; SSE2-NEXT: movdqa a+1024(%rax), %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm8
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm11[8],xmm3[9],xmm11[9],xmm3[10],xmm11[10],xmm3[11],xmm11[11],xmm3[12],xmm11[12],xmm3[13],xmm11[13],xmm3[14],xmm11[14],xmm3[15],xmm11[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm11[8],xmm6[9],xmm11[9],xmm6[10],xmm11[10],xmm6[11],xmm11[11],xmm6[12],xmm11[12],xmm6[13],xmm11[13],xmm6[14],xmm11[14],xmm6[15],xmm11[15]
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15]
+; SSE2-NEXT: movdqa b+1024(%rax), %xmm11
+; SSE2-NEXT: movdqa %xmm11, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
+; SSE2-NEXT: psubd %xmm2, %xmm7
; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
-; SSE2-NEXT: movdqa %xmm9, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
-; SSE2-NEXT: movdqa %xmm9, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; SSE2-NEXT: psubd %xmm9, %xmm6
-; SSE2-NEXT: movdqa b+1024(%rax), %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
-; SSE2-NEXT: psubd %xmm10, %xmm7
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
+; SSE2-NEXT: psubd %xmm10, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
+; SSE2-NEXT: movdqa %xmm11, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
; SSE2-NEXT: psubd %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
-; SSE2-NEXT: psubd %xmm9, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
-; SSE2-NEXT: psubd %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
-; SSE2-NEXT: psubd %xmm9, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
-; SSE2-NEXT: psubd %xmm2, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
-; SSE2-NEXT: psubd %xmm4, %xmm10
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm10
-; SSE2-NEXT: pxor %xmm2, %xmm10
-; SSE2-NEXT: movdqa %xmm8, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm8
-; SSE2-NEXT: pxor %xmm2, %xmm8
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm2, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm7, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm7
-; SSE2-NEXT: pxor %xmm2, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; SSE2-NEXT: psubd %xmm11, %xmm3
+; SSE2-NEXT: movdqa %xmm6, %xmm10
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
; SSE2-NEXT: movdqa %xmm6, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm6
-; SSE2-NEXT: pxor %xmm2, %xmm6
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm6, %xmm14
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
+; SSE2-NEXT: psubd %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm8, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; SSE2-NEXT: psubd %xmm6, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
+; SSE2-NEXT: psubd %xmm6, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
+; SSE2-NEXT: psubd %xmm9, %xmm8
+; SSE2-NEXT: movdqa %xmm7, %xmm6
+; SSE2-NEXT: psrad $31, %xmm6
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
; SSE2-NEXT: paddd %xmm7, %xmm13
-; SSE2-NEXT: paddd %xmm1, %xmm15
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: psrad $31, %xmm6
+; SSE2-NEXT: paddd %xmm6, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm10, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm1, %xmm3
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm5, %xmm2
-; SSE2-NEXT: paddd %xmm8, %xmm3
-; SSE2-NEXT: paddd %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm5
+; SSE2-NEXT: pxor %xmm1, %xmm5
+; SSE2-NEXT: paddd %xmm5, %xmm14
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm15
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm8, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: pxor %xmm0, %xmm8
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm8, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB1_1
; SSE2-NEXT: # BB#2: # %middle.block
-; SSE2-NEXT: paddd %xmm15, %xmm3
-; SSE2-NEXT: paddd %xmm14, %xmm1
-; SSE2-NEXT: paddd %xmm12, %xmm0
-; SSE2-NEXT: paddd %xmm13, %xmm2
-; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: paddd %xmm15, %xmm6
+; SSE2-NEXT: paddd %xmm0, %xmm3
+; SSE2-NEXT: paddd %xmm6, %xmm3
+; SSE2-NEXT: paddd %xmm14, %xmm13
+; SSE2-NEXT: paddd %xmm1, %xmm4
+; SSE2-NEXT: paddd %xmm3, %xmm4
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
+; SSE2-NEXT: paddd %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
@@ -398,288 +402,284 @@ middle.block:
define i32 @sad_avx64i8() nounwind {
; SSE2-LABEL: sad_avx64i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: subq $184, %rsp
-; SSE2-NEXT: pxor %xmm15, %xmm15
+; SSE2-NEXT: subq $200, %rsp
+; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
-; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pxor %xmm15, %xmm15
+; SSE2-NEXT: pxor %xmm10, %xmm10
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm11, %xmm11
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm11, %xmm11
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: pxor %xmm13, %xmm13
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB2_1: # %vector.body
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm13, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm7, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm14, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm6, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm8, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm12, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa a+1040(%rax), %xmm6
-; SSE2-NEXT: movdqa a+1024(%rax), %xmm4
-; SSE2-NEXT: movdqa a+1056(%rax), %xmm11
-; SSE2-NEXT: movdqa a+1072(%rax), %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm12
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm15[0],xmm12[1],xmm15[1],xmm12[2],xmm15[2],xmm12[3],xmm15[3],xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
-; SSE2-NEXT: movdqa %xmm12, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm15[8],xmm4[9],xmm15[9],xmm4[10],xmm15[10],xmm4[11],xmm15[11],xmm4[12],xmm15[12],xmm4[13],xmm15[13],xmm4[14],xmm15[14],xmm4[15],xmm15[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm15[4],xmm4[5],xmm15[5],xmm4[6],xmm15[6],xmm4[7],xmm15[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm14
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; SSE2-NEXT: movdqa %xmm14, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm15[0],xmm7[1],xmm15[1],xmm7[2],xmm15[2],xmm7[3],xmm15[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm15[8],xmm6[9],xmm15[9],xmm6[10],xmm15[10],xmm6[11],xmm15[11],xmm6[12],xmm15[12],xmm6[13],xmm15[13],xmm6[14],xmm15[14],xmm6[15],xmm15[15]
-; SSE2-NEXT: movdqa %xmm6, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
-; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
-; SSE2-NEXT: movdqa %xmm9, %xmm13
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm15[8],xmm9[9],xmm15[9],xmm9[10],xmm15[10],xmm9[11],xmm15[11],xmm9[12],xmm15[12],xmm9[13],xmm15[13],xmm9[14],xmm15[14],xmm9[15],xmm15[15]
-; SSE2-NEXT: movdqa %xmm9, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm9, %xmm6
-; SSE2-NEXT: movdqa b+1024(%rax), %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3],xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm10, %xmm8
-; SSE2-NEXT: movdqa %xmm13, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm13, %xmm14
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm9, %xmm7
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm2, %xmm4
-; SSE2-NEXT: movdqa b+1056(%rax), %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm9, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm10, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm10, %xmm12
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: psubd %xmm9, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm2, %xmm11
-; SSE2-NEXT: movdqa %xmm1, %xmm13
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm9, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm10, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm2, %xmm13
-; SSE2-NEXT: movdqa b+1072(%rax), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm2, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm9, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm10, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps a+1040(%rax), %xmm0
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa a+1024(%rax), %xmm12
+; SSE2-NEXT: movdqa a+1056(%rax), %xmm15
+; SSE2-NEXT: movdqa a+1072(%rax), %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3]
+; SSE2-NEXT: movdqa %xmm15, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm14[8],xmm11[9],xmm14[9],xmm11[10],xmm14[10],xmm11[11],xmm14[11],xmm11[12],xmm14[12],xmm11[13],xmm14[13],xmm11[14],xmm14[14],xmm11[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm11, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; SSE2-NEXT: movdqa %xmm15, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE2-NEXT: psubd %xmm0, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE2-NEXT: psubd %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm9, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm9
-; SSE2-NEXT: pxor %xmm0, %xmm9
-; SSE2-NEXT: movdqa %xmm5, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; SSE2-NEXT: movdqa %xmm12, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
; SSE2-NEXT: movdqa %xmm10, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm10
-; SSE2-NEXT: pxor %xmm0, %xmm10
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm13, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm13
-; SSE2-NEXT: pxor %xmm0, %xmm13
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm11, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm11
-; SSE2-NEXT: pxor %xmm0, %xmm11
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
; SSE2-NEXT: movdqa %xmm12, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: pxor %xmm0, %xmm12
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
+; SSE2-NEXT: movdqa b+1072(%rax), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm0, %xmm1
+; SSE2-NEXT: movdqa b+1056(%rax), %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm7, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm7, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm7, %xmm8
+; SSE2-NEXT: movdqa b+1024(%rax), %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm3, %xmm11
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm0, %xmm15
+; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm3, %xmm9
+; SSE2-NEXT: movdqa %xmm9, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm0, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm0, %xmm13
+; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm9, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm7, %xmm12
+; SSE2-NEXT: movdqa b+1040(%rax), %xmm13
+; SSE2-NEXT: movdqa %xmm13, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm7, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm3, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm13, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
+; SSE2-NEXT: psubd %xmm3, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
+; SSE2-NEXT: psubd %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm1, %xmm6
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm5
+; SSE2-NEXT: pxor %xmm1, %xmm5
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm8
+; SSE2-NEXT: pxor %xmm1, %xmm8
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm11, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm11
+; SSE2-NEXT: pxor %xmm1, %xmm11
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa (%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm4
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm11
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm15, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm15
+; SSE2-NEXT: pxor %xmm1, %xmm15
+; SSE2-NEXT: paddd %xmm15, %xmm2
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm4
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm15
+; SSE2-NEXT: movdqa %xmm10, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm10
+; SSE2-NEXT: pxor %xmm1, %xmm10
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm10
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm1, %xmm6
+; SSE2-NEXT: paddd %xmm6, %xmm3
+; SSE2-NEXT: movdqa %xmm12, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm12
+; SSE2-NEXT: pxor %xmm1, %xmm12
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: movdqa %xmm9, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm0, %xmm4
+; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: pxor %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm9, %xmm1
; SSE2-NEXT: movdqa %xmm7, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: paddd %xmm0, %xmm7
; SSE2-NEXT: pxor %xmm0, %xmm7
-; SSE2-NEXT: movdqa %xmm14, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm14
-; SSE2-NEXT: pxor %xmm0, %xmm14
-; SSE2-NEXT: movdqa %xmm8, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: movdqa %xmm6, %xmm0
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm7, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: pxor %xmm0, %xmm6
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm8, %xmm6
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm14, %xmm2
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm12, %xmm8
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm0, %xmm12
-; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm11, %xmm0
-; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsp), %xmm11 # 16-byte Reload
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: pxor %xmm0, %xmm7
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm1, %xmm2
-; SSE2-NEXT: paddd %xmm13, %xmm7
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm10, %xmm1
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm5, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm9, %xmm5
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB2_1
; SSE2-NEXT: # BB#2: # %middle.block
-; SSE2-NEXT: paddd %xmm2, %xmm4
-; SSE2-NEXT: paddd %xmm3, %xmm6
-; SSE2-NEXT: movdqa %xmm12, %xmm2
-; SSE2-NEXT: paddd %xmm11, %xmm2
-; SSE2-NEXT: paddd %xmm13, %xmm14
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm7, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm5, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm3, %xmm8
+; SSE2-NEXT: paddd %xmm2, %xmm15
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm8, %xmm13
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm5, %xmm0
+; SSE2-NEXT: paddd %xmm11, %xmm10
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm10, %xmm1
+; SSE2-NEXT: paddd %xmm13, %xmm1
+; SSE2-NEXT: paddd %xmm15, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: paddd %xmm3, %xmm7
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: paddd %xmm14, %xmm6
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm8, %xmm7
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: paddd %xmm2, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,0,1]
-; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: addq $184, %rsp
+; SSE2-NEXT: addq $200, %rsp
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_avx64i8:
@@ -688,8 +688,8 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm6, %ymm6, %ymm6
; AVX2-NEXT: vpxor %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm7, %ymm7, %ymm7
@@ -697,7 +697,6 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: .LBB2_1: # %vector.body
; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vmovdqu %ymm8, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -705,48 +704,49 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm8, %ymm15, %ymm8
+; AVX2-NEXT: vmovdqu %ymm15, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14
+; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm8
+; AVX2-NEXT: vmovdqu %ymm8, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13
+; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12
+; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpsubd %ymm15, %ymm11, %ymm11
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10
+; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9
-; AVX2-NEXT: vmovdqu %ymm9, -{{[0-9]+}}(%rsp) # 32-byte Spill
+; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm9 # 32-byte Reload
-; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm15
-; AVX2-NEXT: vpabsd %ymm8, %ymm8
+; AVX2-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Reload
+; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm15
+; AVX2-NEXT: vpabsd -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Folded Reload
+; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpabsd %ymm9, %ymm8
+; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5
+; AVX2-NEXT: vpabsd %ymm10, %ymm8
+; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6
+; AVX2-NEXT: vpabsd %ymm11, %ymm8
; AVX2-NEXT: vpaddd %ymm3, %ymm8, %ymm3
-; AVX2-NEXT: vpabsd %ymm14, %ymm8
-; AVX2-NEXT: vpaddd %ymm1, %ymm8, %ymm1
-; AVX2-NEXT: vpabsd %ymm13, %ymm8
-; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2
; AVX2-NEXT: vpabsd %ymm12, %ymm8
; AVX2-NEXT: vpaddd %ymm0, %ymm8, %ymm0
-; AVX2-NEXT: vpabsd %ymm11, %ymm8
-; AVX2-NEXT: vpaddd %ymm4, %ymm8, %ymm4
-; AVX2-NEXT: vpabsd %ymm10, %ymm8
-; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6
-; AVX2-NEXT: vpabsd -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Folded Reload
-; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5
+; AVX2-NEXT: vpabsd %ymm13, %ymm8
+; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2
+; AVX2-NEXT: vpabsd %ymm14, %ymm8
+; AVX2-NEXT: vpaddd %ymm1, %ymm8, %ymm1
; AVX2-NEXT: vpabsd %ymm15, %ymm8
-; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7
+; AVX2-NEXT: vpaddd %ymm4, %ymm8, %ymm4
; AVX2-NEXT: addq $4, %rax
; AVX2-NEXT: jne .LBB2_1
; AVX2-NEXT: # BB#2: # %middle.block
; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm7, %ymm4, %ymm4
+; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -773,21 +773,21 @@ define i32 @sad_avx64i8() nounwind {
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpsubd %zmm11, %zmm7, %zmm7
-; AVX512F-NEXT: vpsubd %zmm10, %zmm6, %zmm6
-; AVX512F-NEXT: vpsubd %zmm9, %zmm5, %zmm5
; AVX512F-NEXT: vpsubd %zmm8, %zmm4, %zmm4
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpsubd %zmm8, %zmm5, %zmm5
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpsubd %zmm8, %zmm6, %zmm6
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpsubd %zmm8, %zmm7, %zmm7
; AVX512F-NEXT: vpabsd %zmm4, %zmm4
-; AVX512F-NEXT: vpabsd %zmm5, %zmm5
-; AVX512F-NEXT: vpabsd %zmm6, %zmm6
-; AVX512F-NEXT: vpabsd %zmm7, %zmm7
-; AVX512F-NEXT: vpaddd %zmm3, %zmm7, %zmm3
-; AVX512F-NEXT: vpaddd %zmm2, %zmm6, %zmm2
-; AVX512F-NEXT: vpaddd %zmm1, %zmm5, %zmm1
; AVX512F-NEXT: vpaddd %zmm0, %zmm4, %zmm0
+; AVX512F-NEXT: vpabsd %zmm5, %zmm4
+; AVX512F-NEXT: vpaddd %zmm1, %zmm4, %zmm1
+; AVX512F-NEXT: vpabsd %zmm6, %zmm4
+; AVX512F-NEXT: vpaddd %zmm2, %zmm4, %zmm2
+; AVX512F-NEXT: vpabsd %zmm7, %zmm4
+; AVX512F-NEXT: vpaddd %zmm3, %zmm4, %zmm3
; AVX512F-NEXT: addq $4, %rax
; AVX512F-NEXT: jne .LBB2_1
; AVX512F-NEXT: # BB#2: # %middle.block
@@ -1154,59 +1154,54 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; SSE2-LABEL: sad_nonloop_32i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: movdqu 16(%rdi), %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm12
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm12, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm13
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3],xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm13, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: movdqu (%rdx), %xmm5
-; SSE2-NEXT: movdqu 16(%rdx), %xmm7
-; SSE2-NEXT: movdqa %xmm7, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: psubd %xmm5, %xmm0
-; SSE2-NEXT: psubd %xmm7, %xmm3
-; SSE2-NEXT: psubd %xmm2, %xmm13
-; SSE2-NEXT: psubd %xmm1, %xmm12
-; SSE2-NEXT: psubd %xmm8, %xmm6
-; SSE2-NEXT: psubd %xmm15, %xmm11
-; SSE2-NEXT: psubd %xmm14, %xmm10
-; SSE2-NEXT: psubd -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm9, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm9
-; SSE2-NEXT: pxor %xmm1, %xmm9
+; SSE2-NEXT: movdqu 16(%rdi), %xmm12
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm12, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm9
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3],xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
+; SSE2-NEXT: movdqa %xmm9, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
+; SSE2-NEXT: movdqa %xmm12, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1],xmm12[2],xmm1[2],xmm12[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: movdqu (%rdx), %xmm7
+; SSE2-NEXT: movdqu 16(%rdx), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT: psubd %xmm5, %xmm10
+; SSE2-NEXT: movdqa %xmm7, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT: psubd %xmm5, %xmm11
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT: psubd %xmm5, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT: psubd %xmm5, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; SSE2-NEXT: psubd %xmm6, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: psubd %xmm2, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-NEXT: psubd %xmm3, %xmm12
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; SSE2-NEXT: psubd %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm10, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm10
@@ -1215,33 +1210,37 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm11
; SSE2-NEXT: pxor %xmm1, %xmm11
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm6
-; SSE2-NEXT: pxor %xmm1, %xmm6
-; SSE2-NEXT: movdqa %xmm12, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm12
-; SSE2-NEXT: pxor %xmm1, %xmm12
; SSE2-NEXT: movdqa %xmm13, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm13
; SSE2-NEXT: pxor %xmm1, %xmm13
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: paddd %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm4
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: paddd %xmm10, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm4
+; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm8
+; SSE2-NEXT: pxor %xmm1, %xmm8
+; SSE2-NEXT: movdqa %xmm9, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm9
+; SSE2-NEXT: pxor %xmm1, %xmm9
+; SSE2-NEXT: movdqa %xmm12, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm12
+; SSE2-NEXT: pxor %xmm1, %xmm12
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: paddd %xmm11, %xmm6
-; SSE2-NEXT: paddd %xmm9, %xmm6
-; SSE2-NEXT: paddd %xmm10, %xmm6
; SSE2-NEXT: paddd %xmm12, %xmm0
-; SSE2-NEXT: paddd %xmm6, %xmm0
-; SSE2-NEXT: paddd %xmm13, %xmm0
+; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm9, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll
index ce42d0d643e8..1afef86a5f11 100644
--- a/test/CodeGen/X86/select.ll
+++ b/test/CodeGen/X86/select.ll
@@ -299,20 +299,21 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; GENERIC-NEXT: testb %dil, %dil
; GENERIC-NEXT: jne LBB7_4
; GENERIC-NEXT: ## BB#5:
+; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; GENERIC-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; GENERIC-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; GENERIC-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: jmp LBB7_6
; GENERIC-NEXT: LBB7_4:
-; GENERIC-NEXT: movd %r9d, %xmm2
-; GENERIC-NEXT: movd %ecx, %xmm3
-; GENERIC-NEXT: movd %r8d, %xmm4
+; GENERIC-NEXT: movd %r9d, %xmm1
+; GENERIC-NEXT: movd %ecx, %xmm2
+; GENERIC-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; GENERIC-NEXT: movd %r8d, %xmm3
; GENERIC-NEXT: movd %edx, %xmm1
; GENERIC-NEXT: LBB7_6:
-; GENERIC-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm1
; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm0
; GENERIC-NEXT: movq %xmm0, 16(%rsi)
@@ -339,16 +340,19 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; ATOM-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: jmp LBB7_6
; ATOM-NEXT: LBB7_4:
-; ATOM-NEXT: movd %r9d, %xmm2
-; ATOM-NEXT: movd %ecx, %xmm3
-; ATOM-NEXT: movd %r8d, %xmm4
+; ATOM-NEXT: movd %r9d, %xmm1
+; ATOM-NEXT: movd %ecx, %xmm2
+; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; ATOM-NEXT: movd %r8d, %xmm3
; ATOM-NEXT: movd %edx, %xmm1
-; ATOM-NEXT: LBB7_6:
-; ATOM-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; ATOM-NEXT: LBB7_6:
; ATOM-NEXT: psubd {{.*}}(%rip), %xmm0
; ATOM-NEXT: psubd {{.*}}(%rip), %xmm1
; ATOM-NEXT: movq %xmm0, 16(%rsi)
diff --git a/test/CodeGen/X86/setcc-wide-types.ll b/test/CodeGen/X86/setcc-wide-types.ll
index 2996edaec3e0..332bf2887fb0 100644
--- a/test/CodeGen/X86/setcc-wide-types.ll
+++ b/test/CodeGen/X86/setcc-wide-types.ll
@@ -58,25 +58,25 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: ne_i256:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %r8
+; SSE2-NEXT: movq %xmm4, %rax
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %r9
-; SSE2-NEXT: movq %xmm0, %r10
-; SSE2-NEXT: movq %xmm1, %rsi
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm1, %r8
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: xorq %rax, %rdi
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %xmm2, %rcx
-; SSE2-NEXT: movq %xmm3, %rdx
-; SSE2-NEXT: xorq %rsi, %rdx
-; SSE2-NEXT: xorq %r10, %rcx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: xorq %r9, %rax
-; SSE2-NEXT: xorq %r8, %rdi
-; SSE2-NEXT: orq %rax, %rdi
+; SSE2-NEXT: movq %xmm0, %rsi
+; SSE2-NEXT: xorq %rcx, %rsi
+; SSE2-NEXT: orq %rdi, %rsi
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: xorq %rdx, %rax
+; SSE2-NEXT: movq %xmm3, %rcx
+; SSE2-NEXT: xorq %r8, %rcx
+; SSE2-NEXT: orq %rax, %rcx
; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: orq %rcx, %rdi
+; SSE2-NEXT: orq %rsi, %rcx
; SSE2-NEXT: setne %al
; SSE2-NEXT: retq
;
@@ -100,25 +100,25 @@ define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: eq_i256:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %r8
+; SSE2-NEXT: movq %xmm4, %rax
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %r9
-; SSE2-NEXT: movq %xmm0, %r10
-; SSE2-NEXT: movq %xmm1, %rsi
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: movq %xmm0, %rdx
+; SSE2-NEXT: movq %xmm1, %r8
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rdi
+; SSE2-NEXT: xorq %rax, %rdi
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %xmm2, %rcx
-; SSE2-NEXT: movq %xmm3, %rdx
-; SSE2-NEXT: xorq %rsi, %rdx
-; SSE2-NEXT: xorq %r10, %rcx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: xorq %r9, %rax
-; SSE2-NEXT: xorq %r8, %rdi
-; SSE2-NEXT: orq %rax, %rdi
+; SSE2-NEXT: movq %xmm0, %rsi
+; SSE2-NEXT: xorq %rcx, %rsi
+; SSE2-NEXT: orq %rdi, %rsi
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: xorq %rdx, %rax
+; SSE2-NEXT: movq %xmm3, %rcx
+; SSE2-NEXT: xorq %r8, %rcx
+; SSE2-NEXT: orq %rax, %rcx
; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: orq %rcx, %rdi
+; SSE2-NEXT: orq %rsi, %rcx
; SSE2-NEXT: sete %al
; SSE2-NEXT: retq
;
diff --git a/test/CodeGen/X86/shrink_vmul_sse.ll b/test/CodeGen/X86/shrink_vmul_sse.ll
index c869dff9e642..6701c247e6fc 100644
--- a/test/CodeGen/X86/shrink_vmul_sse.ll
+++ b/test/CodeGen/X86/shrink_vmul_sse.ll
@@ -20,9 +20,9 @@ define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; CHECK-NEXT: movzbl 1(%edx,%ecx), %edi
; CHECK-NEXT: movzbl (%edx,%ecx), %edx
; CHECK-NEXT: movzbl 1(%eax,%ecx), %ebx
+; CHECK-NEXT: imull %edi, %ebx
; CHECK-NEXT: movzbl (%eax,%ecx), %eax
; CHECK-NEXT: imull %edx, %eax
-; CHECK-NEXT: imull %edi, %ebx
; CHECK-NEXT: movl %ebx, 4(%esi,%ecx,4)
; CHECK-NEXT: movl %eax, (%esi,%ecx,4)
; CHECK-NEXT: popl %esi
diff --git a/test/CodeGen/X86/shuffle-of-splat-multiuses.ll b/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
index d46082f20a45..cbd5c69b1772 100644
--- a/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
+++ b/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
@@ -5,9 +5,8 @@
define <2 x double> @foo2(<2 x double> %v, <2 x double> *%p) nounwind {
; AVX2-LABEL: foo2:
; AVX2: # BB#0:
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,1]
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
-; AVX2-NEXT: vmovapd %xmm1, (%rdi)
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
+; AVX2-NEXT: vmovapd %xmm0, (%rdi)
; AVX2-NEXT: retq
%res = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
%res1 = shufflevector<2 x double> %res, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
@@ -18,9 +17,8 @@ define <2 x double> @foo2(<2 x double> %v, <2 x double> *%p) nounwind {
define <4 x double> @foo4(<4 x double> %v, <4 x double> *%p) nounwind {
; AVX2-LABEL: foo4:
; AVX2: # BB#0:
-; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,2,2,2]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm1[2,0,2,3]
-; AVX2-NEXT: vmovapd %ymm1, (%rdi)
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-NEXT: vmovapd %ymm0, (%rdi)
; AVX2-NEXT: retq
%res = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
%res1 = shufflevector<4 x double> %res, <4 x double> undef, <4 x i32> <i32 2, i32 0, i32 undef, i32 undef>
@@ -32,10 +30,8 @@ define <8 x float> @foo8(<8 x float> %v, <8 x float> *%p) nounwind {
; AVX2-LABEL: foo8:
; AVX2: # BB#0:
; AVX2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,2,2,2]
-; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = <2,0,u,u,5,1,3,7>
-; AVX2-NEXT: vpermps %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vmovapd %ymm1, (%rdi)
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-NEXT: vmovapd %ymm0, (%rdi)
; AVX2-NEXT: retq
%res = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%res1 = shufflevector<8 x float> %res, <8 x float> undef, <8 x i32> <i32 2, i32 0, i32 undef, i32 undef, i32 5, i32 1, i32 3, i32 7>
@@ -46,7 +42,7 @@ define <8 x float> @foo8(<8 x float> %v, <8 x float> *%p) nounwind {
define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -66,7 +62,7 @@ define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask3:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
@@ -76,9 +72,10 @@ define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask4(<4 x i32> %v, <4 x i32>* %p) nounwind {
; AVX2-LABEL: undef_splatmask4:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT: vmovdqa %xmm1, %xmm0
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -89,9 +86,10 @@ define <4 x i32> @undef_splatmask4(<4 x i32> %v, <4 x i32>* %p) nounwind {
define <4 x i32> @undef_splatmask5(<4 x i32> %v, <4 x i32>* %p) nounwind {
; AVX2-LABEL: undef_splatmask5:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm1
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT: vmovdqa %xmm1, %xmm0
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index 0b03dffe99b5..d99cfaf535de 100644
--- a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -1537,9 +1537,9 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT: retl
;
@@ -1673,13 +1673,13 @@ define void @test_mm_setcsr(i32 %a0) nounwind {
define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
; X32-LABEL: test_mm_setr_ps:
; X32: # BB#0:
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_ps:
diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll
index dfc1aefd31a6..68ab3f9f3205 100644
--- a/test/CodeGen/X86/sse1.ll
+++ b/test/CodeGen/X86/sse1.ll
@@ -66,7 +66,10 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: jne .LBB1_8
; X32-NEXT: .LBB1_7:
; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: jmp .LBB1_9
+; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB1_10
+; X32-NEXT: jmp .LBB1_11
; X32-NEXT: .LBB1_1:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
@@ -77,11 +80,10 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: je .LBB1_7
; X32-NEXT: .LBB1_8: # %entry
; X32-NEXT: xorps %xmm3, %xmm3
-; X32-NEXT: .LBB1_9: # %entry
-; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: jne .LBB1_11
-; X32-NEXT: # BB#10:
+; X32-NEXT: .LBB1_10:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: .LBB1_11: # %entry
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -103,7 +105,10 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X64-NEXT: jne .LBB1_8
; X64-NEXT: .LBB1_7:
; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X64-NEXT: jmp .LBB1_9
+; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB1_10
+; X64-NEXT: jmp .LBB1_11
; X64-NEXT: .LBB1_1:
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: testl %edx, %edx
@@ -114,11 +119,10 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X64-NEXT: je .LBB1_7
; X64-NEXT: .LBB1_8: # %entry
; X64-NEXT: xorps %xmm3, %xmm3
-; X64-NEXT: .LBB1_9: # %entry
-; X64-NEXT: testl %esi, %esi
; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X64-NEXT: testl %esi, %esi
; X64-NEXT: jne .LBB1_11
-; X64-NEXT: # BB#10:
+; X64-NEXT: .LBB1_10:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: .LBB1_11: # %entry
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll
index 4d895ea264c5..aed5e0d1c32e 100644
--- a/test/CodeGen/X86/sse3-avx-addsub-2.ll
+++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -412,14 +412,14 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: subss %xmm4, %xmm3
-; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm0, %xmm4
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT: addss %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: addss %xmm0, %xmm1
-; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
@@ -431,12 +431,12 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; AVX-NEXT: vsubss %xmm4, %xmm3, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm4, %xmm4
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm4[0],xmm2[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
%2 = extractelement <4 x float> %B, i32 0
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index 503b9416c8d3..4a0dc9c1eb17 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -273,8 +273,8 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X32: ## BB#0: ## %entry
; X32-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: addss %xmm2, %xmm3
+; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; X32-NEXT: retl
;
@@ -282,8 +282,8 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X64: ## BB#0: ## %entry
; X64-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: addss %xmm2, %xmm3
+; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; X64-NEXT: retq
entry:
@@ -896,9 +896,9 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; X32-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: addps %xmm2, %xmm3
; X32-NEXT: addps %xmm3, %xmm0
; X32-NEXT: retl
@@ -908,9 +908,9 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: addps %xmm2, %xmm3
; X64-NEXT: addps %xmm3, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/stackmap-frame-setup.ll b/test/CodeGen/X86/stackmap-frame-setup.ll
index b83a8d61f6a2..df5ed5431b8a 100644
--- a/test/CodeGen/X86/stackmap-frame-setup.ll
+++ b/test/CodeGen/X86/stackmap-frame-setup.ll
@@ -7,11 +7,11 @@ entry:
store i64 11, i64* %metadata
store i64 12, i64* %metadata
store i64 13, i64* %metadata
-; ISEL: ADJCALLSTACKDOWN64 0, 0, implicit-def
+; ISEL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def
; ISEL-NEXT: STACKMAP
; ISEL-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
-; FAST-ISEL: ADJCALLSTACKDOWN64 0, 0, implicit-def
+; FAST-ISEL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def
; FAST-ISEL-NEXT: STACKMAP
; FAST-ISEL-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def
ret void
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll
index a42b3c96c3ae..1eef67764ab9 100644
--- a/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/test/CodeGen/X86/vec_int_to_fp.ll
@@ -4344,7 +4344,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_4
; AVX1-NEXT: # BB#5:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4
; AVX1-NEXT: jmp .LBB80_6
; AVX1-NEXT: .LBB80_4:
; AVX1-NEXT: movq %rax, %rcx
@@ -4352,22 +4352,22 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
-; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm4
; AVX1-NEXT: .LBB80_6:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_7
; AVX1-NEXT: # BB#8:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
; AVX1-NEXT: jmp .LBB80_9
; AVX1-NEXT: .LBB80_7:
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq %rcx
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
-; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3
; AVX1-NEXT: .LBB80_9:
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
@@ -4397,29 +4397,29 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX1-NEXT: vaddss %xmm5, %xmm5, %xmm5
; AVX1-NEXT: .LBB80_15:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_16
; AVX1-NEXT: # BB#17:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
; AVX1-NEXT: jmp .LBB80_18
; AVX1-NEXT: .LBB80_16:
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq %rcx
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
-; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
+; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4
; AVX1-NEXT: .LBB80_18:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vmovq %xmm4, %rax
+; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovq %xmm3, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_19
; AVX1-NEXT: # BB#20:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
; AVX1-NEXT: jmp .LBB80_21
; AVX1-NEXT: .LBB80_19:
; AVX1-NEXT: movq %rax, %rcx
@@ -4427,25 +4427,25 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
-; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm5
+; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0
; AVX1-NEXT: .LBB80_21:
+; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX1-NEXT: vpextrq $1, %xmm4, %rax
+; AVX1-NEXT: vpextrq $1, %xmm3, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_22
; AVX1-NEXT: # BB#23:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
; AVX1-NEXT: jmp .LBB80_24
; AVX1-NEXT: .LBB80_22:
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq %rcx
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
-; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
+; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1
; AVX1-NEXT: .LBB80_24:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -4471,7 +4471,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_4
; AVX2-NEXT: # BB#5:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4
; AVX2-NEXT: jmp .LBB80_6
; AVX2-NEXT: .LBB80_4:
; AVX2-NEXT: movq %rax, %rcx
@@ -4479,22 +4479,22 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
-; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm4
; AVX2-NEXT: .LBB80_6:
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_7
; AVX2-NEXT: # BB#8:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
; AVX2-NEXT: jmp .LBB80_9
; AVX2-NEXT: .LBB80_7:
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
-; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3
; AVX2-NEXT: .LBB80_9:
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
@@ -4524,29 +4524,29 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX2-NEXT: vaddss %xmm5, %xmm5, %xmm5
; AVX2-NEXT: .LBB80_15:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_16
; AVX2-NEXT: # BB#17:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
; AVX2-NEXT: jmp .LBB80_18
; AVX2-NEXT: .LBB80_16:
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
-; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
+; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4
; AVX2-NEXT: .LBB80_18:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vmovq %xmm3, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_19
; AVX2-NEXT: # BB#20:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
; AVX2-NEXT: jmp .LBB80_21
; AVX2-NEXT: .LBB80_19:
; AVX2-NEXT: movq %rax, %rcx
@@ -4554,25 +4554,25 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
-; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm5
+; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0
; AVX2-NEXT: .LBB80_21:
+; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: vpextrq $1, %xmm3, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_22
; AVX2-NEXT: # BB#23:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
; AVX2-NEXT: jmp .LBB80_24
; AVX2-NEXT: .LBB80_22:
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
-; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
+; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1
; AVX2-NEXT: .LBB80_24:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0]
; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
diff --git a/test/CodeGen/X86/vec_set-2.ll b/test/CodeGen/X86/vec_set-2.ll
index 443264cdffd4..51c8b2111107 100644
--- a/test/CodeGen/X86/vec_set-2.ll
+++ b/test/CodeGen/X86/vec_set-2.ll
@@ -1,11 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,-sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X64
define <4 x float> @test1(float %a) nounwind {
-; CHECK-LABEL: test1:
-; CHECK: # BB#0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: retl
+; X86-LABEL: test1:
+; X86: # BB#0:
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: retq
%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 0
%tmp5 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1
%tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 2
@@ -14,10 +22,15 @@ define <4 x float> @test1(float %a) nounwind {
}
define <2 x i64> @test(i32 %a) nounwind {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: retl
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: movd %edi, %xmm0
+; X64-NEXT: retq
%tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0
%tmp6 = insertelement <4 x i32> %tmp, i32 0, i32 1
%tmp8 = insertelement <4 x i32> %tmp6, i32 0, i32 2
diff --git a/test/CodeGen/X86/vec_set-3.ll b/test/CodeGen/X86/vec_set-3.ll
index ee4a08599968..b34f30924a8d 100644
--- a/test/CodeGen/X86/vec_set-3.ll
+++ b/test/CodeGen/X86/vec_set-3.ll
@@ -1,11 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,+sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,+sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+sse4.1 | FileCheck %s --check-prefix=X64
define <4 x float> @test(float %a) {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = zero,mem[0],zero,zero
-; CHECK-NEXT: retl
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: insertps {{.*#+}} xmm0 = zero,mem[0],zero,zero
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
+; X64-NEXT: retq
%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1
%tmp5 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 2
%tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 3
@@ -13,11 +19,17 @@ define <4 x float> @test(float %a) {
}
define <2 x i64> @test2(i32 %a) {
-; CHECK-LABEL: test2:
-; CHECK: # BB#0:
-; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
-; CHECK-NEXT: retl
+; X86-LABEL: test2:
+; X86: # BB#0:
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
+; X86-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # BB#0:
+; X64-NEXT: movd %edi, %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
+; X64-NEXT: retq
%tmp7 = insertelement <4 x i32> zeroinitializer, i32 %a, i32 2
%tmp9 = insertelement <4 x i32> %tmp7, i32 0, i32 3
%tmp10 = bitcast <4 x i32> %tmp9 to <2 x i64>
@@ -25,10 +37,15 @@ define <2 x i64> @test2(i32 %a) {
}
define <4 x float> @test3(<4 x float> %A) {
-; CHECK-LABEL: test3:
-; CHECK: # BB#0:
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
-; CHECK-NEXT: retl
+; X86-LABEL: test3:
+; X86: # BB#0:
+; X86-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
+; X86-NEXT: retl
+;
+; X64-LABEL: test3:
+; X64: # BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
+; X64-NEXT: retq
%tmp0 = extractelement <4 x float> %A, i32 0
%tmp1 = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef >, float %tmp0, i32 1
%tmp2 = insertelement <4 x float> %tmp1, float 0.000000e+00, i32 2
diff --git a/test/CodeGen/X86/vec_set-4.ll b/test/CodeGen/X86/vec_set-4.ll
index 8f35529d61b4..09142e16aa6e 100644
--- a/test/CodeGen/X86/vec_set-4.ll
+++ b/test/CodeGen/X86/vec_set-4.ll
@@ -1,12 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
define <2 x i64> @test(i16 %a) nounwind {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: pxor %xmm0, %xmm0
-; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0
-; CHECK-NEXT: retl
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: pxor %xmm0, %xmm0
+; X86-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pinsrw $3, %edi, %xmm0
+; X64-NEXT: retq
%tmp10 = insertelement <8 x i16> zeroinitializer, i16 %a, i32 3
%tmp12 = insertelement <8 x i16> %tmp10, i16 0, i32 4
%tmp14 = insertelement <8 x i16> %tmp12, i16 0, i32 5
@@ -17,12 +24,19 @@ define <2 x i64> @test(i16 %a) nounwind {
}
define <2 x i64> @test2(i8 %a) nounwind {
-; CHECK-LABEL: test2:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: pxor %xmm0, %xmm0
-; CHECK-NEXT: pinsrw $5, %eax, %xmm0
-; CHECK-NEXT: retl
+; X86-LABEL: test2:
+; X86: # BB#0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pxor %xmm0, %xmm0
+; X86-NEXT: pinsrw $5, %eax, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # BB#0:
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pinsrw $5, %eax, %xmm0
+; X64-NEXT: retq
%tmp24 = insertelement <16 x i8> zeroinitializer, i8 %a, i32 10
%tmp26 = insertelement <16 x i8> %tmp24, i8 0, i32 11
%tmp28 = insertelement <16 x i8> %tmp26, i8 0, i32 12
diff --git a/test/CodeGen/X86/vec_set-6.ll b/test/CodeGen/X86/vec_set-6.ll
index 4429834b8ef0..3c9aca3a02da 100644
--- a/test/CodeGen/X86/vec_set-6.ll
+++ b/test/CodeGen/X86/vec_set-6.ll
@@ -1,13 +1,22 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,+sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,+sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+sse4.1 | FileCheck %s --check-prefix=X64
define <4 x float> @test(float %a, float %b, float %c) nounwind {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
-; CHECK-NEXT: retl
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
+; X64-NEXT: retq
%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1
%tmp8 = insertelement <4 x float> %tmp, float %b, i32 2
%tmp10 = insertelement <4 x float> %tmp8, float %c, i32 3
diff --git a/test/CodeGen/X86/vec_set-7.ll b/test/CodeGen/X86/vec_set-7.ll
index e8fe6debb140..757a0d44cd43 100644
--- a/test/CodeGen/X86/vec_set-7.ll
+++ b/test/CodeGen/X86/vec_set-7.ll
@@ -1,12 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
define <2 x i64> @test(<2 x i64>* %p) nounwind {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: retl
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: retq
%tmp = bitcast <2 x i64>* %p to double*
%tmp.upgrd.1 = load double, double* %tmp
%tmp.upgrd.2 = insertelement <2 x double> undef, double %tmp.upgrd.1, i32 0
diff --git a/test/CodeGen/X86/vec_set-8.ll b/test/CodeGen/X86/vec_set-8.ll
index 7a4326c01bb7..a9dceb90855a 100644
--- a/test/CodeGen/X86/vec_set-8.ll
+++ b/test/CodeGen/X86/vec_set-8.ll
@@ -1,11 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
define <2 x i64> @test(i64 %i) nounwind {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: movq %rdi, %xmm0
-; CHECK-NEXT: retq
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %xmm0
+; X64-NEXT: retq
%tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0
%tmp11 = insertelement <2 x i64> %tmp10, i64 0, i32 1
ret <2 x i64> %tmp11
diff --git a/test/CodeGen/X86/vec_set-A.ll b/test/CodeGen/X86/vec_set-A.ll
index cae39a3d775b..259ace98d362 100644
--- a/test/CodeGen/X86/vec_set-A.ll
+++ b/test/CodeGen/X86/vec_set-A.ll
@@ -1,12 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
define <2 x i64> @test1() nounwind {
-; CHECK-LABEL: test1:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl $1, %eax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: retl
+; X86-LABEL: test1:
+; X86: # BB#0:
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: movd %eax, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: # BB#0:
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: movq %rax, %xmm0
+; X64-NEXT: retq
ret <2 x i64> < i64 1, i64 0 >
}
diff --git a/test/CodeGen/X86/vec_set-B.ll b/test/CodeGen/X86/vec_set-B.ll
index 0580a3376656..ecd9b57cfd0c 100644
--- a/test/CodeGen/X86/vec_set-B.ll
+++ b/test/CodeGen/X86/vec_set-B.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
; These should both generate something like this:
;_test3:
@@ -9,26 +10,37 @@
; ret
define <2 x i64> @test3(i64 %arg) nounwind {
-; CHECK-LABEL: test3:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl $1234567, %eax # imm = 0x12D687
-; CHECK-NEXT: andl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: retl
+; X86-LABEL: test3:
+; X86: # BB#0:
+; X86-NEXT: movl $1234567, %eax # imm = 0x12D687
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd %eax, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test3:
+; X64: # BB#0:
+; X64-NEXT: andl $1234567, %edi # imm = 0x12D687
+; X64-NEXT: movq %rdi, %xmm0
+; X64-NEXT: retq
%A = and i64 %arg, 1234567
%B = insertelement <2 x i64> zeroinitializer, i64 %A, i32 0
ret <2 x i64> %B
}
define <2 x i64> @test2(i64 %arg) nounwind {
-; CHECK-LABEL: test2:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl $1234567, %eax # imm = 0x12D687
-; CHECK-NEXT: andl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: retl
+; X86-LABEL: test2:
+; X86: # BB#0:
+; X86-NEXT: movl $1234567, %eax # imm = 0x12D687
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd %eax, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # BB#0:
+; X64-NEXT: andl $1234567, %edi # imm = 0x12D687
+; X64-NEXT: movq %rdi, %xmm0
+; X64-NEXT: retq
%A = and i64 %arg, 1234567
%B = insertelement <2 x i64> undef, i64 %A, i32 0
ret <2 x i64> %B
}
-
diff --git a/test/CodeGen/X86/vec_set-C.ll b/test/CodeGen/X86/vec_set-C.ll
index 994bc2b3056e..865e2fb83f17 100644
--- a/test/CodeGen/X86/vec_set-C.ll
+++ b/test/CodeGen/X86/vec_set-C.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-linux-gnu -mattr=+sse2,-avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-linux-gnu -mattr=+sse2,-avx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2,-avx | FileCheck %s --check-prefix=X64
define <2 x i64> @t1(i64 %x) nounwind {
-; X32-LABEL: t1:
-; X32: # BB#0:
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: retl
+; X86-LABEL: t1:
+; X86: # BB#0:
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: retl
;
; X64-LABEL: t1:
; X64: # BB#0:
diff --git a/test/CodeGen/X86/vec_set.ll b/test/CodeGen/X86/vec_set.ll
index 49bd3beef75a..6439a6dcb00b 100644
--- a/test/CodeGen/X86/vec_set.ll
+++ b/test/CodeGen/X86/vec_set.ll
@@ -1,27 +1,48 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,-sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X64
define void @test(<8 x i16>* %b, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
-; CHECK-LABEL: test:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; CHECK-NEXT: movdqa %xmm3, (%eax)
-; CHECK-NEXT: retl
+; X86-LABEL: test:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X86-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; X86-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; X86-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X86-NEXT: movdqa %xmm3, (%eax)
+; X86-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0:
+; X64-NEXT: movd %r8d, %xmm0
+; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: movd %edx, %xmm1
+; X64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-NEXT: movd %ecx, %xmm0
+; X64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; X64-NEXT: movd %r9d, %xmm2
+; X64-NEXT: movd %esi, %xmm3
+; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; X64-NEXT: movdqa %xmm3, (%rdi)
+; X64-NEXT: retq
%tmp = insertelement <8 x i16> zeroinitializer, i16 %a0, i32 0
%tmp2 = insertelement <8 x i16> %tmp, i16 %a1, i32 1
%tmp4 = insertelement <8 x i16> %tmp2, i16 %a2, i32 2
diff --git a/test/CodeGen/X86/vector-bitreverse.ll b/test/CodeGen/X86/vector-bitreverse.ll
index 226c0adbaf3c..2fb821555dba 100644
--- a/test/CodeGen/X86/vector-bitreverse.ll
+++ b/test/CodeGen/X86/vector-bitreverse.ll
@@ -2372,10 +2372,10 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpsrlq $24, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
-; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm3
-; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm3, %zmm3
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
-; AVX512F-NEXT: vporq %zmm1, %zmm3, %zmm1
+; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm2
+; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
+; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpsllq $8, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
; AVX512F-NEXT: vpsllq $24, %zmm0, %zmm3
diff --git a/test/CodeGen/X86/vector-blend.ll b/test/CodeGen/X86/vector-blend.ll
index a05a981daa1f..f0a5fe1dbfff 100644
--- a/test/CodeGen/X86/vector-blend.ll
+++ b/test/CodeGen/X86/vector-blend.ll
@@ -848,10 +848,10 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: pandn %xmm5, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pandn %xmm4, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_logic_v8i32:
@@ -860,10 +860,10 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: pand %xmm1, %xmm3
; SSSE3-NEXT: pandn %xmm5, %xmm1
+; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm2
; SSSE3-NEXT: pandn %xmm4, %xmm0
; SSSE3-NEXT: por %xmm2, %xmm0
-; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_logic_v8i32:
diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll
index f1f795bf3cb0..e3261d15538f 100644
--- a/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -1,15 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VLCD
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefix=X64 --check-prefix=AVX512VLBWDQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512 --check-prefix=AVX512VLCD
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512 --check-prefix=AVX512CD
;
; Just one 32-bit run to make sure we do reasonable things for i64 lzcnt.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE41
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE
define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
@@ -194,16 +196,46 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv2i64:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1
+; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
+; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqd %xmm4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv2i64:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv2i64:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64:
@@ -429,16 +461,46 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv2i64u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1
+; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
+; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqd %xmm4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv2i64u:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv2i64u:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64u:
@@ -651,16 +713,41 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv4i32:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1
+; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
+; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv4i32:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i32:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32:
@@ -867,16 +954,41 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv4i32u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1
+; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
+; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv4i32u:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i32u:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32u:
@@ -1054,8 +1166,28 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv8i16:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1
+; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
+; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv8i16:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
@@ -1063,7 +1195,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i16:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -1238,8 +1370,28 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv8i16u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1
+; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
+; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv8i16u:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
@@ -1247,7 +1399,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i16u:
-; AVX512CD: ## BB#0:
+; AVX512CD: # BB#0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -1399,8 +1551,23 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv16i8:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512VLBWDQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512-LABEL: testv16i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1546,8 +1713,23 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512VLBWDQ-LABEL: testv16i8u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512VLBWDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1
+; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm2, %xmm1
+; AVX512VLBWDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512VLBWDQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512-LABEL: testv16i8u:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1582,17 +1764,17 @@ define <2 x i64> @foldv2i64() nounwind {
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv2i64:
-; AVX: # BB#0:
-; AVX-NEXT: movl $55, %eax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv2i64:
+; NOBW: # BB#0:
+; NOBW-NEXT: movl $55, %eax
+; NOBW-NEXT: vmovq %rax, %xmm0
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv2i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: movl $55, %eax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv2i64:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: movl $55, %eax
+; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64:
; X32-SSE: # BB#0:
@@ -1610,17 +1792,17 @@ define <2 x i64> @foldv2i64u() nounwind {
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv2i64u:
-; AVX: # BB#0:
-; AVX-NEXT: movl $55, %eax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv2i64u:
+; NOBW: # BB#0:
+; NOBW-NEXT: movl $55, %eax
+; NOBW-NEXT: vmovq %rax, %xmm0
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv2i64u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: movl $55, %eax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv2i64u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: movl $55, %eax
+; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64u:
; X32-SSE: # BB#0:
@@ -1637,15 +1819,15 @@ define <4 x i32> @foldv4i32() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv4i32:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv4i32:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv4i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv4i32:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32:
; X32-SSE: # BB#0:
@@ -1661,15 +1843,15 @@ define <4 x i32> @foldv4i32u() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv4i32u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv4i32u:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv4i32u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv4i32u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32u:
; X32-SSE: # BB#0:
@@ -1685,15 +1867,15 @@ define <8 x i16> @foldv8i16() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv8i16:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv8i16:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv8i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv8i16:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16:
; X32-SSE: # BB#0:
@@ -1709,15 +1891,15 @@ define <8 x i16> @foldv8i16u() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv8i16u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv8i16u:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv8i16u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv8i16u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16u:
; X32-SSE: # BB#0:
@@ -1733,15 +1915,15 @@ define <16 x i8> @foldv16i8() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv16i8:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv16i8:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv16i8:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv16i8:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8:
; X32-SSE: # BB#0:
@@ -1757,15 +1939,15 @@ define <16 x i8> @foldv16i8u() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv16i8u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
-; AVX-NEXT: retq
+; NOBW-LABEL: foldv16i8u:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; NOBW-NEXT: retq
;
-; AVX512-LABEL: foldv16i8u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
-; AVX512-NEXT: retq
+; AVX512VLBWDQ-LABEL: foldv16i8u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8u:
; X32-SSE: # BB#0:
diff --git a/test/CodeGen/X86/vector-lzcnt-256.ll b/test/CodeGen/X86/vector-lzcnt-256.ll
index 53cb4d8e445b..185e1f4865ea 100644
--- a/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -1,11 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VLCD
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefix=X64 --check-prefix=AVX512VLBWDQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512 --check-prefix=AVX512VLCD
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512 --check-prefix=AVX512CD
;
; Just one 32-bit run to make sure we do reasonable things for i64 lzcnt.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32-AVX
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
@@ -93,16 +95,76 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv4i64:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VLBWDQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VLBWDQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv4i64:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i64:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
@@ -225,16 +287,76 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv4i64u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv4i64u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VLBWDQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VLBWDQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv4i64u:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i64u:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
@@ -342,16 +464,66 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv8i32:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VLBWDQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VLBWDQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv8i32:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i32:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
@@ -454,16 +626,66 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv8i32u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv8i32u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VLBWDQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VLBWDQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpand %ymm2, %ymm1, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512VLCD-LABEL: testv8i32u:
-; AVX512VLCD: ## BB#0:
+; AVX512VLCD: # BB#0:
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i32u:
-; AVX512CD: ## BB#0:
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD: # BB#0:
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
@@ -551,8 +773,48 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX2-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv16i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv16i16:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VLBWDQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VLBWDQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512-LABEL: testv16i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
@@ -638,8 +900,48 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX2-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv16i16u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv16i16u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX512VLBWDQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm5
+; AVX512VLBWDQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VLBWDQ-NEXT: vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512-LABEL: testv16i16u:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
@@ -710,8 +1012,38 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv32i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv32i8:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm1
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VLBWDQ-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512-LABEL: testv32i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm1, %zmm1
@@ -784,8 +1116,38 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512VL-LABEL: testv32i8u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: testv32i8u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512VLBWDQ-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512VLBWDQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512VLBWDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VLBWDQ-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm1
+; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512VLBWDQ-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VLBWDQ-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VLBWDQ-NEXT: retq
+;
; AVX512-LABEL: testv32i8u:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm1, %zmm1
@@ -818,15 +1180,10 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
}
define <4 x i64> @foldv4i64() nounwind {
-; AVX-LABEL: foldv4i64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv4i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; AVX512-NEXT: retq
+; X64-LABEL: foldv4i64:
+; X64: # BB#0:
+; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64:
; X32-AVX: # BB#0:
@@ -837,15 +1194,10 @@ define <4 x i64> @foldv4i64() nounwind {
}
define <4 x i64> @foldv4i64u() nounwind {
-; AVX-LABEL: foldv4i64u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv4i64u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; AVX512-NEXT: retq
+; X64-LABEL: foldv4i64u:
+; X64: # BB#0:
+; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64u:
; X32-AVX: # BB#0:
@@ -856,15 +1208,10 @@ define <4 x i64> @foldv4i64u() nounwind {
}
define <8 x i32> @foldv8i32() nounwind {
-; AVX-LABEL: foldv8i32:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv8i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; AVX512-NEXT: retq
+; X64-LABEL: foldv8i32:
+; X64: # BB#0:
+; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32:
; X32-AVX: # BB#0:
@@ -875,15 +1222,10 @@ define <8 x i32> @foldv8i32() nounwind {
}
define <8 x i32> @foldv8i32u() nounwind {
-; AVX-LABEL: foldv8i32u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv8i32u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; AVX512-NEXT: retq
+; X64-LABEL: foldv8i32u:
+; X64: # BB#0:
+; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32u:
; X32-AVX: # BB#0:
@@ -894,15 +1236,15 @@ define <8 x i32> @foldv8i32u() nounwind {
}
define <16 x i16> @foldv16i16() nounwind {
-; AVX-LABEL: foldv16i16:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv16i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; AVX512-NEXT: retq
+; NOBW-LABEL: foldv16i16:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; NOBW-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: foldv16i16:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16:
; X32-AVX: # BB#0:
@@ -913,15 +1255,15 @@ define <16 x i16> @foldv16i16() nounwind {
}
define <16 x i16> @foldv16i16u() nounwind {
-; AVX-LABEL: foldv16i16u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv16i16u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; AVX512-NEXT: retq
+; NOBW-LABEL: foldv16i16u:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; NOBW-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: foldv16i16u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16u:
; X32-AVX: # BB#0:
@@ -932,15 +1274,15 @@ define <16 x i16> @foldv16i16u() nounwind {
}
define <32 x i8> @foldv32i8() nounwind {
-; AVX-LABEL: foldv32i8:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv32i8:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; AVX512-NEXT: retq
+; NOBW-LABEL: foldv32i8:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; NOBW-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: foldv32i8:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8:
; X32-AVX: # BB#0:
@@ -951,15 +1293,15 @@ define <32 x i8> @foldv32i8() nounwind {
}
define <32 x i8> @foldv32i8u() nounwind {
-; AVX-LABEL: foldv32i8u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: foldv32i8u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; AVX512-NEXT: retq
+; NOBW-LABEL: foldv32i8u:
+; NOBW: # BB#0:
+; NOBW-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; NOBW-NEXT: retq
+;
+; AVX512VLBWDQ-LABEL: foldv32i8u:
+; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX512VLBWDQ-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8u:
; X32-AVX: # BB#0:
diff --git a/test/CodeGen/X86/vector-narrow-binop.ll b/test/CodeGen/X86/vector-narrow-binop.ll
new file mode 100644
index 000000000000..f737ea2b7fba
--- /dev/null
+++ b/test/CodeGen/X86/vector-narrow-binop.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
+
+; AVX1 has support for 256-bit bitwise logic because the FP variants were included.
+; If using those ops requires extra insert/extract though, it's probably not worth it.
+
+define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
+; SSE-LABEL: PR32790:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: psubd %xmm6, %xmm0
+; SSE-NEXT: psubd %xmm7, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: PR32790:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpaddd %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: PR32790:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: PR32790:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %add = add <8 x i32> %a, %b
+ %and = and <8 x i32> %add, %c
+ %sub = sub <8 x i32> %and, %d
+ ret <8 x i32> %sub
+}
+
+; In a more extreme case, even the later AVX targets should avoid extract/insert just
+; because 256-bit ops are supported.
+
+define <4 x i32> @do_not_use_256bit_op(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+; SSE-LABEL: do_not_use_256bit_op:
+; SSE: # BB#0:
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: do_not_use_256bit_op:
+; AVX1: # BB#0:
+; AVX1-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
+; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: do_not_use_256bit_op:
+; AVX2: # BB#0:
+; AVX2-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
+; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: do_not_use_256bit_op:
+; AVX512: # BB#0:
+; AVX512-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %concat1 = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %concat2 = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %and = and <8 x i32> %concat1, %concat2
+ %extract1 = shufflevector <8 x i32> %and, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %extract2 = shufflevector <8 x i32> %and, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %sub = sub <4 x i32> %extract1, %extract2
+ ret <4 x i32> %sub
+}
+
diff --git a/test/CodeGen/X86/vector-pcmp.ll b/test/CodeGen/X86/vector-pcmp.ll
index f05588a2920c..99a05c3d49c0 100644
--- a/test/CodeGen/X86/vector-pcmp.ll
+++ b/test/CodeGen/X86/vector-pcmp.ll
@@ -148,8 +148,8 @@ define <32 x i8> @test_pcmpgtb_256(<32 x i8> %x) {
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -177,8 +177,8 @@ define <16 x i16> @test_pcmpgtw_256(<16 x i16> %x) {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -206,8 +206,8 @@ define <8 x i32> @test_pcmpgtd_256(<8 x i32> %x) {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -242,14 +242,13 @@ define <4 x i64> @test_pcmpgtq_256(<4 x i64> %x) {
;
; AVX1-LABEL: test_pcmpgtq_256:
; AVX1: # BB#0:
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-shift-ashr-256.ll b/test/CodeGen/X86/vector-shift-ashr-256.ll
index af3ddcf8048e..09e143ddcd4d 100644
--- a/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -7,6 +7,10 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
+;
+; 32-bit runs to make sure we do reasonable things for i64 shifts.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX2
;
; Variable Shifts
@@ -81,6 +85,41 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsravq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm6
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm6, %xmm2
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm6, %xmm5
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
+; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
+; X32-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -147,6 +186,41 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; X32-AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsrad %xmm4, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpsrad %xmm5, %xmm2, %xmm5
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; X32-AVX1-NEXT: vpsrad %xmm6, %xmm2, %xmm6
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; X32-AVX1-NEXT: vpsrad %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; X32-AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
+; X32-AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X32-AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -253,6 +327,55 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
+; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X32-AVX1-NEXT: vpsraw $8, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
+; X32-AVX1-NEXT: vpsraw $4, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsraw $2, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsraw $1, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsraw $8, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; X32-AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
+; X32-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; X32-AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -436,6 +559,89 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
+; X32-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
+; X32-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
+; X32-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; X32-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; X32-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; X32-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; X32-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -499,6 +705,33 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsraq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
+; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = ashr <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -546,6 +779,21 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -593,6 +841,21 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X32-AVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X32-AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -776,6 +1039,84 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; X32-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpsraw $2, %xmm4, %xmm6
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpsraw $1, %xmm4, %xmm6
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $2, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $1, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm1
+; X32-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; X32-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; X32-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; X32-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -843,6 +1184,43 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm6
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm6, %xmm2
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm6, %xmm5
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
+; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
+; X32-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -893,6 +1271,29 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X32-AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrad $4, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrad $9, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsrad $8, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -980,6 +1381,40 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; X32-AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
+; X32-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; X32-AVX2-NEXT: vpsravd %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -1149,6 +1584,81 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8192,24640,41088,57536,49376,32928,16480,32]
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; X32-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpsraw $2, %xmm4, %xmm6
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpsraw $1, %xmm4, %xmm6
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $2, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $1, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm1
+; X32-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; X32-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
+; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; X32-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; X32-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
+; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -1206,6 +1716,25 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsraq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrad $7, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsrlq $7, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrad $7, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; X32-AVX2-NEXT: retl
%shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -1246,6 +1775,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsrad $5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -1286,6 +1828,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsraw $3, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -1352,6 +1907,31 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512VL-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; X32-AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; X32-AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; X32-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
diff --git a/test/CodeGen/X86/vector-shift-lshr-256.ll b/test/CodeGen/X86/vector-shift-lshr-256.ll
index 60575250d713..46be36b76e98 100644
--- a/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -7,6 +7,10 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
+;
+; 32-bit runs to make sure we do reasonable things for i64 shifts.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX2
;
; Variable Shifts
@@ -59,6 +63,26 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -125,6 +149,41 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; X32-AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsrld %xmm4, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
+; X32-AVX1-NEXT: vpsrld %xmm5, %xmm2, %xmm5
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; X32-AVX1-NEXT: vpsrld %xmm6, %xmm2, %xmm6
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; X32-AVX1-NEXT: vpsrld %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; X32-AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
+; X32-AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X32-AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -231,6 +290,55 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
+; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X32-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
+; X32-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; X32-AVX2-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3
+; X32-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; X32-AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -357,6 +465,56 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; X32-AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; X32-AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; X32-AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -401,6 +559,23 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = lshr <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -448,6 +623,21 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -495,6 +685,21 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -625,6 +830,55 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
+; X32-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -677,6 +931,27 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -727,6 +1002,29 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsrld $7, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpsrld $5, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X32-AVX1-NEXT: vpsrld $6, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrld $4, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsrld $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrld $9, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -814,6 +1112,40 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlw $2, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; X32-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; X32-AVX2-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3
+; X32-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; X32-AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -930,6 +1262,52 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw $2, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; X32-AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpsrlw $1, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; X32-AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrlw $1, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -974,6 +1352,19 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -1014,6 +1405,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrld $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsrld $5, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsrld $5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrld $5, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -1054,6 +1458,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -1103,6 +1520,23 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512VL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; X32-AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
diff --git a/test/CodeGen/X86/vector-shift-shl-256.ll b/test/CodeGen/X86/vector-shift-shl-256.ll
index 7f534050b6a7..4a134f440a78 100644
--- a/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -7,6 +7,10 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
+;
+; 32-bit runs to make sure we do reasonable things for i64 shifts.
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX --check-prefix=X32-AVX2
;
; Variable Shifts
@@ -56,6 +60,26 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -105,6 +129,27 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vpslld $23, %xmm2, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; X32-AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X32-AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; X32-AVX1-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -205,6 +250,55 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vpsllvw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
+; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X32-AVX1-NEXT: vpsllw $8, %xmm4, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
+; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $1, %xmm2, %xmm4
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsllw $8, %xmm0, %xmm4
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $1, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; X32-AVX2-NEXT: vpsllvd %ymm3, %ymm4, %ymm3
+; X32-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; X32-AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -319,6 +413,52 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: var_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; X32-AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X32-AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; X32-AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: var_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -363,6 +503,23 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = shl <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -410,6 +567,21 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpslld %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X32-AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -457,6 +629,21 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X32-AVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; X32-AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -577,6 +764,51 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatvar_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X32-AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
+; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatvar_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -626,6 +858,27 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -666,6 +919,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -719,6 +985,19 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -827,6 +1106,48 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
+;
+; X32-AVX1-LABEL: constant_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
+; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
+; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsllw $2, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X32-AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -871,6 +1192,19 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v4i64:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsllq $7, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsllq $7, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v4i64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllq $7, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -911,6 +1245,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpslld $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v8i32:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpslld $5, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpslld $5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v8i32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpslld $5, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -951,6 +1298,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v16i16:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vpsllw $3, %xmm0, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpsllw $3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v16i16:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -999,6 +1359,23 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512VL-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: splatconstant_shift_v32i8:
+; X32-AVX1: # BB#0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; X32-AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsllw $3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: splatconstant_shift_v32i8:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX2-NEXT: retl
%shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
diff --git a/test/CodeGen/X86/vector-shuffle-512-v32.ll b/test/CodeGen/X86/vector-shuffle-512-v32.ll
index 26cd7301fe60..7a5c992bb829 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v32.ll
@@ -1,129 +1,235 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck --check-prefixes=ALL,KNL %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefixes=ALL,SKX %s
target triple = "x86_64-unknown-unknown"
-define <32 x i16> @shuffle_v32i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i16> %a) {
-; ALL-LABEL: shuffle_v32i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastw %xmm0, %zmm0
-; ALL-NEXT: retq
+define <32 x i16> @shuffle_v32i16(<32 x i16> %a) {
+; KNL-LABEL: shuffle_v32i16:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
+; KNL-NEXT: vmovdqa %ymm0, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpbroadcastw %xmm0, %zmm0
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> undef, <32 x i32> zeroinitializer
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08(<32 x i16> %a) {
-; ALL-LABEL: shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08:
-; ALL: # BB#0:
-; ALL-NEXT: vextracti32x4 $1, %zmm0, %xmm0
-; ALL-NEXT: vpbroadcastw %xmm0, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08:
+; KNL: ## BB#0:
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
+; KNL-NEXT: vmovdqa %ymm0, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08:
+; SKX: ## BB#0:
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT: vpbroadcastw %xmm0, %zmm0
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> undef, <32 x i32> <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f(<32 x i16> %a) {
-; ALL-LABEL: shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f:
-; ALL: # BB#0:
-; ALL-NEXT: vmovdqu16 {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1,2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,31>
-; ALL-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f:
+; KNL: ## BB#0:
+; KNL-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
+; KNL-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[0,1,10,11,8,9,8,9,14,15,2,3,4,5,2,3,16,17,26,27,24,25,24,25,30,31,18,19,20,21,18,19]
+; KNL-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[4,5,10,11,4,5,6,7,14,15,2,3,4,5,2,3,20,21,26,27,20,21,22,23,30,31,18,19,20,21,18,19]
+; KNL-NEXT: vmovdqa {{.*#+}} ymm0 = <0,0,0,0,u,u,u,u,0,0,u,u,255,255,0,0,255,255,255,255,u,u,255,255,255,255,u,u,0,0,255,255>
+; KNL-NEXT: vpblendvb %ymm0, %ymm3, %ymm4, %ymm0
+; KNL-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,10,11,8,9,8,9,14,15,6,7,4,5,14,15,16,17,26,27,24,25,24,25,30,31,22,23,20,21,30,31]
+; KNL-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,255,255,u,u,u,u,255,255,u,u,0,0,255,255,0,0,0,0,u,u,0,0,0,0,u,u,255,255,u,u>
+; KNL-NEXT: vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; KNL-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,255,255,u,u,u,u,255,255,u,u,255,255,255,255,255,255,255,255,u,u,255,255,255,255,u,u,255,255,0,0>
+; KNL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f:
+; SKX: ## BB#0:
+; SKX-NEXT: vmovdqu16 {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1,2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,31>
+; SKX-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> undef, <32 x i32> <i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 1, i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 31>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38:
-; ALL: # BB#0:
-; ALL-NEXT: vmovdqu16 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24,15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,56]
-; ALL-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38:
+; KNL: ## BB#0:
+; KNL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; KNL-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; KNL-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[8,9,12,13,12,13,10,11,0,1,4,5,4,5,0,1]
+; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,1,0,3]
+; KNL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,3,2,2,4,5,6,7]
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm1
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm5
+; KNL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,10,11,8,9,14,15,4,5,2,3,2,3,6,7]
+; KNL-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,4,5,6,7,2,3,2,3,0,1,14,15]
+; KNL-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; KNL-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; KNL-NEXT: vextracti128 $1, %ymm3, %xmm3
+; KNL-NEXT: vpbroadcastw %xmm3, %ymm3
+; KNL-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0]
+; KNL-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
+; KNL-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,4]
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; KNL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38:
+; SKX: ## BB#0:
+; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24,15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,56]
+; SKX-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24, i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 56>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u:
-; ALL: # BB#0:
-; ALL-NEXT: vpunpcklwd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u:
+; KNL: ## BB#0:
+; KNL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u:
+; SKX: ## BB#0:
+; SKX-NEXT: vpunpcklwd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u:
-; ALL: # BB#0:
-; ALL-NEXT: vpunpckhwd {{.*#+}} zmm0 = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u:
+; KNL: ## BB#0:
+; KNL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u:
+; SKX: ## BB#0:
+; SKX-NEXT: vpunpckhwd {{.*#+}} zmm0 = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z:
-; ALL: # BB#0:
-; ALL-NEXT: vpsrld $16, %zmm0, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z:
+; KNL: ## BB#0:
+; KNL-NEXT: vpsrld $16, %ymm0, %ymm0
+; KNL-NEXT: vpsrld $16, %ymm1, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsrld $16, %zmm0, %zmm0
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 1, i32 34, i32 3, i32 34, i32 5, i32 34, i32 7, i32 34, i32 9, i32 34, i32 11, i32 34, i32 13, i32 34, i32 15, i32 34, i32 17, i32 34, i32 19, i32 34, i32 21, i32 34, i32 23, i32 34, i32 25, i32 34, i32 27, i32 34, i32 29, i32 34, i32 31, i32 34>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30:
-; ALL: # BB#0:
-; ALL-NEXT: vpslld $16, %zmm0, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30:
+; KNL: ## BB#0:
+; KNL-NEXT: vpslld $16, %ymm0, %ymm0
+; KNL-NEXT: vpslld $16, %ymm1, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30:
+; SKX: ## BB#0:
+; SKX-NEXT: vpslld $16, %zmm0, %zmm0
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 34, i32 0, i32 34, i32 2, i32 34, i32 4, i32 34, i32 6, i32 34, i32 8, i32 34, i32 10, i32 34, i32 12, i32 34, i32 14, i32 34, i32 16, i32 34, i32 18, i32 34, i32 20, i32 34, i32 22, i32 34, i32 24, i32 34, i32 26, i32 34, i32 28, i32 34, i32 30>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31:
-; ALL: # BB#0:
-; ALL-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31:
+; KNL: ## BB#0:
+; KNL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
+; KNL-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31:
+; SKX: ## BB#0:
+; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 17, i32 17, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 25, i32 25, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28:
-; ALL: # BB#0:
-; ALL-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28:
+; KNL: ## BB#0:
+; KNL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
+; KNL-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28:
+; SKX: ## BB#0:
+; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 21, i32 21, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 29, i32 29, i32 28, i32 28>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28(<32 x i16> %a, <32 x i16> %b) {
-; ALL-LABEL: shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28:
-; ALL: # BB#0:
-; ALL-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
-; ALL-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28:
+; KNL: ## BB#0:
+; KNL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
+; KNL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
+; KNL-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
+; KNL-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28:
+; SKX: ## BB#0:
+; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
+; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
+; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 5, i32 5, i32 4, i32 4, i32 9, i32 9, i32 8, i32 8, i32 13, i32 13, i32 12, i32 12, i32 17, i32 17, i32 16, i32 16, i32 21, i32 21, i32 20, i32 20, i32 25, i32 25, i32 24, i32 24, i32 29, i32 29, i32 28, i32 28>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<32 x i16> %a) {
-; ALL-LABEL: shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; ALL: # BB#0:
-; ALL-NEXT: movl $1, %eax
-; ALL-NEXT: kmovd %eax, %k1
-; ALL-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
+; KNL: ## BB#0:
+; KNL-NEXT: movl $65535, %eax ## imm = 0xFFFF
+; KNL-NEXT: vmovd %eax, %xmm1
+; KNL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
+; SKX: ## BB#0:
+; SKX-NEXT: movl $1, %eax
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
+; SKX-NEXT: retq
%shuffle = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
ret <32 x i16> %shuffle
}
define <32 x i16> @insert_dup_mem_v32i16_i32(i32* %ptr) {
-; ALL-LABEL: insert_dup_mem_v32i16_i32:
-; ALL: # BB#0:
-; ALL-NEXT: movl (%rdi), %eax
-; ALL-NEXT: vpbroadcastw %ax, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: insert_dup_mem_v32i16_i32:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastw (%rdi), %ymm0
+; KNL-NEXT: vmovdqa %ymm0, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_dup_mem_v32i16_i32:
+; SKX: ## BB#0:
+; SKX-NEXT: movl (%rdi), %eax
+; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %tmp, i32 0
%tmp2 = bitcast <4 x i32> %tmp1 to <8 x i16>
@@ -132,11 +238,19 @@ define <32 x i16> @insert_dup_mem_v32i16_i32(i32* %ptr) {
}
define <32 x i16> @insert_dup_mem_v32i16_sext_i16(i16* %ptr) {
-; ALL-LABEL: insert_dup_mem_v32i16_sext_i16:
-; ALL: # BB#0:
-; ALL-NEXT: movswl (%rdi), %eax
-; ALL-NEXT: vpbroadcastw %ax, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: insert_dup_mem_v32i16_sext_i16:
+; KNL: ## BB#0:
+; KNL-NEXT: movswl (%rdi), %eax
+; KNL-NEXT: vmovd %eax, %xmm0
+; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
+; KNL-NEXT: vmovdqa %ymm0, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_dup_mem_v32i16_sext_i16:
+; SKX: ## BB#0:
+; SKX-NEXT: movswl (%rdi), %eax
+; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: retq
%tmp = load i16, i16* %ptr, align 2
%tmp1 = sext i16 %tmp to i32
%tmp2 = insertelement <4 x i32> zeroinitializer, i32 %tmp1, i32 0
@@ -146,11 +260,17 @@ define <32 x i16> @insert_dup_mem_v32i16_sext_i16(i16* %ptr) {
}
define <32 x i16> @insert_dup_elt1_mem_v32i16_i32(i32* %ptr) #0 {
-; ALL-LABEL: insert_dup_elt1_mem_v32i16_i32:
-; ALL: # BB#0:
-; ALL-NEXT: movzwl 2(%rdi), %eax
-; ALL-NEXT: vpbroadcastw %ax, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: insert_dup_elt1_mem_v32i16_i32:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastw 2(%rdi), %ymm0
+; KNL-NEXT: vmovdqa %ymm0, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_dup_elt1_mem_v32i16_i32:
+; SKX: ## BB#0:
+; SKX-NEXT: movzwl 2(%rdi), %eax
+; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %tmp, i32 0
%tmp2 = bitcast <4 x i32> %tmp1 to <8 x i16>
@@ -159,11 +279,17 @@ define <32 x i16> @insert_dup_elt1_mem_v32i16_i32(i32* %ptr) #0 {
}
define <32 x i16> @insert_dup_elt3_mem_v32i16_i32(i32* %ptr) #0 {
-; ALL-LABEL: insert_dup_elt3_mem_v32i16_i32:
-; ALL: # BB#0:
-; ALL-NEXT: movzwl 2(%rdi), %eax
-; ALL-NEXT: vpbroadcastw %ax, %zmm0
-; ALL-NEXT: retq
+; KNL-LABEL: insert_dup_elt3_mem_v32i16_i32:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastw 2(%rdi), %ymm0
+; KNL-NEXT: vmovdqa %ymm0, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_dup_elt3_mem_v32i16_i32:
+; SKX: ## BB#0:
+; SKX-NEXT: movzwl 2(%rdi), %eax
+; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %tmp, i32 1
%tmp2 = bitcast <4 x i32> %tmp1 to <8 x i16>
@@ -172,19 +298,79 @@ define <32 x i16> @insert_dup_elt3_mem_v32i16_i32(i32* %ptr) #0 {
}
define <32 x i16> @shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz(<32 x i16> %a) {
-; ALL-LABEL: shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
-; ALL: # BB#0:
-; ALL-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
+; KNL: ## BB#0:
+; KNL-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; KNL-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; KNL-NEXT: vmovdqa %ymm2, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; SKX-NEXT: retq
%shuffle = shufflevector <32 x i16> zeroinitializer, <32 x i16> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 36, i32 0, i32 0, i32 0, i32 37, i32 0, i32 0, i32 0, i32 38, i32 0, i32 0, i32 0, i32 39, i32 0, i32 0, i32 0>
ret <32 x i16> %shuffle
}
define <32 x i16> @shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz(<32 x i16> %a) {
-; ALL-LABEL: shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
-; ALL: # BB#0:
-; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; ALL-NEXT: retq
+; KNL-LABEL: shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
+; KNL: ## BB#0:
+; KNL-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; KNL-NEXT: vmovdqa %ymm2, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; SKX-NEXT: retq
%shuffle = shufflevector <32 x i16> zeroinitializer, <32 x i16> %a, <32 x i32> <i32 32, i32 0, i32 33, i32 0, i32 34, i32 0, i32 35, i32 0, i32 36, i32 0, i32 37, i32 0, i32 38, i32 0, i32 39, i32 0, i32 40, i32 0, i32 41, i32 0, i32 42, i32 0, i32 43, i32 0, i32 44, i32 0, i32 45, i32 0, i32 46, i32 0, i32 47, i32 0>
ret <32 x i16> %shuffle
}
+
+define <8 x i16> @pr32967(<32 x i16> %v) {
+; KNL-LABEL: pr32967:
+; KNL: ## BB#0:
+; KNL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; KNL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; KNL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
+; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; KNL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
+; KNL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; KNL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; KNL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
+; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; KNL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
+; KNL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; KNL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; KNL-NEXT: retq
+;
+; SKX-LABEL: pr32967:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrw $5, %xmm0, %eax
+; SKX-NEXT: vpextrw $1, %xmm0, %ecx
+; SKX-NEXT: vmovd %ecx, %xmm1
+; SKX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; SKX-NEXT: vpextrw $1, %xmm2, %eax
+; SKX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; SKX-NEXT: vpextrw $5, %xmm2, %eax
+; SKX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; SKX-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; SKX-NEXT: vpextrw $1, %xmm2, %eax
+; SKX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; SKX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5],xmm1[6,7]
+; SKX-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; SKX-NEXT: vpextrw $1, %xmm0, %eax
+; SKX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; SKX-NEXT: vpextrw $5, %xmm0, %eax
+; SKX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %shuffle = shufflevector <32 x i16> %v, <32 x i16> undef, <8 x i32> <i32 1,i32 5,i32 9,i32 13,i32 17,i32 21,i32 25,i32 29>
+ ret <8 x i16> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-sqrt.ll b/test/CodeGen/X86/vector-sqrt.ll
index c5ac4466b5fa..8081e9482d67 100644
--- a/test/CodeGen/X86/vector-sqrt.ll
+++ b/test/CodeGen/X86/vector-sqrt.ll
@@ -29,11 +29,11 @@ define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
-; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
-; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm1
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; CHECK-NEXT: vsqrtss 12(%rdi), %xmm2, %xmm1
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; CHECK-NEXT: retq
entry:
%0 = load float, float* %v, align 4
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
index 34a9df1782a4..f5ec8e540b0b 100644
--- a/test/CodeGen/X86/viabs.ll
+++ b/test/CodeGen/X86/viabs.ll
@@ -405,16 +405,16 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
;
; AVX1-LABEL: test_abs_ge_v2i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_ge_v2i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -447,21 +447,20 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_abs_gt_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm4
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -504,35 +503,31 @@ define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
; AVX1-LABEL: test_abs_le_v8i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm6, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm5
+; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm1, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
@@ -581,37 +576,33 @@ define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm6, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; AVX1-NEXT: vpaddq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm5
+; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i64_fold:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm1, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vselect-pcmp.ll b/test/CodeGen/X86/vselect-pcmp.ll
index d33fda4f49c2..7807991b455d 100644
--- a/test/CodeGen/X86/vselect-pcmp.ll
+++ b/test/CodeGen/X86/vselect-pcmp.ll
@@ -35,9 +35,7 @@ define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask)
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%tr = icmp slt <8 x i16> %mask, zeroinitializer
%z = select <8 x i1> %tr, <8 x i16> %x, <8 x i16> %y
@@ -162,18 +160,14 @@ define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_sel_v16i16:
; AVX512: # BB#0:
; AVX512-NEXT: vpxor %ymm3, %ymm3, %ymm3
; AVX512-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
-; AVX512-NEXT: vpandn %ymm1, %ymm2, %ymm1
-; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512-NEXT: retq
%tr = icmp slt <16 x i16> %mask, zeroinitializer
%z = select <16 x i1> %tr, <16 x i16> %x, <16 x i16> %y
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 6fbec91e77a3..450e255313b3 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -11,13 +11,13 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm4
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vaddpd %ymm2, %ymm4, %ymm2
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm1
-; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vaddpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vaddpd %ymm0, %ymm2, %ymm0
; AVX-NEXT: retq
%wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
%strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -39,11 +39,11 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vmulpd %ymm0, %ymm4, %ymm0
; AVX-NEXT: retq
%wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
%strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -124,9 +124,9 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm1
-; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
%wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
diff --git a/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
index 7e370c25e31b..3052a0f615eb 100644
--- a/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
+++ b/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py for function "bar"
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
-;; In functions with 'no_caller_saved_registers' attribute, all registers should
+;; In functions with 'no_caller_saved_registers' attribute, all registers should
;; be preserved except for registers used for passing/returning arguments.
;; In the following function registers %RDI, %RSI and %XMM0 are used to store
;; arguments %a0, %a1 and %b0 accordingly. The value is returned in %RAX.
@@ -28,20 +28,20 @@ define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 {
ret i32 4
}
-;; Because "bar" has 'no_caller_saved_registers' attribute, function "foo"
-;; doesn't need to preserve registers except for the arguments passed
+;; Because "bar" has 'no_caller_saved_registers' attribute, function "foo"
+;; doesn't need to preserve registers except for the arguments passed
;; to "bar" (%ESI, %EDI and %XMM0).
define x86_64_sysvcc float @foo(i32 %a0, i32 %a1, float %b0) {
-; CHECK-LABEL: foo
-; CHECK: movaps %xmm0, %xmm1
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: movl %edi, %edx
-; CHECK-NEXT: callq bar
-; CHECK-NEXT: addl %edx, %eax
-; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: xorps %xmm0, %xmm0
-; CHECK-NEXT: cvtsi2ssl %eax, %xmm0
-; CHECK-NEXT: addss %xmm0, %xmm1
+; CHECK-LABEL: foo
+; CHECK: movaps %xmm0, %xmm1
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: movl %edi, %edx
+; CHECK-NEXT: callq bar
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: cvtsi2ssl %eax, %xmm0
+; CHECK-NEXT: addss %xmm0, %xmm1
; CHECK: retq
%call = call i32 @bar(i32 %a0, i32 %a1, float %b0) #0
%c0 = add i32 %a0, %call
diff --git a/test/CodeGen/X86/x86-no_caller_saved_registers.ll b/test/CodeGen/X86/x86-no_caller_saved_registers.ll
index 9c62e3ee6ba7..4e5403d1847f 100644
--- a/test/CodeGen/X86/x86-no_caller_saved_registers.ll
+++ b/test/CodeGen/X86/x86-no_caller_saved_registers.ll
@@ -1,31 +1,31 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
-; RUN: llc -mtriple=x86_64-unknown-unknown -O0 < %s | FileCheck %s
-; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse2 < %s | FileCheck %s
-; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse2 -O0 < %s | FileCheck %s
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;; In functions with 'no_caller_saved_registers' attribute, all registers should
-;; be preserved except for registers used for passing/returning arguments.
-;; The test checks that function "bar" preserves xmm0 register.
-;; It also checks that caller function "foo" does not store registers for callee
-;; "bar". For example, there is no store/load/access to xmm registers.
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-define i32 @bar(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8) #0 {
-; CHECK-LABEL: bar
-; CHECK: mov{{.*}} %xmm0
-; CHECK: mov{{.*}} {{.*}}, %xmm0
-; CHECK: ret
- call void asm sideeffect "", "~{xmm0}"()
- ret i32 1
-}
-
-define x86_intrcc void @foo(i8* nocapture readnone %c) {
-; CHECK-LABEL: foo
-; CHECK-NOT: xmm
-entry:
- tail call i32 @bar(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8) #0
- ret void
-}
-
-attributes #0 = { "no_caller_saved_registers" }
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse2 < %s | FileCheck %s
+; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse2 -O0 < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; In functions with 'no_caller_saved_registers' attribute, all registers should
+;; be preserved except for registers used for passing/returning arguments.
+;; The test checks that function "bar" preserves xmm0 register.
+;; It also checks that caller function "foo" does not store registers for callee
+;; "bar". For example, there is no store/load/access to xmm registers.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define i32 @bar(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8) #0 {
+; CHECK-LABEL: bar
+; CHECK: mov{{.*}} %xmm0
+; CHECK: mov{{.*}} {{.*}}, %xmm0
+; CHECK: ret
+ call void asm sideeffect "", "~{xmm0}"()
+ ret i32 1
+}
+
+define x86_intrcc void @foo(i8* nocapture readnone %c) {
+; CHECK-LABEL: foo
+; CHECK-NOT: xmm
+entry:
+ tail call i32 @bar(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8) #0
+ ret void
+}
+
+attributes #0 = { "no_caller_saved_registers" }
diff --git a/test/CodeGen/X86/x86-shrink-wrapping.ll b/test/CodeGen/X86/x86-shrink-wrapping.ll
index 5b6e773fe5d4..519f0d0924e3 100644
--- a/test/CodeGen/X86/x86-shrink-wrapping.ll
+++ b/test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -270,8 +270,6 @@ if.end: ; preds = %if.else, %for.end
ret i32 %sum.1
}
-declare void @somethingElse(...)
-
; Check with a more complex case that we do not have restore within the loop and
; save outside.
; CHECK-LABEL: loopInfoRestoreOutsideLoop:
@@ -982,3 +980,54 @@ for.inc:
}
attributes #4 = { "no-frame-pointer-elim"="true" }
+
+@x = external global i32, align 4
+@y = external global i32, align 4
+
+; The post-dominator tree does not include the branch containing the infinite
+; loop, which can result in a misplacement of the restore block, if we're
+; looking for the nearest common post-dominator of an "unreachable" block.
+
+; CHECK-LABEL: infiniteLoopNoSuccessor:
+; CHECK: ## BB#0:
+; Make sure the prologue happens in the entry block.
+; CHECK-NEXT: pushq %rbp
+; ...
+; Make sure we don't shrink-wrap.
+; CHECK: ## BB#1
+; CHECK-NOT: pushq %rbp
+; ...
+; Make sure the epilogue happens in the exit block.
+; CHECK: ## BB#5
+; CHECK: popq %rbp
+; CHECK-NEXT: retq
+define void @infiniteLoopNoSuccessor() #5 {
+ %1 = load i32, i32* @x, align 4
+ %2 = icmp ne i32 %1, 0
+ br i1 %2, label %3, label %4
+
+; <label>:3:
+ store i32 0, i32* @x, align 4
+ br label %4
+
+; <label>:4:
+ call void (...) @somethingElse()
+ %5 = load i32, i32* @y, align 4
+ %6 = icmp ne i32 %5, 0
+ br i1 %6, label %10, label %7
+
+; <label>:7:
+ %8 = call i32 (...) @something()
+ br label %9
+
+; <label>:9:
+ call void (...) @somethingElse()
+ br label %9
+
+; <label>:10:
+ ret void
+}
+
+declare void @somethingElse(...)
+
+attributes #5 = { nounwind "no-frame-pointer-elim-non-leaf" }
diff --git a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
index a100a1425dd1..5f56e2d80d73 100644
--- a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
@@ -499,8 +499,8 @@ declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define <4 x i64> @test_mm256_cmov_si256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_cmov_si256:
; X32: # BB#0:
-; X32-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; X32-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; X32-NEXT: vxorps %ymm3, %ymm3, %ymm3
+; X32-NEXT: vcmptrueps %ymm3, %ymm3, %ymm3
; X32-NEXT: vxorps %ymm3, %ymm2, %ymm3
; X32-NEXT: vandps %ymm2, %ymm0, %ymm0
; X32-NEXT: vandps %ymm3, %ymm1, %ymm1
@@ -509,8 +509,8 @@ define <4 x i64> @test_mm256_cmov_si256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64>
;
; X64-LABEL: test_mm256_cmov_si256:
; X64: # BB#0:
-; X64-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; X64-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; X64-NEXT: vxorps %ymm3, %ymm3, %ymm3
+; X64-NEXT: vcmptrueps %ymm3, %ymm3, %ymm3
; X64-NEXT: vxorps %ymm3, %ymm2, %ymm3
; X64-NEXT: vandps %ymm2, %ymm0, %ymm0
; X64-NEXT: vandps %ymm3, %ymm1, %ymm1
diff --git a/test/DebugInfo/COFF/local-variables.ll b/test/DebugInfo/COFF/local-variables.ll
index d1ad8767d413..c0bac0d174a9 100644
--- a/test/DebugInfo/COFF/local-variables.ll
+++ b/test/DebugInfo/COFF/local-variables.ll
@@ -28,7 +28,6 @@
; ASM: .seh_proc f
; ASM: # BB#0: # %entry
; ASM: subq $56, %rsp
-; ASM: #DEBUG_VALUE: f:param <- [%RSP+52]
; ASM: movl %ecx, 52(%rsp)
; ASM: [[prologue_end:\.Ltmp.*]]:
; ASM: .cv_loc 0 1 8 7 # t.cpp:8:7
@@ -36,8 +35,6 @@
; ASM: je .LBB0_2
; ASM: [[if_start:\.Ltmp.*]]:
; ASM: # BB#1: # %if.then
-; ASM: #DEBUG_VALUE: f:param <- [%RSP+52]
-; ASM: #DEBUG_VALUE: a <- [%RSP+40]
; ASM: .cv_loc 0 1 9 9 # t.cpp:9:9
; ASM: movl $42, 40(%rsp)
; ASM: [[inline_site1:\.Ltmp.*]]:
@@ -51,8 +48,6 @@
; ASM: jmp .LBB0_3
; ASM: [[else_start:\.Ltmp.*]]:
; ASM: .LBB0_2: # %if.else
-; ASM: #DEBUG_VALUE: f:param <- [%RSP+52]
-; ASM: #DEBUG_VALUE: b <- [%RSP+36]
; ASM: .cv_loc 0 1 13 9 # t.cpp:13:9
; ASM: movl $42, 36(%rsp)
; ASM: [[inline_site2:\.Ltmp.*]]:
diff --git a/test/DebugInfo/COFF/no-cus.ll b/test/DebugInfo/COFF/no-cus.ll
new file mode 100644
index 000000000000..349fe680de66
--- /dev/null
+++ b/test/DebugInfo/COFF/no-cus.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -filetype=obj -o %t.o
+; RUN: llvm-objdump -section-headers %t.o | FileCheck %s
+
+; Don't emit debug info in this scenario and don't crash.
+
+; CHECK-NOT: .debug$S
+; CHECK: .text
+; CHECK-NOT: .debug$S
+
+; ModuleID = 't.cpp'
+source_filename = "t.cpp"
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.10.24728"
+
+define void @f() {
+entry:
+ ret void
+}
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 2, !"CodeView", i32 1}
+!1 = !{i32 1, !"PIC Level", i32 2}
+!2 = !{!"clang version 5.0.0 "}
diff --git a/test/DebugInfo/Inputs/typeunit-header.elf-x86-64 b/test/DebugInfo/Inputs/typeunit-header.elf-x86-64
new file mode 100644
index 000000000000..26fb0a5177d0
--- /dev/null
+++ b/test/DebugInfo/Inputs/typeunit-header.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/Inputs/typeunit-header.s b/test/DebugInfo/Inputs/typeunit-header.s
new file mode 100644
index 000000000000..802eb01c552c
--- /dev/null
+++ b/test/DebugInfo/Inputs/typeunit-header.s
@@ -0,0 +1,49 @@
+# Test object with an artificially constructed type unit header to verify
+# that the length field is correctly used to verify the validity of the
+# type_offset field.
+#
+# To generate the test object:
+# llvm-mc -triple x86_64-unknown-linux typeunit-header.s -filetype=obj \
+# -o typeunit-header.elf-x86-64
+#
+# We only have an abbreviation for the type unit die which is all we need.
+# Real type unit dies have quite different attributes of course, but we
+# just need to demonstrate an issue with validating length, so we just give it
+# a single visibility attribute.
+ .section .debug_abbrev,"",@progbits
+ .byte 0x01 # Abbrev code
+ .byte 0x41 # DW_TAG_type_unit
+ .byte 0x01 # DW_CHILDREN_yes
+ .byte 0x17 # DW_AT_visibility
+ .byte 0x0b # DW_FORM_data1
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x02 # Abbrev code
+ .byte 0x13 # DW_TAG_structure_type
+ .byte 0x00 # DW_CHILDREN_no (no members)
+ .byte 0x17 # DW_AT_visibility
+ .byte 0x0b # DW_FORM_data1
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x00 # EOM(3)
+
+ .section .debug_types,"",@progbits
+# DWARF v4 Type unit header - DWARF32 format.
+TU_4_32_start:
+ .long TU_4_32_end-TU_4_32_version # Length of Unit
+TU_4_32_version:
+ .short 4 # DWARF version number
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 8 # Address Size (in bytes)
+ .quad 0x0011223344556677 # Type Signature
+ .long TU_4_32_type-TU_4_32_start # Type offset
+# The type-unit DIE, which has just a visibility attribute.
+ .byte 1 # Abbreviation code
+ .byte 1 # DW_VIS_local
+# The type DIE, which also just has a one-byte visibility attribute.
+TU_4_32_type:
+ .byte 2 # Abbreviation code
+ .byte 1 # DW_VIS_local
+ .byte 0 # NULL
+ .byte 0 # NULL
+TU_4_32_end:
diff --git a/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test b/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test
index 0bb3e001d3a4..997cdd9f6bac 100644
--- a/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test
+++ b/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test
@@ -1,5 +1,5 @@
-; RUN: llvm-pdbdump pretty -symbols %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=SYM_FORMAT_FPO %s
-; RUN: llvm-pdbdump pretty -symbols %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=SYM_FORMAT %s
+; RUN: llvm-pdbdump pretty -module-syms %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=SYM_FORMAT_FPO %s
+; RUN: llvm-pdbdump pretty -module-syms %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=SYM_FORMAT %s
; RUN: llvm-pdbdump pretty -types %p/../Inputs/symbolformat.pdb > %t.types
; RUN: FileCheck --check-prefix=TYPES_FORMAT %s < %t.types
; RUN: FileCheck --check-prefix=TYPES_1 %s < %t.types
diff --git a/test/DebugInfo/X86/dbg-declare-inalloca.ll b/test/DebugInfo/X86/dbg-declare-inalloca.ll
new file mode 100644
index 000000000000..e3f5c7e629b8
--- /dev/null
+++ b/test/DebugInfo/X86/dbg-declare-inalloca.ll
@@ -0,0 +1,199 @@
+; RUN: llc -O0 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DEBUG
+; RUN: llc < %s | FileCheck %s
+; RUN: llc -filetype=obj -O0 < %s | llvm-readobj -codeview - | FileCheck %s --check-prefix=OBJ
+
+; IR generated by the following source:
+; struct NonTrivial {
+; NonTrivial();// : x(42) {}
+; ~NonTrivial();// {}
+; int x;
+; };
+; extern "C" void g(int);// {}
+; extern "C" void h(int);// {}
+; extern "C" void f(NonTrivial a, int b, int unused, int c) {
+; if (b) {
+; g(c);
+; } else {
+; h(a.x);
+; }
+; (void)unused;
+; }
+; //int main() {
+; // NonTrivial x;
+; // f(x, 1, 2, 3);
+; //}
+;
+; Remove C++ comments to have a complete, debuggable program.
+
+; We don't need (or want) DBG_VALUE instructions to describe the location of
+; inalloca arguments. We want frame indices in the side table, especially at
+; -O0, because they are reliable across the entire function and don't require
+; any propagation or analysis.
+
+; CHECK: _f: # @f
+; CHECK: Lfunc_begin0:
+; CHECK-NOT: DEBUG_VALUE
+; CHECK: [[start:Ltmp[0-9]+]]:
+; CHECK-NOT: DEBUG_VALUE
+; CHECK: cmpl
+; CHECK: calll _g
+; CHECK: calll _h
+; CHECK: jmp "??1NonTrivial@@QAE@XZ"
+; CHECK: [[end:Ltmp[0-9]+]]:
+; CHECK: Lfunc_end0:
+
+; FIXME: Optimized debug info should preserve this.
+; DEBUG: .short 4414 # Record kind: S_LOCAL
+; DEBUG: .asciz "a"
+; DEBUG: .cv_def_range [[start]] [[end]]
+
+; CHECK: .short 4414 # Record kind: S_LOCAL
+; CHECK: .asciz "b"
+; CHECK: .cv_def_range [[start]] [[end]]
+
+; CHECK: .short 4414 # Record kind: S_LOCAL
+; CHECK: .asciz "c"
+; CHECK: .cv_def_range [[start]] [[end]]
+
+; OBJ-LABEL: ProcStart {
+; OBJ: Kind: S_GPROC32_ID (0x1147)
+; OBJ: DisplayName: f
+; OBJ: }
+; OBJ: Local {
+; OBJ: Type: NonTrivial (0x1007)
+; OBJ: Flags [ (0x1)
+; OBJ: IsParameter (0x1)
+; OBJ: ]
+; OBJ: VarName: a
+; OBJ: }
+; OBJ: DefRangeRegisterRel {
+; OBJ: BaseRegister: 21
+; OBJ: BasePointerOffset: 12
+; OBJ: }
+; OBJ: Local {
+; OBJ: Type: int (0x74)
+; OBJ: Flags [ (0x1)
+; OBJ: IsParameter (0x1)
+; OBJ: ]
+; OBJ: VarName: b
+; OBJ: }
+; OBJ: DefRangeRegisterRel {
+; OBJ: BaseRegister: 21
+; OBJ: BasePointerOffset: 16
+; OBJ: }
+; FIXME: Retain unused.
+; OBJ: Local {
+; OBJ: Type: int (0x74)
+; OBJ: Flags [ (0x1)
+; OBJ: IsParameter (0x1)
+; OBJ: ]
+; OBJ: VarName: c
+; OBJ: }
+; OBJ: DefRangeRegisterRel {
+; OBJ: BaseRegister: 21
+; OBJ: BasePointerOffset: 24
+; OBJ: }
+; OBJ-LABEL: ProcEnd {
+; OBJ: }
+
+
+; ModuleID = 't.cpp'
+source_filename = "t.cpp"
+target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i386-pc-windows-msvc19.10.24728"
+
+%struct.NonTrivial = type { i32 }
+
+; Function Attrs: nounwind
+define void @f(<{ %struct.NonTrivial, i32, i32, i32 }>* inalloca) local_unnamed_addr #0 !dbg !7 {
+entry:
+ %a = getelementptr inbounds <{ %struct.NonTrivial, i32, i32, i32 }>, <{ %struct.NonTrivial, i32, i32, i32 }>* %0, i32 0, i32 0
+ %b = getelementptr inbounds <{ %struct.NonTrivial, i32, i32, i32 }>, <{ %struct.NonTrivial, i32, i32, i32 }>* %0, i32 0, i32 1
+ tail call void @llvm.dbg.declare(metadata i32* %c, metadata !20, metadata !24), !dbg !25
+ tail call void @llvm.dbg.declare(metadata i32* %b, metadata !22, metadata !24), !dbg !26
+ tail call void @llvm.dbg.declare(metadata %struct.NonTrivial* %a, metadata !23, metadata !24), !dbg !27
+ %1 = load i32, i32* %b, align 4, !dbg !28, !tbaa !30
+ %tobool = icmp eq i32 %1, 0, !dbg !28
+ br i1 %tobool, label %if.else, label %if.then, !dbg !34
+
+if.then: ; preds = %entry
+ %c = getelementptr inbounds <{ %struct.NonTrivial, i32, i32, i32 }>, <{ %struct.NonTrivial, i32, i32, i32 }>* %0, i32 0, i32 3
+ %2 = load i32, i32* %c, align 4, !dbg !35, !tbaa !30
+ tail call void @g(i32 %2) #4, !dbg !37
+ br label %if.end, !dbg !38
+
+if.else: ; preds = %entry
+ %x = getelementptr inbounds <{ %struct.NonTrivial, i32, i32, i32 }>, <{ %struct.NonTrivial, i32, i32, i32 }>* %0, i32 0, i32 0, i32 0, !dbg !39
+ %3 = load i32, i32* %x, align 4, !dbg !39, !tbaa !41
+ tail call void @h(i32 %3) #4, !dbg !43
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ tail call x86_thiscallcc void @"\01??1NonTrivial@@QAE@XZ"(%struct.NonTrivial* nonnull %a) #4, !dbg !44
+ ret void, !dbg !44
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+declare void @g(i32) local_unnamed_addr
+
+declare void @h(i32) local_unnamed_addr
+
+; Function Attrs: nounwind
+declare x86_thiscallcc void @"\01??1NonTrivial@@QAE@XZ"(%struct.NonTrivial*) unnamed_addr #3
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
+attributes #3 = { nounwind }
+attributes #4 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 ", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "t.cpp", directory: "C:\5Csrc\5Cllvm-project\5Cbuild", checksumkind: CSK_MD5, checksum: "e41e3fda2a91b52e121ed6c29a209eae")
+!2 = !{}
+!3 = !{i32 1, !"NumRegisterParameters", i32 0}
+!4 = !{i32 2, !"CodeView", i32 1}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{!"clang version 5.0.0 "}
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 8, type: !8, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !19)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10, !13, !13, !13}
+!10 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "NonTrivial", file: !1, line: 1, size: 32, elements: !11, identifier: ".?AUNonTrivial@@")
+!11 = !{!12, !14, !18}
+!12 = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: !10, file: !1, line: 4, baseType: !13, size: 32)
+!13 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!14 = !DISubprogram(name: "NonTrivial", scope: !10, file: !1, line: 2, type: !15, isLocal: false, isDefinition: false, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true)
+!15 = !DISubroutineType(cc: DW_CC_BORLAND_thiscall, types: !16)
+!16 = !{null, !17}
+!17 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 32, flags: DIFlagArtificial | DIFlagObjectPointer)
+!18 = !DISubprogram(name: "~NonTrivial", scope: !10, file: !1, line: 3, type: !15, isLocal: false, isDefinition: false, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true)
+!19 = !{!20, !21, !22, !23}
+!20 = !DILocalVariable(name: "c", arg: 4, scope: !7, file: !1, line: 8, type: !13)
+!21 = !DILocalVariable(name: "unused", arg: 3, scope: !7, file: !1, line: 8, type: !13)
+!22 = !DILocalVariable(name: "b", arg: 2, scope: !7, file: !1, line: 8, type: !13)
+!23 = !DILocalVariable(name: "a", arg: 1, scope: !7, file: !1, line: 8, type: !10)
+!24 = !DIExpression()
+!25 = !DILocation(line: 8, column: 56, scope: !7)
+!26 = !DILocation(line: 8, column: 37, scope: !7)
+!27 = !DILocation(line: 8, column: 30, scope: !7)
+!28 = !DILocation(line: 9, column: 7, scope: !29)
+!29 = distinct !DILexicalBlock(scope: !7, file: !1, line: 9, column: 7)
+!30 = !{!31, !31, i64 0}
+!31 = !{!"int", !32, i64 0}
+!32 = !{!"omnipotent char", !33, i64 0}
+!33 = !{!"Simple C++ TBAA"}
+!34 = !DILocation(line: 9, column: 7, scope: !7)
+!35 = !DILocation(line: 10, column: 7, scope: !36)
+!36 = distinct !DILexicalBlock(scope: !29, file: !1, line: 9, column: 10)
+!37 = !DILocation(line: 10, column: 5, scope: !36)
+!38 = !DILocation(line: 11, column: 3, scope: !36)
+!39 = !DILocation(line: 12, column: 9, scope: !40)
+!40 = distinct !DILexicalBlock(scope: !29, file: !1, line: 11, column: 10)
+!41 = !{!42, !31, i64 0}
+!42 = !{!"?AUNonTrivial@@", !31, i64 0}
+!43 = !DILocation(line: 12, column: 5, scope: !40)
+!44 = !DILocation(line: 15, column: 1, scope: !7)
diff --git a/test/DebugInfo/X86/split-dwarf-cross-unit-reference.ll b/test/DebugInfo/X86/split-dwarf-cross-unit-reference.ll
index c6f0afa27937..ca8525cd335b 100644
--- a/test/DebugInfo/X86/split-dwarf-cross-unit-reference.ll
+++ b/test/DebugInfo/X86/split-dwarf-cross-unit-reference.ll
@@ -1,46 +1,194 @@
-; RUN: llc -mtriple=x86_64-linux -split-dwarf-file=foo.dwo -filetype=obj -o - < %s | llvm-objdump -r - | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux -split-dwarf-cross-cu-references -split-dwarf-file=foo.dwo -filetype=obj -o %t < %s
+; RUN: llvm-objdump -r %t | FileCheck %s
+; RUN: llvm-dwarfdump -debug-dump=info.dwo %t | FileCheck --check-prefix=ALL --check-prefix=INFO --check-prefix=DWO --check-prefix=CROSS %s
+; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck --check-prefix=ALL --check-prefix=INFO %s
+
+; RUN: llc -mtriple=x86_64-linux -split-dwarf-file=foo.dwo -filetype=obj -o %t < %s
+; RUN: llvm-objdump -r %t | FileCheck %s
+; RUN: llvm-dwarfdump -debug-dump=info.dwo %t | FileCheck --check-prefix=ALL --check-prefix=DWO --check-prefix=NOCROSS %s
+; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck --check-prefix=ALL --check-prefix=INFO %s
+
+; Testing cross-CU references for types, subprograms, and variables
+; Built from code something like this:
+; foo.cpp:
+; struct t1 { int i; };
+; void f();
+; __attribute__((always_inline)) void f1(t1 t) {
+; f();
+; }
+; void foo(t1 t) {
+; f1(t);
+; }
+; bar.cpp:
+; struct t1 { int i; };
+; void f1(t1);
+; void bar(t1 t) {
+; f1(t);
+; }
+; $ clang++-tot -emit-llvm -S {foo,bar}.cpp -g
+; $ llvm-link-tot {foo,bar}.ll -S -o foobar.ll
+; $ clang++-tot -emit-llvm foobar.ll -o foobar.opt.ll -S -c
+;
+; Then manually removing the original f1 definition, to simplify the DWARF a bit
+; (so it only has the inlined definitions, no concrete definition)
+
+; Check that:
+; * no relocations are emitted for the debug_info.dwo section no matter what
+; * one debug_info->debug_info relocation in debug_info no matter what (for
+; split dwarf inlining)
+; * debug_info uses relocations and ref_addr no matter what
+; * debug_info.dwo uses relocations for types as well as abstract subprograms
+; and variables when -split-dwarf-cross-cu-references is used
+; * debug_info.dwo contains duplicate types, abstract subprograms and abstract
+; variables otherwise to avoid the need for cross-cu references
; CHECK-NOT: .rel{{a?}}.debug_info.dwo
; CHECK: RELOCATION RECORDS FOR [.rel{{a?}}.debug_info]:
; CHECK-NOT: RELOCATION RECORDS
-; Expect one relocation in debug_info, between f3 and f1.
+; Expect one relocation in debug_info, from the inlined f1 in foo to its
+; abstract origin in bar
; CHECK: R_X86_64_32 .debug_info
+; CHECK-NOT: RELOCATION RECORDS
; CHECK-NOT: .debug_info
; CHECK: RELOCATION RECORDS
; CHECK-NOT: .rel{{a?}}.debug_info.dwo
+; ALL: Compile Unit
+; ALL: DW_TAG_compile_unit
+; DWO: DW_AT_name {{.*}} "foo.cpp"
+; ALL: 0x[[F1:.*]]: DW_TAG_subprogram
+; ALL: DW_AT_name {{.*}} "f1"
+; DWO: 0x[[F1T:.*]]: DW_TAG_formal_parameter
+; DWO: DW_AT_name {{.*}} "t"
+; DWO: DW_AT_type [DW_FORM_ref4] {{.*}}{0x[[T1:.*]]}
+; DWO: NULL
+; DWO: 0x[[T1]]: DW_TAG_structure_type
+; DWO: DW_AT_name {{.*}} "t1"
+; ALL: DW_TAG_subprogram
+; ALL: DW_AT_name {{.*}} "foo"
+; DWO: DW_TAG_formal_parameter
+; DWO: DW_AT_name {{.*}} "t"
+; DWO: DW_AT_type [DW_FORM_ref4] {{.*}}{0x[[T1]]}
+; ALL: DW_TAG_inlined_subroutine
+; ALL: DW_AT_abstract_origin [DW_FORM_ref4] {{.*}}{0x[[F1]]}
+; DWO: DW_TAG_formal_parameter
+; DWO: DW_AT_abstract_origin [DW_FORM_ref4] {{.*}}{0x[[F1T]]}
+
+; ALL: Compile Unit
+; ALL: DW_TAG_compile_unit
+; DWO: DW_AT_name {{.*}} "bar.cpp"
+; NOCROSS: 0x[[BAR_F1:.*]]: DW_TAG_subprogram
+; NOCROSS: DW_AT_name {{.*}} "f1"
+; NOCROSS: 0x[[BAR_F1T:.*]]: DW_TAG_formal_parameter
+; NOCROSS: DW_AT_name {{.*}} "t"
+; NOCROSS: DW_AT_type [DW_FORM_ref4] {{.*}}{0x[[BAR_T1:.*]]}
+; NOCROSS: NULL
+; NOCROSS: 0x[[BAR_T1]]: DW_TAG_structure_type
+; NOCROSS: DW_AT_name {{.*}} "t1"
+; ALL: DW_TAG_subprogram
+; ALL: DW_AT_name {{.*}} "bar"
+; DWO: DW_TAG_formal_parameter
+; DWO: DW_AT_name {{.*}} "t"
+; CROSS: DW_AT_type [DW_FORM_ref_addr] (0x00000000[[T1]]
+; NOCROSS: DW_AT_type [DW_FORM_ref4] {{.*}}{0x[[BAR_T1]]}
+; ALL: DW_TAG_inlined_subroutine
+; INFO: DW_AT_abstract_origin [DW_FORM_ref_addr] (0x00000000[[F1]]
+; NOCROSS: DW_AT_abstract_origin [DW_FORM_ref4] {{.*}}{0x[[BAR_F1]]}
+; DWO: DW_TAG_formal_parameter
+; CROSS: DW_AT_abstract_origin [DW_FORM_ref_addr] (0x00000000[[F1T]]
+; NOCROSS: DW_AT_abstract_origin [DW_FORM_ref4] {{.*}}{0x[[BAR_F1T]]
-; Function Attrs: noinline nounwind optnone uwtable
-define void @_Z2f1v() !dbg !7 {
+%struct.t1 = type { i32 }
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+declare void @_Z1fv() #2
+
+; Function Attrs: noinline uwtable
+define void @_Z3foo2t1(i32 %t.coerce) #3 !dbg !20 {
entry:
- ret void, !dbg !10
+ %t.i = alloca %struct.t1, align 4
+ call void @llvm.dbg.declare(metadata %struct.t1* %t.i, metadata !15, metadata !16), !dbg !21
+ %t = alloca %struct.t1, align 4
+ %agg.tmp = alloca %struct.t1, align 4
+ %coerce.dive = getelementptr inbounds %struct.t1, %struct.t1* %t, i32 0, i32 0
+ store i32 %t.coerce, i32* %coerce.dive, align 4
+ call void @llvm.dbg.declare(metadata %struct.t1* %t, metadata !23, metadata !16), !dbg !24
+ %0 = bitcast %struct.t1* %agg.tmp to i8*, !dbg !25
+ %1 = bitcast %struct.t1* %t to i8*, !dbg !25
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 4, i32 4, i1 false), !dbg !25
+ %coerce.dive1 = getelementptr inbounds %struct.t1, %struct.t1* %agg.tmp, i32 0, i32 0, !dbg !26
+ %2 = load i32, i32* %coerce.dive1, align 4, !dbg !26
+ %coerce.dive.i = getelementptr inbounds %struct.t1, %struct.t1* %t.i, i32 0, i32 0
+ store i32 %2, i32* %coerce.dive.i, align 4
+ call void @_Z1fv(), !dbg !27
+ ret void, !dbg !28
}
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #4
+
; Function Attrs: noinline uwtable
-define void @_Z2f3v() !dbg !13 {
+define void @_Z3bar2t1(i32 %t.coerce) #3 !dbg !29 {
entry:
- call void @_Z2f1v(), !dbg !14
- ret void, !dbg !16
+ %t.i = alloca %struct.t1, align 4
+ call void @llvm.dbg.declare(metadata %struct.t1* %t.i, metadata !15, metadata !16), !dbg !30
+ %t = alloca %struct.t1, align 4
+ %agg.tmp = alloca %struct.t1, align 4
+ %coerce.dive = getelementptr inbounds %struct.t1, %struct.t1* %t, i32 0, i32 0
+ store i32 %t.coerce, i32* %coerce.dive, align 4
+ call void @llvm.dbg.declare(metadata %struct.t1* %t, metadata !32, metadata !16), !dbg !33
+ %0 = bitcast %struct.t1* %agg.tmp to i8*, !dbg !34
+ %1 = bitcast %struct.t1* %t to i8*, !dbg !34
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 4, i32 4, i1 false), !dbg !34
+ %coerce.dive1 = getelementptr inbounds %struct.t1, %struct.t1* %agg.tmp, i32 0, i32 0, !dbg !35
+ %2 = load i32, i32* %coerce.dive1, align 4, !dbg !35
+ %coerce.dive.i = getelementptr inbounds %struct.t1, %struct.t1* %t.i, i32 0, i32 0
+ store i32 %2, i32* %coerce.dive.i, align 4
+ call void @_Z1fv(), !dbg !36
+ ret void, !dbg !37
}
!llvm.dbg.cu = !{!0, !3}
!llvm.ident = !{!5, !5}
-!llvm.module.flags = !{!6}
+!llvm.module.flags = !{!6, !7}
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 301051) (llvm/trunk 301062)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
-!1 = !DIFile(filename: "a.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 302809) (llvm/trunk 302815)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: true)
+!1 = !DIFile(filename: "foo.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
!2 = !{}
-!3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !4, producer: "clang version 5.0.0 (trunk 301051) (llvm/trunk 301062)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
-!4 = !DIFile(filename: "b.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
-!5 = !{!"clang version 5.0.0 (trunk 301051) (llvm/trunk 301062)"}
-!6 = !{i32 2, !"Debug Info Version", i32 3}
-!7 = distinct !DISubprogram(name: "f1", linkageName: "_Z2f1v", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
-!8 = !DISubroutineType(types: !9)
-!9 = !{null}
-!10 = !DILocation(line: 1, scope: !7)
-!11 = distinct !DISubprogram(name: "f2", linkageName: "_Z2f2v", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
-!12 = !DILocation(line: 1, scope: !11)
-!13 = distinct !DISubprogram(name: "f3", linkageName: "_Z2f3v", scope: !4, file: !4, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !3, variables: !2)
-!14 = !DILocation(line: 1, scope: !11, inlinedAt: !15)
-!15 = distinct !DILocation(line: 1, scope: !13)
-!16 = !DILocation(line: 1, scope: !13)
+!3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !4, producer: "clang version 5.0.0 (trunk 302809) (llvm/trunk 302815)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: true)
+!4 = !DIFile(filename: "bar.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!5 = !{!"clang version 5.0.0 (trunk 302809) (llvm/trunk 302815)"}
+!6 = !{i32 2, !"Dwarf Version", i32 4}
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = distinct !DISubprogram(name: "f1", linkageName: "_Z2f12t1", scope: !1, file: !1, line: 3, type: !9, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null, !11}
+!11 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1", file: !1, line: 1, size: 32, elements: !12, identifier: "_ZTS2t1")
+!12 = !{!13}
+!13 = !DIDerivedType(tag: DW_TAG_member, name: "i", scope: !11, file: !1, line: 1, baseType: !14, size: 32)
+!14 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!15 = !DILocalVariable(name: "t", arg: 1, scope: !8, file: !1, line: 3, type: !11)
+!16 = !DIExpression()
+!17 = !DILocation(line: 3, column: 43, scope: !8)
+!18 = !DILocation(line: 4, column: 3, scope: !8)
+!19 = !DILocation(line: 5, column: 1, scope: !8)
+!20 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foo2t1", scope: !1, file: !1, line: 6, type: !9, isLocal: false, isDefinition: true, scopeLine: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!21 = !DILocation(line: 3, column: 43, scope: !8, inlinedAt: !22)
+!22 = distinct !DILocation(line: 7, column: 3, scope: !20)
+!23 = !DILocalVariable(name: "t", arg: 1, scope: !20, file: !1, line: 6, type: !11)
+!24 = !DILocation(line: 6, column: 13, scope: !20)
+!25 = !DILocation(line: 7, column: 6, scope: !20)
+!26 = !DILocation(line: 7, column: 3, scope: !20)
+!27 = !DILocation(line: 4, column: 3, scope: !8, inlinedAt: !22)
+!28 = !DILocation(line: 8, column: 1, scope: !20)
+!29 = distinct !DISubprogram(name: "bar", linkageName: "_Z3bar2t1", scope: !4, file: !4, line: 3, type: !9, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: false, unit: !3, variables: !2)
+!30 = !DILocation(line: 3, column: 43, scope: !8, inlinedAt: !31)
+!31 = distinct !DILocation(line: 4, column: 3, scope: !29)
+!32 = !DILocalVariable(name: "t", arg: 1, scope: !29, file: !4, line: 3, type: !11)
+!33 = !DILocation(line: 3, column: 13, scope: !29)
+!34 = !DILocation(line: 4, column: 6, scope: !29)
+!35 = !DILocation(line: 4, column: 3, scope: !29)
+!36 = !DILocation(line: 4, column: 3, scope: !8, inlinedAt: !31)
+!37 = !DILocation(line: 5, column: 1, scope: !29)
diff --git a/test/DebugInfo/typeunit-header.test b/test/DebugInfo/typeunit-header.test
new file mode 100644
index 000000000000..c16156b91e6f
--- /dev/null
+++ b/test/DebugInfo/typeunit-header.test
@@ -0,0 +1,15 @@
+RUN: llvm-dwarfdump %p/Inputs/typeunit-header.elf-x86-64 | FileCheck %s
+
+This is testing a bugfix where parsing the type unit header was not
+taking the unit's initial length field into account when validating.
+
+The input file is hand-coded assembler to generate a type unit stub,
+which only contains a type unit DIE with a sole visibility attribute.
+
+We make sure that llvm-dwarfdump is parsing the type unit header correctly
+and displays it.
+
+CHECK: .debug_types contents:
+CHECK: 0x00000000: Type Unit: length = 0x00000019 version = 0x0004 abbr_offset = 0x0000 addr_size = 0x08 name = '' type_signature = 0x0011223344556677 type_offset = 0x0019 (next unit at 0x0000001d)
+CHECK: 0x00000017: DW_TAG_type_unit [1] *
+CHECK: DW_AT_visibility [DW_FORM_data1] (DW_VIS_local)
diff --git a/test/Feature/intrinsic-noduplicate.ll b/test/Feature/intrinsic-noduplicate.ll
index 4f2ae1c698c9..f7b377aae38b 100644
--- a/test/Feature/intrinsic-noduplicate.ll
+++ b/test/Feature/intrinsic-noduplicate.ll
@@ -1,4 +1,5 @@
; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; REQUIRES: NVPTX
; Make sure LLVM knows about the convergent attribute on the
; llvm.nvvm.barrier0 intrinsic.
diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 4b208d64427b..334e00dabf40 100644
--- a/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -617,70 +617,6 @@ declare i32 @llvm.bswap.i32(i32) nounwind readnone
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32
-
-; Store intrinsic.
-
-define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
- call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
- ret void
-}
-
-declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
-
-; CHECK-LABEL: @StoreIntrinsic
-; CHECK-NOT: br
-; CHECK-NOT: = or
-; CHECK: store <4 x i32> {{.*}} align 1
-; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
-; CHECK: ret void
-
-
-; Load intrinsic.
-
-define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
- %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
- ret <16 x i8> %call
-}
-
-declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
-
-; CHECK-LABEL: @LoadIntrinsic
-; CHECK: load <16 x i8>, <16 x i8>* {{.*}} align 1
-; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32, i32* {{.*}}
-; CHECK-NOT: br
-; CHECK-NOT: = or
-; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
-; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
-; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
-; CHECK: ret <16 x i8>
-
-
-; Simple NoMem intrinsic
-; Check that shadow is OR'ed, and origin is Select'ed
-; And no shadow checks!
-
-define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
- %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
- ret <8 x i16> %call
-}
-
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
-
-; CHECK-LABEL: @Paddsw128
-; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
-; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
-; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
-; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
-; CHECK-NEXT: = or <8 x i16>
-; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
-; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
-; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
-; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
-; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
-; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
-; CHECK-NEXT: ret <8 x i16>
-
-
; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.
diff --git a/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll b/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
new file mode 100644
index 000000000000..be3f1976daa1
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
@@ -0,0 +1,68 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
+; REQUIRES: x86
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Store intrinsic.
+
+define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
+ call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
+ ret void
+}
+
+declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
+
+; CHECK-LABEL: @StoreIntrinsic
+; CHECK-NOT: br
+; CHECK-NOT: = or
+; CHECK: store <4 x i32> {{.*}} align 1
+; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
+; CHECK: ret void
+
+
+; Load intrinsic.
+
+define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
+ %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
+ ret <16 x i8> %call
+}
+
+declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
+
+; CHECK-LABEL: @LoadIntrinsic
+; CHECK: load <16 x i8>, <16 x i8>* {{.*}} align 1
+; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32, i32* {{.*}}
+; CHECK-NOT: br
+; CHECK-NOT: = or
+; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
+; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
+; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
+; CHECK: ret <16 x i8>
+
+
+; Simple NoMem intrinsic
+; Check that shadow is OR'ed, and origin is Select'ed
+; And no shadow checks!
+
+define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
+ %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %call
+}
+
+declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
+
+; CHECK-LABEL: @Paddsw128
+; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
+; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
+; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: = or <8 x i16>
+; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
+; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
+; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
+; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
+; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
+; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
+; CHECK-NEXT: ret <8 x i16>
diff --git a/test/Instrumentation/MemorySanitizer/pr32842.ll b/test/Instrumentation/MemorySanitizer/pr32842.ll
new file mode 100644
index 000000000000..5d74c9a193bf
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/pr32842.ll
@@ -0,0 +1,20 @@
+; Regression test for https://bugs.llvm.org/show_bug.cgi?id=32842
+;
+; RUN: opt < %s -msan -S | FileCheck %s
+;target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define zeroext i1 @_Z1fii(i32 %x, i32 %y) sanitize_memory {
+entry:
+ %cmp = icmp slt i32 %x, %y
+ ret i1 %cmp
+}
+
+; CHECK: [[X:[^ ]+]] = load{{.*}}__msan_param_tls{{.*}}
+; CHECK: [[Y:[^ ]+]] = load{{.*}}__msan_param_tls{{.*}}
+; CHECK: [[OR:[^ ]+]] = or i32 [[Y]], [[X]]
+
+; Make sure the shadow of the (x < y) comparison isn't truncated to i1.
+; CHECK-NOT: trunc i32 [[OR]] to i1
+; CHECK: [[CMP:[^ ]+]] = icmp ne i32 [[OR]], 0
+; CHECK: store i1 [[CMP]],{{.*}}__msan_retval_tls
diff --git a/test/Instrumentation/MemorySanitizer/vector_arith.ll b/test/Instrumentation/MemorySanitizer/vector_arith.ll
index 6541a1c3a394..8be085cff33d 100644
--- a/test/Instrumentation/MemorySanitizer/vector_arith.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_arith.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; REQUIRES: x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/vector_cmp.ll b/test/Instrumentation/MemorySanitizer/vector_cmp.ll
index fb54a5cb632e..62a5f573064e 100644
--- a/test/Instrumentation/MemorySanitizer/vector_cmp.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_cmp.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; REQUIRES: x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/vector_cvt.ll b/test/Instrumentation/MemorySanitizer/vector_cvt.ll
index 55e91c74a316..beedb0e63e50 100644
--- a/test/Instrumentation/MemorySanitizer/vector_cvt.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_cvt.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; REQUIRES: x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/vector_pack.ll b/test/Instrumentation/MemorySanitizer/vector_pack.ll
index 31c0c62980ec..deb03d84802a 100644
--- a/test/Instrumentation/MemorySanitizer/vector_pack.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_pack.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; REQUIRES: x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/MemorySanitizer/vector_shift.ll b/test/Instrumentation/MemorySanitizer/vector_shift.ll
index 978bad3b6979..a4b8fdbd603f 100644
--- a/test/Instrumentation/MemorySanitizer/vector_shift.ll
+++ b/test/Instrumentation/MemorySanitizer/vector_shift.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; REQUIRES: x86
; Test instrumentation of vector shift instructions.
diff --git a/test/LTO/Resolution/X86/ifunc.ll b/test/LTO/Resolution/X86/ifunc.ll
new file mode 100644
index 000000000000..63723763430c
--- /dev/null
+++ b/test/LTO/Resolution/X86/ifunc.ll
@@ -0,0 +1,15 @@
+; RUN: opt -module-summary -o %t.bc %s
+; RUN: llvm-lto2 run %t.bc -r %t.bc,foo,pl -o %t2
+; RUN: llvm-nm %t2.0 | FileCheck %s
+; CHECK: T foo
+; CHECK: t foo_ifunc
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@foo = ifunc i32 (i32), i64 ()* @foo_ifunc
+
+define internal i64 @foo_ifunc() {
+entry:
+ ret i64 0
+}
diff --git a/test/MC/AArch64/directive-cpu-err.s b/test/MC/AArch64/directive-cpu-err.s
new file mode 100644
index 000000000000..ea0d28e71815
--- /dev/null
+++ b/test/MC/AArch64/directive-cpu-err.s
@@ -0,0 +1,9 @@
+// RUN: not llvm-mc -triple aarch64-linux-gnu %s 2> %t > /dev/null
+// RUN: FileCheck %s < %t
+
+ .cpu invalid
+ // CHECK: error: unknown CPU name
+
+ .cpu generic+wibble+nowobble
+ // CHECK: :[[@LINE-1]]:18: error: unsupported architectural extension
+ // CHECK: :[[@LINE-2]]:25: error: unsupported architectural extension
diff --git a/test/MC/AArch64/label-arithmetic-diags-elf.s b/test/MC/AArch64/label-arithmetic-diags-elf.s
index e9d92d591fac..dbfdd24f8dc9 100644
--- a/test/MC/AArch64/label-arithmetic-diags-elf.s
+++ b/test/MC/AArch64/label-arithmetic-diags-elf.s
@@ -1,5 +1,14 @@
// RUN: not llvm-mc -triple aarch64-elf -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s
+ .data
+b:
+ .fill 300
+e:
+ .byte e - b
+ // CHECK: error: value evaluated as 300 is out of range.
+ // CHECK-NEXT: .byte e - b
+ // CHECK-NEXT: ^
+
.section sec_x
start:
.space 5000
diff --git a/test/MC/AMDGPU/flat.s b/test/MC/AMDGPU/flat.s
index c6894c35f4d7..4e81799fe9f9 100644
--- a/test/MC/AMDGPU/flat.s
+++ b/test/MC/AMDGPU/flat.s
@@ -30,31 +30,6 @@ flat_load_dword v1, v[3:4] glc slc
// CI: flat_load_dword v1, v[3:4] glc slc ; encoding: [0x00,0x00,0x33,0xdc,0x03,0x00,0x00,0x01]
// VI: flat_load_dword v1, v[3:4] glc slc ; encoding: [0x00,0x00,0x53,0xdc,0x03,0x00,0x00,0x01]
-flat_load_dword v1, v[3:4] glc tfe
-// NOSI: error:
-// CI: flat_load_dword v1, v[3:4] glc tfe ; encoding: [0x00,0x00,0x31,0xdc,0x03,0x00,0x80,0x01]
-// VI: flat_load_dword v1, v[3:4] glc tfe ; encoding: [0x00,0x00,0x51,0xdc,0x03,0x00,0x80,0x01]
-
-flat_load_dword v1, v[3:4] glc slc tfe
-// NOSI: error:
-// CI: flat_load_dword v1, v[3:4] glc slc tfe ; encoding: [0x00,0x00,0x33,0xdc,0x03,0x00,0x80,0x01]
-// VI: flat_load_dword v1, v[3:4] glc slc tfe ; encoding: [0x00,0x00,0x53,0xdc,0x03,0x00,0x80,0x01]
-
-flat_load_dword v1, v[3:4] slc
-// NOSI: error:
-// CI: flat_load_dword v1, v[3:4] slc ; encoding: [0x00,0x00,0x32,0xdc,0x03,0x00,0x00,0x01]
-// VI: flat_load_dword v1, v[3:4] slc ; encoding: [0x00,0x00,0x52,0xdc,0x03,0x00,0x00,0x01]
-
-flat_load_dword v1, v[3:4] slc tfe
-// NOSI: error:
-// CI: flat_load_dword v1, v[3:4] slc tfe ; encoding: [0x00,0x00,0x32,0xdc,0x03,0x00,0x80,0x01]
-// VI: flat_load_dword v1, v[3:4] slc tfe ; encoding: [0x00,0x00,0x52,0xdc,0x03,0x00,0x80,0x01]
-
-flat_load_dword v1, v[3:4] tfe
-// NOSI: error:
-// CI: flat_load_dword v1, v[3:4] tfe ; encoding: [0x00,0x00,0x30,0xdc,0x03,0x00,0x80,0x01]
-// VI: flat_load_dword v1, v[3:4] tfe ; encoding: [0x00,0x00,0x50,0xdc,0x03,0x00,0x80,0x01]
-
flat_store_dword v[3:4], v1
// NOSI: error:
// CIVI: flat_store_dword v[3:4], v1 ; encoding: [0x00,0x00,0x70,0xdc,0x03,0x01,0x00,0x00]
@@ -67,66 +42,25 @@ flat_store_dword v[3:4], v1 glc slc
// NOSI: error:
// CIVI: flat_store_dword v[3:4], v1 glc slc ; encoding: [0x00,0x00,0x73,0xdc,0x03,0x01,0x00,0x00]
-flat_store_dword v[3:4], v1 glc tfe
-// NOSI: error:
-// CIVI: flat_store_dword v[3:4], v1 glc tfe ; encoding: [0x00,0x00,0x71,0xdc,0x03,0x01,0x80,0x00]
-
-flat_store_dword v[3:4], v1 glc slc tfe
-// NOSI: error:
-// CIVI: flat_store_dword v[3:4], v1 glc slc tfe ; encoding: [0x00,0x00,0x73,0xdc,0x03,0x01,0x80,0x00]
flat_store_dword v[3:4], v1 slc
// NOSI: error:
// CIVI: flat_store_dword v[3:4], v1 slc ; encoding: [0x00,0x00,0x72,0xdc,0x03,0x01,0x00,0x00]
-flat_store_dword v[3:4], v1 slc tfe
-// NOSI: error:
-// CIVI: flat_store_dword v[3:4], v1 slc tfe ; encoding: [0x00,0x00,0x72,0xdc,0x03,0x01,0x80,0x00]
-
-flat_store_dword v[3:4], v1 tfe
-// NOSI: error:
-// CIVI: flat_store_dword v[3:4], v1 tfe ; encoding: [0x00,0x00,0x70,0xdc,0x03,0x01,0x80,0x00]
-
// FIXME: For atomic instructions, glc must be placed immediately following
// the data register. These forms aren't currently supported:
// flat_atomic_add v1, v[3:4], v5 slc glc
-// flat_atomic_add v1, v[3:4], v5 slc glc tfe
-// flat_atomic_add v1, v[3:4], v5 slc tfe glc
-// flat_atomic_add v1, v[3:4], v5 tfe glc
-// flat_atomic_add v[3:4], v5 tfe glc
-// flat_atomic_add v1, v[3:4], v5 tfe glc slc
-// flat_atomic_add v1, v[3:4], v5 tfe slc glc
flat_atomic_add v1 v[3:4], v5 glc slc
// NOSI: error:
// CI: flat_atomic_add v1, v[3:4], v5 glc slc ; encoding: [0x00,0x00,0xcb,0xdc,0x03,0x05,0x00,0x01]
// VI: flat_atomic_add v1, v[3:4], v5 glc slc ; encoding: [0x00,0x00,0x0b,0xdd,0x03,0x05,0x00,0x01]
-flat_atomic_add v1 v[3:4], v5 glc tfe
-// NOSI: error:
-// CI: flat_atomic_add v1, v[3:4], v5 glc tfe ; encoding: [0x00,0x00,0xc9,0xdc,0x03,0x05,0x80,0x01]
-// VI: flat_atomic_add v1, v[3:4], v5 glc tfe ; encoding: [0x00,0x00,0x09,0xdd,0x03,0x05,0x80,0x01]
-
-flat_atomic_add v1 v[3:4], v5 glc slc tfe
-// NOSI: error:
-// CI: flat_atomic_add v1, v[3:4], v5 glc slc tfe ; encoding: [0x00,0x00,0xcb,0xdc,0x03,0x05,0x80,0x01]
-// VI: flat_atomic_add v1, v[3:4], v5 glc slc tfe ; encoding: [0x00,0x00,0x0b,0xdd,0x03,0x05,0x80,0x01]
-
flat_atomic_add v[3:4], v5 slc
// NOSI: error:
// CI: flat_atomic_add v[3:4], v5 slc ; encoding: [0x00,0x00,0xca,0xdc,0x03,0x05,0x00,0x00]
// VI: flat_atomic_add v[3:4], v5 slc ; encoding: [0x00,0x00,0x0a,0xdd,0x03,0x05,0x00,0x00]
-flat_atomic_add v[3:4], v5 slc tfe
-// NOSI: error:
-// CI: flat_atomic_add v[3:4], v5 slc tfe ; encoding: [0x00,0x00,0xca,0xdc,0x03,0x05,0x80,0x00]
-// VI: flat_atomic_add v[3:4], v5 slc tfe ; encoding: [0x00,0x00,0x0a,0xdd,0x03,0x05,0x80,0x00]
-
-flat_atomic_add v[3:4], v5 tfe
-// NOSI: error:
-// CI: flat_atomic_add v[3:4], v5 tfe ; encoding: [0x00,0x00,0xc8,0xdc,0x03,0x05,0x80,0x00]
-// VI: flat_atomic_add v[3:4], v5 tfe ; encoding: [0x00,0x00,0x08,0xdd,0x03,0x05,0x80,0x00]
-
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
diff --git a/test/MC/AMDGPU/literal16.s b/test/MC/AMDGPU/literal16.s
index e578ce82372f..97d16c374285 100644
--- a/test/MC/AMDGPU/literal16.s
+++ b/test/MC/AMDGPU/literal16.s
@@ -133,16 +133,16 @@ v_add_f16 v1, 65535, v2
// K-constant
v_madmk_f16 v1, v2, 0x4280, v3
-// VI: v_madmk_f16_e32 v1, v2, 0x4280, v3 ; encoding: [0x02,0x07,0x02,0x48,0x80,0x42,0x00,0x00]
+// VI: v_madmk_f16 v1, v2, 0x4280, v3 ; encoding: [0x02,0x07,0x02,0x48,0x80,0x42,0x00,0x00]
v_madmk_f16 v1, v2, 1.0, v3
-// VI: v_madmk_f16_e32 v1, v2, 0x3c00, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x3c,0x00,0x00]
+// VI: v_madmk_f16 v1, v2, 0x3c00, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x3c,0x00,0x00]
v_madmk_f16 v1, v2, 1, v3
-// VI: v_madmk_f16_e32 v1, v2, 0x1, v3 ; encoding: [0x02,0x07,0x02,0x48,0x01,0x00,0x00,0x00]
+// VI: v_madmk_f16 v1, v2, 0x1, v3 ; encoding: [0x02,0x07,0x02,0x48,0x01,0x00,0x00,0x00]
v_madmk_f16 v1, v2, 64.0, v3
-// VI: v_madmk_f16_e32 v1, v2, 0x5400, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x54,0x00,0x00]
+// VI: v_madmk_f16 v1, v2, 0x5400, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x54,0x00,0x00]
v_add_f16_e32 v1, 64.0, v2
diff --git a/test/MC/AMDGPU/vop2.s b/test/MC/AMDGPU/vop2.s
index 078b68638008..79ea38e641a6 100644
--- a/test/MC/AMDGPU/vop2.s
+++ b/test/MC/AMDGPU/vop2.s
@@ -243,31 +243,31 @@ v_or_b32_e32 v1, v2, v3
v_xor_b32_e32 v1, v2, v3
// SICI: v_bfm_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x3c,0xd2,0x02,0x07,0x02,0x00]
-// VI: v_bfm_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_bfm_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
v_bfm_b32_e64 v1, v2, v3
// SICI: v_mac_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x3e]
// VI: v_mac_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2c]
v_mac_f32_e32 v1, v2, v3
-// SICI: v_madmk_f32_e32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x40,0x00,0x00,0x80,0x42]
-// VI: v_madmk_f32_e32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x2e,0x00,0x00,0x80,0x42]
-v_madmk_f32_e32 v1, v2, 64.0, v3
+// SICI: v_madmk_f32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x40,0x00,0x00,0x80,0x42]
+// VI: v_madmk_f32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x2e,0x00,0x00,0x80,0x42]
+v_madmk_f32 v1, v2, 64.0, v3
-// SICI: v_madak_f32_e32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x42,0x00,0x00,0x80,0x42]
-// VI: v_madak_f32_e32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x30,0x00,0x00,0x80,0x42]
-v_madak_f32_e32 v1, v2, v3, 64.0
+// SICI: v_madak_f32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x42,0x00,0x00,0x80,0x42]
+// VI: v_madak_f32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x30,0x00,0x00,0x80,0x42]
+v_madak_f32 v1, v2, v3, 64.0
// SICI: v_bcnt_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x44,0xd2,0x02,0x07,0x02,0x00]
-// VI: v_bcnt_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8b,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_bcnt_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8b,0xd2,0x02,0x07,0x02,0x00]
v_bcnt_u32_b32_e64 v1, v2, v3
// SICI: v_mbcnt_lo_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x46,0xd2,0x02,0x07,0x02,0x00]
-// VI: v_mbcnt_lo_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8c,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_mbcnt_lo_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8c,0xd2,0x02,0x07,0x02,0x00]
v_mbcnt_lo_u32_b32_e64 v1, v2, v3
// SICI: v_mbcnt_hi_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x48,0xd2,0x02,0x07,0x02,0x00]
-// VI: v_mbcnt_hi_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8d,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_mbcnt_hi_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8d,0xd2,0x02,0x07,0x02,0x00]
v_mbcnt_hi_u32_b32_e64 v1, v2, v3
// SICI: v_add_i32_e32 v1, vcc, v2, v3 ; encoding: [0x02,0x07,0x02,0x4a]
@@ -376,31 +376,31 @@ v_subbrev_u32 v1, vcc, v2, v3, vcc
v_subbrev_u32 v1, s[0:1], v2, v3, vcc
// SICI: v_ldexp_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x56]
-// VI: v_ldexp_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x88,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_ldexp_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x88,0xd2,0x02,0x07,0x02,0x00]
v_ldexp_f32 v1, v2, v3
// SICI: v_cvt_pkaccum_u8_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x58]
-// VI: v_cvt_pkaccum_u8_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0xf0,0xd1,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pkaccum_u8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0xf0,0xd1,0x02,0x07,0x02,0x00]
v_cvt_pkaccum_u8_f32 v1, v2, v3
// SICI: v_cvt_pknorm_i16_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x5a]
-// VI: v_cvt_pknorm_i16_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x94,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pknorm_i16_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x94,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pknorm_i16_f32 v1, v2, v3
// SICI: v_cvt_pknorm_u16_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x5c]
-// VI: v_cvt_pknorm_u16_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x95,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pknorm_u16_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x95,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pknorm_u16_f32 v1, v2, v3
// SICI: v_cvt_pkrtz_f16_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x5e]
-// VI: v_cvt_pkrtz_f16_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x96,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pkrtz_f16_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x96,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pkrtz_f16_f32 v1, v2, v3
// SICI: v_cvt_pk_u16_u32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x60,0xd2,0x02,0x07,0x02,0x00]
-// VI: v_cvt_pk_u16_u32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x97,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pk_u16_u32 v1, v2, v3 ; encoding: [0x01,0x00,0x97,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pk_u16_u32_e64 v1, v2, v3
// SICI: v_cvt_pk_i16_i32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x62,0xd2,0x02,0x07,0x02,0x00]
-// VI: v_cvt_pk_i16_i32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x98,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pk_i16_i32 v1, v2, v3 ; encoding: [0x01,0x00,0x98,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pk_i16_i32_e64 v1, v2, v3
// NOSICI: error: instruction not supported on this GPU
@@ -430,12 +430,12 @@ v_mac_f16_e32 v1, v2, v3
// NOSICI: error: instruction not supported on this GPU
// NOSICI: v_madmk_f16 v1, v2, 64.0, v3
-// VI: v_madmk_f16_e32 v1, v2, 0x5400, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x54,0x00,0x00]
+// VI: v_madmk_f16 v1, v2, 0x5400, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x54,0x00,0x00]
v_madmk_f16 v1, v2, 64.0, v3
// NOSICI: error: instruction not supported on this GPU
// NOSICI: v_madak_f16 v1, v2, v3, 64.0
-// VI: v_madak_f16_e32 v1, v2, v3, 0x5400 ; encoding: [0x02,0x07,0x02,0x4a,0x00,0x54,0x00,0x00]
+// VI: v_madak_f16 v1, v2, v3, 0x5400 ; encoding: [0x02,0x07,0x02,0x4a,0x00,0x54,0x00,0x00]
v_madak_f16 v1, v2, v3, 64.0
// NOSICI: error: instruction not supported on this GPU
diff --git a/test/MC/AMDGPU/vop3-convert.s b/test/MC/AMDGPU/vop3-convert.s
index 8bc88a08dda2..781aa672d3c4 100644
--- a/test/MC/AMDGPU/vop3-convert.s
+++ b/test/MC/AMDGPU/vop3-convert.s
@@ -288,31 +288,31 @@ v_or_b32 v1, v2, v3
v_xor_b32 v1, v2, v3
// SICI: v_bfm_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x3c]
-// VI: v_bfm_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_bfm_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
v_bfm_b32 v1, v2, v3
// SICI: v_bcnt_u32_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x44]
-// VI: v_bcnt_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8b,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_bcnt_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8b,0xd2,0x02,0x07,0x02,0x00]
v_bcnt_u32_b32 v1, v2, v3
// SICI: v_mbcnt_lo_u32_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x46]
-// VI: v_mbcnt_lo_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8c,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_mbcnt_lo_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8c,0xd2,0x02,0x07,0x02,0x00]
v_mbcnt_lo_u32_b32 v1, v2, v3
// SICI: v_mbcnt_hi_u32_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x48]
-// VI: v_mbcnt_hi_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8d,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_mbcnt_hi_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8d,0xd2,0x02,0x07,0x02,0x00]
v_mbcnt_hi_u32_b32 v1, v2, v3
// SICI: v_cvt_pk_u16_u32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x60]
-// VI: v_cvt_pk_u16_u32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x97,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pk_u16_u32 v1, v2, v3 ; encoding: [0x01,0x00,0x97,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pk_u16_u32 v1, v2, v3
// SICI: v_cvt_pk_i16_i32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x62]
-// VI: v_cvt_pk_i16_i32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x98,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_cvt_pk_i16_i32 v1, v2, v3 ; encoding: [0x01,0x00,0x98,0xd2,0x02,0x07,0x02,0x00]
v_cvt_pk_i16_i32 v1, v2, v3
// SICI: v_bfm_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x3c]
-// VI: v_bfm_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
+// VI: v_bfm_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
v_bfm_b32 v1, v2, v3
// NOSICI: error: instruction not supported on this GPU
diff --git a/test/MC/AsmParser/altmacro_string_escape.s b/test/MC/AsmParser/altmacro_string_escape.s
new file mode 100644
index 000000000000..bcc9e845953e
--- /dev/null
+++ b/test/MC/AsmParser/altmacro_string_escape.s
@@ -0,0 +1,29 @@
+# RUN: llvm-mc -triple i386-linux-gnu %s| FileCheck %s
+
+.altmacro
+# single-character string escape
+# To include any single character literally in a string
+# (even if the character would otherwise have some special meaning),
+# you can prefix the character with `!'.
+# For example, you can write `<4.3 !> 5.4!!>' to get the literal text `4.3 > 5.4!'.
+
+# CHECK: workForFun:
+.macro fun1 number
+ .if \number=5
+ lableNotWork:
+ .else
+ workForFun:
+ .endif
+.endm
+
+# CHECK: workForFun2:
+.macro fun2 string
+ .if \string
+ workForFun2:
+ .else
+ notworkForFun2:
+ .endif
+.endm
+
+fun1 <5!!>
+fun2 <5!>4>
diff --git a/test/MC/Disassembler/AMDGPU/flat_vi.txt b/test/MC/Disassembler/AMDGPU/flat_vi.txt
index a7013092b493..bcc395078050 100644
--- a/test/MC/Disassembler/AMDGPU/flat_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/flat_vi.txt
@@ -9,39 +9,15 @@
# VI: flat_load_dword v1, v[3:4] glc slc ; encoding: [0x00,0x00,0x53,0xdc,0x03,0x00,0x00,0x01]
0x00 0x00 0x53 0xdc 0x03 0x00 0x00 0x01
-# VI: flat_load_dword v1, v[3:4] glc tfe ; encoding: [0x00,0x00,0x51,0xdc,0x03,0x00,0x80,0x01]
-0x00 0x00 0x51 0xdc 0x03 0x00 0x80 0x01
-
-# VI: flat_load_dword v1, v[3:4] glc slc tfe ; encoding: [0x00,0x00,0x53,0xdc,0x03,0x00,0x80,0x01]
-0x00 0x00 0x53 0xdc 0x03 0x00 0x80 0x01
-
# VI: flat_load_dword v1, v[3:4] slc ; encoding: [0x00,0x00,0x52,0xdc,0x03,0x00,0x00,0x01]
0x00 0x00 0x52 0xdc 0x03 0x00 0x00 0x01
-# VI: flat_load_dword v1, v[3:4] slc tfe ; encoding: [0x00,0x00,0x52,0xdc,0x03,0x00,0x80,0x01]
-0x00 0x00 0x52 0xdc 0x03 0x00 0x80 0x01
-
-# VI: flat_load_dword v1, v[3:4] tfe ; encoding: [0x00,0x00,0x50,0xdc,0x03,0x00,0x80,0x01]
-0x00 0x00 0x50 0xdc 0x03 0x00 0x80 0x01
-
# VI: flat_atomic_add v1, v[3:4], v5 glc slc ; encoding: [0x00,0x00,0x0b,0xdd,0x03,0x05,0x00,0x01]
0x00 0x00 0x0b 0xdd 0x03 0x05 0x00 0x01
-# VI: flat_atomic_add v1, v[3:4], v5 glc tfe ; encoding: [0x00,0x00,0x09,0xdd,0x03,0x05,0x80,0x01]
-0x00 0x00 0x09 0xdd 0x03 0x05 0x80 0x01
-
-# VI: flat_atomic_add v1, v[3:4], v5 glc slc tfe ; encoding: [0x00,0x00,0x0b,0xdd,0x03,0x05,0x80,0x01]
-0x00 0x00 0x0b 0xdd 0x03 0x05 0x80 0x01
-
# VI: flat_atomic_add v[3:4], v5 slc ; encoding: [0x00,0x00,0x0a,0xdd,0x03,0x05,0x00,0x00]
0x00 0x00 0x0a 0xdd 0x03 0x05 0x00 0x00
-# VI: flat_atomic_add v[3:4], v5 slc tfe ; encoding: [0x00,0x00,0x0a,0xdd,0x03,0x05,0x80,0x00]
-0x00 0x00 0x0a 0xdd 0x03 0x05 0x80 0x00
-
-# VI: flat_atomic_add v[3:4], v5 tfe ; encoding: [0x00,0x00,0x08,0xdd,0x03,0x05,0x80,0x00]
-0x00 0x00 0x08 0xdd 0x03 0x05 0x80 0x00
-
# VI: flat_load_ubyte v1, v[3:4] ; encoding: [0x00,0x00,0x40,0xdc,0x03,0x00,0x00,0x01]
0x00 0x00 0x40 0xdc 0x03 0x00 0x00 0x01
diff --git a/test/MC/Disassembler/AMDGPU/literal16_vi.txt b/test/MC/Disassembler/AMDGPU/literal16_vi.txt
index 362e87703694..a3cdae33a4cc 100644
--- a/test/MC/Disassembler/AMDGPU/literal16_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/literal16_vi.txt
@@ -44,11 +44,11 @@
# VI: v_add_f16_e32 v1, 0, v3 ; encoding: [0x80,0x06,0x02,0x3e]
0xff 0x06 0x02 0x3e 0x00 0x00 0x00 0x00
-# VI: v_madmk_f16_e32 v1, v2, 0x41, v3 ; encoding: [0x02,0x07,0x02,0x48,0x41,0x00,0x00,0x00]
+# VI: v_madmk_f16 v1, v2, 0x41, v3 ; encoding: [0x02,0x07,0x02,0x48,0x41,0x00,0x00,0x00]
0x02 0x07 0x02 0x48 0x41 0x00 0x00 0x00
-# VI: v_madmk_f16_e32 v1, v2, 0x10041, v3 ; encoding: [0x02,0x07,0x02,0x48,0x41,0x00,0x01,0x00]
+# VI: v_madmk_f16 v1, v2, 0x10041, v3 ; encoding: [0x02,0x07,0x02,0x48,0x41,0x00,0x01,0x00]
0x02 0x07 0x02 0x48 0x41 0x00 0x01 0x00
-# VI: v_madmk_f16_e32 v1, v2, 0x1000041, v3 ; encoding: [0x02,0x07,0x02,0x48,0x41,0x00,0x00,0x01]
+# VI: v_madmk_f16 v1, v2, 0x1000041, v3 ; encoding: [0x02,0x07,0x02,0x48,0x41,0x00,0x00,0x01]
0x02 0x07 0x02 0x48 0x41 0x00 0x00 0x01
diff --git a/test/MC/Disassembler/AMDGPU/vop2_vi.txt b/test/MC/Disassembler/AMDGPU/vop2_vi.txt
index 4a47c8157971..b6f556bd55be 100644
--- a/test/MC/Disassembler/AMDGPU/vop2_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/vop2_vi.txt
@@ -72,25 +72,25 @@
# VI: v_xor_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2a]
0x02 0x07 0x02 0x2a
-# VI: v_bfm_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_bfm_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x93,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x93 0xd2 0x02 0x07 0x02 0x00
# VI: v_mac_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2c]
0x02 0x07 0x02 0x2c
-# VI: v_madmk_f32_e32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x2e,0x00,0x00,0x80,0x42]
+# VI: v_madmk_f32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x2e,0x00,0x00,0x80,0x42]
0x02 0x07 0x02 0x2e 0x00 0x00 0x80 0x42
-# VI: v_madak_f32_e32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x30,0x00,0x00,0x80,0x42]
+# VI: v_madak_f32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x30,0x00,0x00,0x80,0x42]
0x02 0x07 0x02 0x30 0x00 0x00 0x80 0x42
-# VI: v_bcnt_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8b,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_bcnt_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8b,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x8b 0xd2 0x02 0x07 0x02 0x00
-# VI: v_mbcnt_lo_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8c,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_mbcnt_lo_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8c,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x8c 0xd2 0x02 0x07 0x02 0x00
-# VI: v_mbcnt_hi_u32_b32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x8d,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_mbcnt_hi_u32_b32 v1, v2, v3 ; encoding: [0x01,0x00,0x8d,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x8d 0xd2 0x02 0x07 0x02 0x00
# VI: v_add_i32_e32 v1, vcc, v2, v3 ; encoding: [0x02,0x07,0x02,0x32]
@@ -171,25 +171,25 @@
# VI: v_subbrev_u32_e64 v1, s[0:1], v2, v3, vcc ; encoding: [0x01,0x00,0x1e,0xd1,0x02,0x07,0xaa,0x01]
0x01 0x00 0x1e 0xd1 0x02 0x07 0xaa 0x01
-# VI: v_ldexp_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x88,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_ldexp_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x88,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x88 0xd2 0x02 0x07 0x02 0x00
-# VI: v_cvt_pkaccum_u8_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0xf0,0xd1,0x02,0x07,0x02,0x00]
+# VI: v_cvt_pkaccum_u8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0xf0,0xd1,0x02,0x07,0x02,0x00]
0x01 0x00 0xf0 0xd1 0x02 0x07 0x02 0x00
-# VI: v_cvt_pknorm_i16_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x94,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_cvt_pknorm_i16_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x94,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x94 0xd2 0x02 0x07 0x02 0x00
-# VI: v_cvt_pknorm_u16_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x95,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_cvt_pknorm_u16_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x95,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x95 0xd2 0x02 0x07 0x02 0x00
-# VI: v_cvt_pkrtz_f16_f32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x96,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_cvt_pkrtz_f16_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x96,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x96 0xd2 0x02 0x07 0x02 0x00
-# VI: v_cvt_pk_u16_u32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x97,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_cvt_pk_u16_u32 v1, v2, v3 ; encoding: [0x01,0x00,0x97,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x97 0xd2 0x02 0x07 0x02 0x00
-# VI: v_cvt_pk_i16_i32_e64 v1, v2, v3 ; encoding: [0x01,0x00,0x98,0xd2,0x02,0x07,0x02,0x00]
+# VI: v_cvt_pk_i16_i32 v1, v2, v3 ; encoding: [0x01,0x00,0x98,0xd2,0x02,0x07,0x02,0x00]
0x01 0x00 0x98 0xd2 0x02 0x07 0x02 0x00
# VI: v_add_f16_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x3e]
@@ -207,10 +207,10 @@
# VI: v_mac_f16_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x46]
0x02 0x07 0x02 0x46
-# VI: v_madmk_f16_e32 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x00,0x80,0x42]
+# VI: v_madmk_f16 v1, v2, 0x42800000, v3 ; encoding: [0x02,0x07,0x02,0x48,0x00,0x00,0x80,0x42]
0x02 0x07 0x02 0x48 0x00 0x00 0x80 0x42
-# VI: v_madak_f16_e32 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x4a,0x00,0x00,0x80,0x42]
+# VI: v_madak_f16 v1, v2, v3, 0x42800000 ; encoding: [0x02,0x07,0x02,0x4a,0x00,0x00,0x80,0x42]
0x02 0x07 0x02 0x4a 0x00 0x00 0x80 0x42
# VI: v_add_u16_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x4c]
diff --git a/test/MC/Disassembler/AMDGPU/vop3_vi.txt b/test/MC/Disassembler/AMDGPU/vop3_vi.txt
index c15fbaa1e3a8..a1cc1f06c3cb 100644
--- a/test/MC/Disassembler/AMDGPU/vop3_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/vop3_vi.txt
@@ -81,6 +81,24 @@
# VI: v_clrexcp ; encoding: [0x00,0x00,0x75,0xd1,0x00,0x00,0x00,0x00]
0x00 0x00 0x75 0xd1 0x00 0x00 0x00 0x00
+# VI: v_fract_f64_e64 v[5:6], s[2:3] ; encoding: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x00]
+0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x00
+
+# VI: v_fract_f64_e64 v[5:6], -4.0 ; encoding: [0x05,0x00,0x72,0xd1,0xf7,0x00,0x00,0x00]
+0x05,0x00,0x72,0xd1,0xf7,0x00,0x00,0x00
+
+# VI: v_fract_f64_e64 v[5:6], -s[2:3] ; encoding: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x20]
+0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x20
+
+# VI: v_fract_f64_e64 v[5:6], |s[2:3]| ; encoding: [0x05,0x01,0x72,0xd1,0x02,0x00,0x00,0x00]
+0x05,0x01,0x72,0xd1,0x02,0x00,0x00,0x00
+
+# VI: v_fract_f64_e64 v[5:6], s[2:3] clamp ; encoding: [0x05,0x80,0x72,0xd1,0x02,0x00,0x00,0x00]
+0x05,0x80,0x72,0xd1,0x02,0x00,0x00,0x00
+
+# VI: v_fract_f64_e64 v[5:6], s[2:3] mul:2 ; encoding: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x08]
+0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x08
+
# VI: v_fract_f32_e64 v1, -v2 ; encoding: [0x01,0x00,0x5b,0xd1,0x02,0x01,0x00,0x20]
0x01 0x00 0x5b 0xd1 0x02 0x01 0x00 0x20
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding-p9vector.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding-p9vector.txt
new file mode 100644
index 000000000000..1a7964808a44
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding-p9vector.txt
@@ -0,0 +1,4 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64le-unknown-unknown -mcpu=pwr9 | FileCheck %s
+
+# CHECK: mtvsrdd 6, 0, 3
+0x66 0x1b 0xc0 0x7c
diff --git a/test/MC/Disassembler/SystemZ/insns-z13.txt b/test/MC/Disassembler/SystemZ/insns-z13.txt
index 5a983860df1a..4f5ec43f7348 100644
--- a/test/MC/Disassembler/SystemZ/insns-z13.txt
+++ b/test/MC/Disassembler/SystemZ/insns-z13.txt
@@ -2,6 +2,297 @@
# RUN: llvm-mc --disassemble %s -triple=s390x-linux-gnu -mcpu=z13 \
# RUN: | FileCheck %s
+# CHECK: lcbb %r0, 0, 0
+0xe7 0x00 0x00 0x00 0x00 0x27
+
+# CHECK: lcbb %r0, 0, 15
+0xe7 0x00 0x00 0x00 0xf0 0x27
+
+# CHECK: lcbb %r0, 4095, 0
+0xe7 0x00 0x0f 0xff 0x00 0x27
+
+# CHECK: lcbb %r0, 0(%r15), 0
+0xe7 0x00 0xf0 0x00 0x00 0x27
+
+# CHECK: lcbb %r0, 0(%r15,%r1), 0
+0xe7 0x0f 0x10 0x00 0x00 0x27
+
+# CHECK: lcbb %r15, 0, 0
+0xe7 0xf0 0x00 0x00 0x00 0x27
+
+# CHECK: lcbb %r2, 1383(%r3,%r4), 8
+0xe7 0x23 0x45 0x67 0x80 0x27
+
+# CHECK: llzrgf %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x3a
+
+# CHECK: llzrgf %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x3a
+
+# CHECK: llzrgf %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x3a
+
+# CHECK: llzrgf %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x3a
+
+# CHECK: llzrgf %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x3a
+
+# CHECK: llzrgf %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x3a
+
+# CHECK: llzrgf %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x3a
+
+# CHECK: llzrgf %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x3a
+
+# CHECK: llzrgf %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x3a
+
+# CHECK: llzrgf %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x3a
+
+# CHECK: lochi %r11, 42, 0
+0xec 0xb0 0x00 0x2a 0x00 0x42
+
+# CHECK: lochio %r11, 42
+0xec 0xb1 0x00 0x2a 0x00 0x42
+
+# CHECK: lochih %r11, 42
+0xec 0xb2 0x00 0x2a 0x00 0x42
+
+# CHECK: lochinle %r11, 42
+0xec 0xb3 0x00 0x2a 0x00 0x42
+
+# CHECK: lochil %r11, -1
+0xec 0xb4 0xff 0xff 0x00 0x42
+
+# CHECK: lochinhe %r11, 42
+0xec 0xb5 0x00 0x2a 0x00 0x42
+
+# CHECK: lochilh %r11, -1
+0xec 0xb6 0xff 0xff 0x00 0x42
+
+# CHECK: lochine %r11, 0
+0xec 0xb7 0x00 0x00 0x00 0x42
+
+# CHECK: lochie %r11, 0
+0xec 0xb8 0x00 0x00 0x00 0x42
+
+# CHECK: lochinlh %r11, 42
+0xec 0xb9 0x00 0x2a 0x00 0x42
+
+# CHECK: lochihe %r11, 255
+0xec 0xba 0x00 0xff 0x00 0x42
+
+# CHECK: lochinl %r11, 255
+0xec 0xbb 0x00 0xff 0x00 0x42
+
+# CHECK: lochile %r11, 32767
+0xec 0xbc 0x7f 0xff 0x00 0x42
+
+# CHECK: lochinh %r11, 32767
+0xec 0xbd 0x7f 0xff 0x00 0x42
+
+# CHECK: lochino %r11, 32512
+0xec 0xbe 0x7f 0x00 0x00 0x42
+
+# CHECK: lochi %r11, 32512, 15
+0xec 0xbf 0x7f 0x00 0x00 0x42
+
+# CHECK: locghi %r11, 42, 0
+0xec 0xb0 0x00 0x2a 0x00 0x46
+
+# CHECK: locghio %r11, 42
+0xec 0xb1 0x00 0x2a 0x00 0x46
+
+# CHECK: locghih %r11, 42
+0xec 0xb2 0x00 0x2a 0x00 0x46
+
+# CHECK: locghinle %r11, 42
+0xec 0xb3 0x00 0x2a 0x00 0x46
+
+# CHECK: locghil %r11, -1
+0xec 0xb4 0xff 0xff 0x00 0x46
+
+# CHECK: locghinhe %r11, 42
+0xec 0xb5 0x00 0x2a 0x00 0x46
+
+# CHECK: locghilh %r11, -1
+0xec 0xb6 0xff 0xff 0x00 0x46
+
+# CHECK: locghine %r11, 0
+0xec 0xb7 0x00 0x00 0x00 0x46
+
+# CHECK: locghie %r11, 0
+0xec 0xb8 0x00 0x00 0x00 0x46
+
+# CHECK: locghinlh %r11, 42
+0xec 0xb9 0x00 0x2a 0x00 0x46
+
+# CHECK: locghihe %r11, 255
+0xec 0xba 0x00 0xff 0x00 0x46
+
+# CHECK: locghinl %r11, 255
+0xec 0xbb 0x00 0xff 0x00 0x46
+
+# CHECK: locghile %r11, 32767
+0xec 0xbc 0x7f 0xff 0x00 0x46
+
+# CHECK: locghinh %r11, 32767
+0xec 0xbd 0x7f 0xff 0x00 0x46
+
+# CHECK: locghino %r11, 32512
+0xec 0xbe 0x7f 0x00 0x00 0x46
+
+# CHECK: locghi %r11, 32512, 15
+0xec 0xbf 0x7f 0x00 0x00 0x46
+
+# CHECK: lochhi %r11, 42, 0
+0xec 0xb0 0x00 0x2a 0x00 0x4e
+
+# CHECK: lochhio %r11, 42
+0xec 0xb1 0x00 0x2a 0x00 0x4e
+
+# CHECK: lochhih %r11, 42
+0xec 0xb2 0x00 0x2a 0x00 0x4e
+
+# CHECK: lochhinle %r11, 42
+0xec 0xb3 0x00 0x2a 0x00 0x4e
+
+# CHECK: lochhil %r11, -1
+0xec 0xb4 0xff 0xff 0x00 0x4e
+
+# CHECK: lochhinhe %r11, 42
+0xec 0xb5 0x00 0x2a 0x00 0x4e
+
+# CHECK: lochhilh %r11, -1
+0xec 0xb6 0xff 0xff 0x00 0x4e
+
+# CHECK: lochhine %r11, 0
+0xec 0xb7 0x00 0x00 0x00 0x4e
+
+# CHECK: lochhie %r11, 0
+0xec 0xb8 0x00 0x00 0x00 0x4e
+
+# CHECK: lochhinlh %r11, 42
+0xec 0xb9 0x00 0x2a 0x00 0x4e
+
+# CHECK: lochhihe %r11, 255
+0xec 0xba 0x00 0xff 0x00 0x4e
+
+# CHECK: lochhinl %r11, 255
+0xec 0xbb 0x00 0xff 0x00 0x4e
+
+# CHECK: lochhile %r11, 32767
+0xec 0xbc 0x7f 0xff 0x00 0x4e
+
+# CHECK: lochhinh %r11, 32767
+0xec 0xbd 0x7f 0xff 0x00 0x4e
+
+# CHECK: lochhino %r11, 32512
+0xec 0xbe 0x7f 0x00 0x00 0x4e
+
+# CHECK: lochhi %r11, 32512, 15
+0xec 0xbf 0x7f 0x00 0x00 0x4e
+
+# CHECK: locfh %r7, 6399(%r8), 0
+0xeb 0x70 0x88 0xff 0x01 0xe0
+
+# CHECK: locfho %r7, 6399(%r8)
+0xeb 0x71 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhh %r7, 6399(%r8)
+0xeb 0x72 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhnle %r7, 6399(%r8)
+0xeb 0x73 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhl %r7, 6399(%r8)
+0xeb 0x74 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhnhe %r7, 6399(%r8)
+0xeb 0x75 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhlh %r7, 6399(%r8)
+0xeb 0x76 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhne %r7, 6399(%r8)
+0xeb 0x77 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhe %r7, 6399(%r8)
+0xeb 0x78 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhnlh %r7, 6399(%r8)
+0xeb 0x79 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhhe %r7, 6399(%r8)
+0xeb 0x7a 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhnl %r7, 6399(%r8)
+0xeb 0x7b 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhle %r7, 6399(%r8)
+0xeb 0x7c 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhnh %r7, 6399(%r8)
+0xeb 0x7d 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhno %r7, 6399(%r8)
+0xeb 0x7e 0x88 0xff 0x01 0xe0
+
+# CHECK: locfh %r7, 6399(%r8), 15
+0xeb 0x7f 0x88 0xff 0x01 0xe0
+
+# CHECK: locfhr %r11, %r3, 0
+0xb9 0xe0 0x00 0xb3
+
+# CHECK: locfhro %r11, %r3
+0xb9 0xe0 0x10 0xb3
+
+# CHECK: locfhrh %r11, %r3
+0xb9 0xe0 0x20 0xb3
+
+# CHECK: locfhrnle %r11, %r3
+0xb9 0xe0 0x30 0xb3
+
+# CHECK: locfhrl %r11, %r3
+0xb9 0xe0 0x40 0xb3
+
+# CHECK: locfhrnhe %r11, %r3
+0xb9 0xe0 0x50 0xb3
+
+# CHECK: locfhrlh %r11, %r3
+0xb9 0xe0 0x60 0xb3
+
+# CHECK: locfhrne %r11, %r3
+0xb9 0xe0 0x70 0xb3
+
+# CHECK: locfhre %r11, %r3
+0xb9 0xe0 0x80 0xb3
+
+# CHECK: locfhrnlh %r11, %r3
+0xb9 0xe0 0x90 0xb3
+
+# CHECK: locfhrhe %r11, %r3
+0xb9 0xe0 0xa0 0xb3
+
+# CHECK: locfhrnl %r11, %r3
+0xb9 0xe0 0xb0 0xb3
+
+# CHECK: locfhrle %r11, %r3
+0xb9 0xe0 0xc0 0xb3
+
+# CHECK: locfhrnh %r11, %r3
+0xb9 0xe0 0xd0 0xb3
+
+# CHECK: locfhrno %r11, %r3
+0xb9 0xe0 0xe0 0xb3
+
+# CHECK: locfhr %r11, %r3, 15
+0xb9 0xe0 0xf0 0xb3
+
# CHECK: lzrf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x3b
@@ -62,4548 +353,4299 @@
# CHECK: lzrg %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x2a
-# CHECK: llzrgf %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x3a
+# CHECK: ppno %r2, %r10
+0xb9 0x3c 0x00 0x2a
-# CHECK: llzrgf %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x3a
+# CHECK: ppno %r2, %r14
+0xb9 0x3c 0x00 0x2e
-# CHECK: llzrgf %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x3a
+# CHECK: ppno %r14, %r2
+0xb9 0x3c 0x00 0xe2
-# CHECK: llzrgf %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x3a
+# CHECK: ppno %r14, %r10
+0xb9 0x3c 0x00 0xea
-# CHECK: llzrgf %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x3a
+# CHECK: stocfh %r0, 0, 0
+0xeb 0x00 0x00 0x00 0x00 0xe1
-# CHECK: llzrgf %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x3a
+# CHECK: stocfh %r0, 0, 15
+0xeb 0x0f 0x00 0x00 0x00 0xe1
-# CHECK: llzrgf %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x3a
+# CHECK: stocfh %r0, -524288, 0
+0xeb 0x00 0x00 0x00 0x80 0xe1
-# CHECK: llzrgf %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x3a
+# CHECK: stocfh %r0, 524287, 0
+0xeb 0x00 0x0f 0xff 0x7f 0xe1
-# CHECK: llzrgf %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x3a
+# CHECK: stocfh %r0, 0(%r1), 0
+0xeb 0x00 0x10 0x00 0x00 0xe1
-# CHECK: llzrgf %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x3a
+# CHECK: stocfh %r0, 0(%r15), 0
+0xeb 0x00 0xf0 0x00 0x00 0xe1
-#CHECK: lcbb %r0, 0, 0
-0xe7 0x00 0x00 0x00 0x00 0x27
+# CHECK: stocfh %r15, 0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xe1
+
+# CHECK: stocfho %r1, 2(%r3)
+0xeb 0x11 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhh %r1, 2(%r3)
+0xeb 0x12 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhnle %r1, 2(%r3)
+0xeb 0x13 0x30 0x02 0x00 0xe1
-#CHECK: lcbb %r1, 2475(%r7,%r8), 12
-0xe7 0x17 0x89 0xab 0xc0 0x27
+# CHECK: stocfhl %r1, 2(%r3)
+0xeb 0x14 0x30 0x02 0x00 0xe1
-#CHECK: lcbb %r15, 4095(%r15,%r15), 15
-0xe7 0xff 0xff 0xff 0xf0 0x27
+# CHECK: stocfhnhe %r1, 2(%r3)
+0xeb 0x15 0x30 0x02 0x00 0xe1
-#CHECK: va %v0, %v0, %v0, 11
+# CHECK: stocfhlh %r1, 2(%r3)
+0xeb 0x16 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhne %r1, 2(%r3)
+0xeb 0x17 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhe %r1, 2(%r3)
+0xeb 0x18 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhnlh %r1, 2(%r3)
+0xeb 0x19 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhhe %r1, 2(%r3)
+0xeb 0x1a 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhnl %r1, 2(%r3)
+0xeb 0x1b 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhle %r1, 2(%r3)
+0xeb 0x1c 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhnh %r1, 2(%r3)
+0xeb 0x1d 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfhno %r1, 2(%r3)
+0xeb 0x1e 0x30 0x02 0x00 0xe1
+
+# CHECK: stocfh %r1, 2(%r3), 15
+0xeb 0x1f 0x30 0x02 0x00 0xe1
+
+# CHECK: va %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xf3
-#CHECK: va %v18, %v3, %v20, 11
+# CHECK: va %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xf3
-#CHECK: va %v31, %v31, %v31, 11
+# CHECK: va %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xf3
-#CHECK: vab %v0, %v0, %v0
+# CHECK: vab %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf3
-#CHECK: vab %v18, %v3, %v20
+# CHECK: vab %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf3
-#CHECK: vab %v31, %v31, %v31
+# CHECK: vab %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf3
-#CHECK: vacc %v0, %v0, %v0, 11
+# CHECK: vac %v0, %v0, %v0, %v0, 11
+0xe7 0x00 0x0b 0x00 0x00 0xbb
+
+# CHECK: vac %v3, %v20, %v5, %v22, 11
+0xe7 0x34 0x5b 0x00 0x65 0xbb
+
+# CHECK: vac %v31, %v31, %v31, %v31, 11
+0xe7 0xff 0xfb 0x00 0xff 0xbb
+
+# CHECK: vacc %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xf1
-#CHECK: vacc %v18, %v3, %v20, 11
+# CHECK: vacc %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xf1
-#CHECK: vacc %v31, %v31, %v31, 11
+# CHECK: vacc %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xf1
-#CHECK: vaccb %v0, %v0, %v0
+# CHECK: vaccb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf1
-#CHECK: vaccb %v18, %v3, %v20
+# CHECK: vaccb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf1
-#CHECK: vaccb %v31, %v31, %v31
+# CHECK: vaccb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf1
-#CHECK: vaccc %v0, %v0, %v0, %v0, 11
+# CHECK: vaccc %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xb9
-#CHECK: vaccc %v3, %v20, %v5, %v22, 11
+# CHECK: vaccc %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xb9
-#CHECK: vaccc %v31, %v31, %v31, %v31, 11
+# CHECK: vaccc %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xb9
-#CHECK: vacccq %v0, %v0, %v0, %v0
+# CHECK: vacccq %v0, %v0, %v0, %v0
0xe7 0x00 0x04 0x00 0x00 0xb9
-#CHECK: vacccq %v3, %v20, %v5, %v22
+# CHECK: vacccq %v3, %v20, %v5, %v22
0xe7 0x34 0x54 0x00 0x65 0xb9
-#CHECK: vacccq %v31, %v31, %v31, %v31
+# CHECK: vacccq %v31, %v31, %v31, %v31
0xe7 0xff 0xf4 0x00 0xff 0xb9
-#CHECK: vaccf %v0, %v0, %v0
+# CHECK: vaccf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf1
-#CHECK: vaccf %v18, %v3, %v20
+# CHECK: vaccf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf1
-#CHECK: vaccf %v31, %v31, %v31
+# CHECK: vaccf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf1
-#CHECK: vaccg %v0, %v0, %v0
+# CHECK: vaccg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf1
-#CHECK: vaccg %v18, %v3, %v20
+# CHECK: vaccg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf1
-#CHECK: vaccg %v31, %v31, %v31
+# CHECK: vaccg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf1
-#CHECK: vacch %v0, %v0, %v0
+# CHECK: vacch %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf1
-#CHECK: vacch %v18, %v3, %v20
+# CHECK: vacch %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf1
-#CHECK: vacch %v31, %v31, %v31
+# CHECK: vacch %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf1
-#CHECK: vaccq %v0, %v0, %v0
+# CHECK: vaccq %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x40 0xf1
-#CHECK: vaccq %v18, %v3, %v20
+# CHECK: vaccq %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x4a 0xf1
-#CHECK: vaccq %v31, %v31, %v31
+# CHECK: vaccq %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x4e 0xf1
-#CHECK: vac %v0, %v0, %v0, %v0, 11
-0xe7 0x00 0x0b 0x00 0x00 0xbb
-
-#CHECK: vac %v3, %v20, %v5, %v22, 11
-0xe7 0x34 0x5b 0x00 0x65 0xbb
-
-#CHECK: vac %v31, %v31, %v31, %v31, 11
-0xe7 0xff 0xfb 0x00 0xff 0xbb
-
-#CHECK: vacq %v0, %v0, %v0, %v0
+# CHECK: vacq %v0, %v0, %v0, %v0
0xe7 0x00 0x04 0x00 0x00 0xbb
-#CHECK: vacq %v3, %v20, %v5, %v22
+# CHECK: vacq %v3, %v20, %v5, %v22
0xe7 0x34 0x54 0x00 0x65 0xbb
-#CHECK: vacq %v31, %v31, %v31, %v31
+# CHECK: vacq %v31, %v31, %v31, %v31
0xe7 0xff 0xf4 0x00 0xff 0xbb
-#CHECK: vaf %v0, %v0, %v0
+# CHECK: vaf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf3
-#CHECK: vaf %v18, %v3, %v20
+# CHECK: vaf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf3
-#CHECK: vaf %v31, %v31, %v31
+# CHECK: vaf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf3
-#CHECK: vag %v0, %v0, %v0
+# CHECK: vag %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf3
-#CHECK: vag %v18, %v3, %v20
+# CHECK: vag %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf3
-#CHECK: vag %v31, %v31, %v31
+# CHECK: vag %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf3
-#CHECK: vah %v0, %v0, %v0
+# CHECK: vah %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf3
-#CHECK: vah %v18, %v3, %v20
+# CHECK: vah %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf3
-#CHECK: vah %v31, %v31, %v31
+# CHECK: vah %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf3
-#CHECK: vaq %v0, %v0, %v0
+# CHECK: vaq %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x40 0xf3
-#CHECK: vaq %v18, %v3, %v20
+# CHECK: vaq %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x4a 0xf3
-#CHECK: vaq %v31, %v31, %v31
+# CHECK: vaq %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x4e 0xf3
-#CHECK: vavg %v0, %v0, %v0, 11
+# CHECK: vavg %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xf2
-#CHECK: vavg %v18, %v3, %v20, 11
+# CHECK: vavg %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xf2
-#CHECK: vavg %v31, %v31, %v31, 11
+# CHECK: vavg %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xf2
-#CHECK: vavgb %v0, %v0, %v0
+# CHECK: vavgb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf2
-#CHECK: vavgb %v18, %v3, %v20
+# CHECK: vavgb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf2
-#CHECK: vavgb %v31, %v31, %v31
+# CHECK: vavgb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf2
-#CHECK: vavgf %v0, %v0, %v0
+# CHECK: vavgf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf2
-#CHECK: vavgf %v18, %v3, %v20
+# CHECK: vavgf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf2
-#CHECK: vavgf %v31, %v31, %v31
+# CHECK: vavgf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf2
-#CHECK: vavgg %v0, %v0, %v0
+# CHECK: vavgg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf2
-#CHECK: vavgg %v18, %v3, %v20
+# CHECK: vavgg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf2
-#CHECK: vavgg %v31, %v31, %v31
+# CHECK: vavgg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf2
-#CHECK: vavgh %v0, %v0, %v0
+# CHECK: vavgh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf2
-#CHECK: vavgh %v18, %v3, %v20
+# CHECK: vavgh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf2
-#CHECK: vavgh %v31, %v31, %v31
+# CHECK: vavgh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf2
-#CHECK: vavgl %v0, %v0, %v0, 11
+# CHECK: vavgl %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xf0
-#CHECK: vavgl %v18, %v3, %v20, 11
+# CHECK: vavgl %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xf0
-#CHECK: vavgl %v31, %v31, %v31, 11
+# CHECK: vavgl %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xf0
-#CHECK: vavglb %v0, %v0, %v0
+# CHECK: vavglb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf0
-#CHECK: vavglb %v18, %v3, %v20
+# CHECK: vavglb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf0
-#CHECK: vavglb %v31, %v31, %v31
+# CHECK: vavglb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf0
-#CHECK: vavglf %v0, %v0, %v0
+# CHECK: vavglf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf0
-#CHECK: vavglf %v18, %v3, %v20
+# CHECK: vavglf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf0
-#CHECK: vavglf %v31, %v31, %v31
+# CHECK: vavglf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf0
-#CHECK: vavglg %v0, %v0, %v0
+# CHECK: vavglg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf0
-#CHECK: vavglg %v18, %v3, %v20
+# CHECK: vavglg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf0
-#CHECK: vavglg %v31, %v31, %v31
+# CHECK: vavglg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf0
-#CHECK: vavglh %v0, %v0, %v0
+# CHECK: vavglh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf0
-#CHECK: vavglh %v18, %v3, %v20
+# CHECK: vavglh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf0
-#CHECK: vavglh %v31, %v31, %v31
+# CHECK: vavglh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf0
-#CHECK: vcdg %v0, %v0, 11, 0, 0
+# CHECK: vcdg %v0, %v0, 11, 0, 0
0xe7 0x00 0x00 0x00 0xb0 0xc3
-#CHECK: vcdg %v19, %v14, 11, 4, 10
+# CHECK: vcdg %v19, %v14, 11, 4, 10
0xe7 0x3e 0x00 0xa4 0xb8 0xc3
-#CHECK: vcdg %v31, %v31, 11, 7, 15
+# CHECK: vcdg %v31, %v31, 11, 7, 15
0xe7 0xff 0x00 0xf7 0xbc 0xc3
-#CHECK: vcdgb %v0, %v0, 0, 0
+# CHECK: vcdgb %v0, %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0xc3
-#CHECK: vcdgb %v19, %v14, 4, 10
+# CHECK: vcdgb %v19, %v14, 4, 10
0xe7 0x3e 0x00 0xa4 0x38 0xc3
-#CHECK: vcdgb %v31, %v31, 7, 15
+# CHECK: vcdgb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xf7 0x3c 0xc3
-#CHECK: vcdlg %v0, %v0, 11, 0, 0
+# CHECK: vcdlg %v0, %v0, 11, 0, 0
0xe7 0x00 0x00 0x00 0xb0 0xc1
-#CHECK: vcdlg %v19, %v14, 11, 4, 10
+# CHECK: vcdlg %v19, %v14, 11, 4, 10
0xe7 0x3e 0x00 0xa4 0xb8 0xc1
-#CHECK: vcdlg %v31, %v31, 11, 7, 15
+# CHECK: vcdlg %v31, %v31, 11, 7, 15
0xe7 0xff 0x00 0xf7 0xbc 0xc1
-#CHECK: vcdlgb %v0, %v0, 0, 0
+# CHECK: vcdlgb %v0, %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0xc1
-#CHECK: vcdlgb %v19, %v14, 4, 10
+# CHECK: vcdlgb %v19, %v14, 4, 10
0xe7 0x3e 0x00 0xa4 0x38 0xc1
-#CHECK: vcdlgb %v31, %v31, 7, 15
+# CHECK: vcdlgb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xf7 0x3c 0xc1
-#CHECK: vceq %v0, %v0, %v0, 11, 9
+# CHECK: vceq %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x90 0xb0 0xf8
-#CHECK: vceq %v18, %v3, %v20, 11, 9
+# CHECK: vceq %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x90 0xba 0xf8
-#CHECK: vceq %v7, %v24, %v9, 11, 9
+# CHECK: vceq %v7, %v24, %v9, 11, 9
0xe7 0x78 0x90 0x90 0xb4 0xf8
-#CHECK: vceq %v31, %v31, %v31, 11, 9
+# CHECK: vceq %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x90 0xbe 0xf8
-#CHECK: vceqb %v0, %v0, %v0
+# CHECK: vceqb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf8
-#CHECK: vceqb %v18, %v3, %v20
+# CHECK: vceqb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf8
-#CHECK: vceqbs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x04 0xf8
-
-#CHECK: vceqb %v31, %v31, %v31
+# CHECK: vceqb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf8
-#CHECK: vceqf %v0, %v0, %v0
+# CHECK: vceqbs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x04 0xf8
+
+# CHECK: vceqf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf8
-#CHECK: vceqf %v18, %v3, %v20
+# CHECK: vceqf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf8
-#CHECK: vceqfs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x24 0xf8
-
-#CHECK: vceqf %v31, %v31, %v31
+# CHECK: vceqf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf8
-#CHECK: vceqg %v0, %v0, %v0
+# CHECK: vceqfs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x24 0xf8
+
+# CHECK: vceqg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf8
-#CHECK: vceqg %v18, %v3, %v20
+# CHECK: vceqg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf8
-#CHECK: vceqgs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x34 0xf8
-
-#CHECK: vceqg %v31, %v31, %v31
+# CHECK: vceqg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf8
-#CHECK: vceqh %v0, %v0, %v0
+# CHECK: vceqgs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x34 0xf8
+
+# CHECK: vceqh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf8
-#CHECK: vceqh %v18, %v3, %v20
+# CHECK: vceqh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf8
-#CHECK: vceqhs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x14 0xf8
-
-#CHECK: vceqh %v31, %v31, %v31
+# CHECK: vceqh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf8
-#CHECK: vcgd %v0, %v0, 11, 0, 0
+# CHECK: vceqhs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x14 0xf8
+
+# CHECK: vcgd %v0, %v0, 11, 0, 0
0xe7 0x00 0x00 0x00 0xb0 0xc2
-#CHECK: vcgd %v19, %v14, 11, 4, 10
+# CHECK: vcgd %v19, %v14, 11, 4, 10
0xe7 0x3e 0x00 0xa4 0xb8 0xc2
-#CHECK: vcgd %v31, %v31, 11, 7, 15
+# CHECK: vcgd %v31, %v31, 11, 7, 15
0xe7 0xff 0x00 0xf7 0xbc 0xc2
-#CHECK: vcgdb %v0, %v0, 0, 0
+# CHECK: vcgdb %v0, %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0xc2
-#CHECK: vcgdb %v19, %v14, 4, 10
+# CHECK: vcgdb %v19, %v14, 4, 10
0xe7 0x3e 0x00 0xa4 0x38 0xc2
-#CHECK: vcgdb %v31, %v31, 7, 15
+# CHECK: vcgdb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xf7 0x3c 0xc2
-#CHECK: vch %v0, %v0, %v0, 11, 9
+# CHECK: vch %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x90 0xb0 0xfb
-#CHECK: vch %v18, %v3, %v20, 11, 9
+# CHECK: vch %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x90 0xba 0xfb
-#CHECK: vch %v7, %v24, %v9, 11, 9
+# CHECK: vch %v7, %v24, %v9, 11, 9
0xe7 0x78 0x90 0x90 0xb4 0xfb
-#CHECK: vch %v31, %v31, %v31, 11, 9
+# CHECK: vch %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x90 0xbe 0xfb
-#CHECK: vchb %v0, %v0, %v0
+# CHECK: vchb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xfb
-#CHECK: vchb %v18, %v3, %v20
+# CHECK: vchb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xfb
-#CHECK: vchbs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x04 0xfb
-
-#CHECK: vchb %v31, %v31, %v31
+# CHECK: vchb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xfb
-#CHECK: vchf %v0, %v0, %v0
+# CHECK: vchbs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x04 0xfb
+
+# CHECK: vchf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xfb
-#CHECK: vchf %v18, %v3, %v20
+# CHECK: vchf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xfb
-#CHECK: vchfs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x24 0xfb
-
-#CHECK: vchf %v31, %v31, %v31
+# CHECK: vchf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xfb
-#CHECK: vchg %v0, %v0, %v0
+# CHECK: vchfs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x24 0xfb
+
+# CHECK: vchg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xfb
-#CHECK: vchg %v18, %v3, %v20
+# CHECK: vchg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xfb
-#CHECK: vchgs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x34 0xfb
-
-#CHECK: vchg %v31, %v31, %v31
+# CHECK: vchg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xfb
-#CHECK: vchh %v0, %v0, %v0
+# CHECK: vchgs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x34 0xfb
+
+# CHECK: vchh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xfb
-#CHECK: vchh %v18, %v3, %v20
+# CHECK: vchh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xfb
-#CHECK: vchhs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x14 0xfb
-
-#CHECK: vchh %v31, %v31, %v31
+# CHECK: vchh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xfb
-#CHECK: vchl %v0, %v0, %v0, 11, 9
+# CHECK: vchhs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x14 0xfb
+
+# CHECK: vchl %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x90 0xb0 0xf9
-#CHECK: vchl %v18, %v3, %v20, 11, 9
+# CHECK: vchl %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x90 0xba 0xf9
-#CHECK: vchl %v7, %v24, %v9, 11, 9
+# CHECK: vchl %v7, %v24, %v9, 11, 9
0xe7 0x78 0x90 0x90 0xb4 0xf9
-#CHECK: vchl %v31, %v31, %v31, 11, 9
+# CHECK: vchl %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x90 0xbe 0xf9
-#CHECK: vchlb %v0, %v0, %v0
+# CHECK: vchlb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf9
-#CHECK: vchlb %v18, %v3, %v20
+# CHECK: vchlb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf9
-#CHECK: vchlbs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x04 0xf9
-
-#CHECK: vchlb %v31, %v31, %v31
+# CHECK: vchlb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf9
-#CHECK: vchlf %v0, %v0, %v0
+# CHECK: vchlbs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x04 0xf9
+
+# CHECK: vchlf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf9
-#CHECK: vchlf %v18, %v3, %v20
+# CHECK: vchlf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf9
-#CHECK: vchlfs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x24 0xf9
-
-#CHECK: vchlf %v31, %v31, %v31
+# CHECK: vchlf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf9
-#CHECK: vchlg %v0, %v0, %v0
+# CHECK: vchlfs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x24 0xf9
+
+# CHECK: vchlg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf9
-#CHECK: vchlg %v18, %v3, %v20
+# CHECK: vchlg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf9
-#CHECK: vchlgs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x34 0xf9
-
-#CHECK: vchlg %v31, %v31, %v31
+# CHECK: vchlg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf9
-#CHECK: vchlh %v0, %v0, %v0
+# CHECK: vchlgs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x34 0xf9
+
+# CHECK: vchlh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf9
-#CHECK: vchlh %v18, %v3, %v20
+# CHECK: vchlh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf9
-#CHECK: vchlhs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x14 0xf9
-
-#CHECK: vchlh %v31, %v31, %v31
+# CHECK: vchlh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf9
-#CHECK: vcksm %v0, %v0, %v0
+# CHECK: vchlhs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x14 0xf9
+
+# CHECK: vcksm %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x66
-#CHECK: vcksm %v18, %v3, %v20
+# CHECK: vcksm %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x66
-#CHECK: vcksm %v31, %v31, %v31
+# CHECK: vcksm %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x66
-#CHECK: vclgd %v0, %v0, 11, 0, 0
+# CHECK: vclgd %v0, %v0, 11, 0, 0
0xe7 0x00 0x00 0x00 0xb0 0xc0
-#CHECK: vclgd %v19, %v14, 11, 4, 10
+# CHECK: vclgd %v19, %v14, 11, 4, 10
0xe7 0x3e 0x00 0xa4 0xb8 0xc0
-#CHECK: vclgd %v31, %v31, 11, 7, 15
+# CHECK: vclgd %v31, %v31, 11, 7, 15
0xe7 0xff 0x00 0xf7 0xbc 0xc0
-#CHECK: vclgdb %v0, %v0, 0, 0
+# CHECK: vclgdb %v0, %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0xc0
-#CHECK: vclgdb %v19, %v14, 4, 10
+# CHECK: vclgdb %v19, %v14, 4, 10
0xe7 0x3e 0x00 0xa4 0x38 0xc0
-#CHECK: vclgdb %v31, %v31, 7, 15
+# CHECK: vclgdb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xf7 0x3c 0xc0
-#CHECK: vclz %v0, %v0, 11
+# CHECK: vclz %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x53
-#CHECK: vclz %v19, %v14, 11
+# CHECK: vclz %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0x53
-#CHECK: vclz %v31, %v31, 11
+# CHECK: vclz %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0x53
-#CHECK: vclzb %v0, %v0
+# CHECK: vclzb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x53
-#CHECK: vclzb %v19, %v14
+# CHECK: vclzb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0x53
-#CHECK: vclzb %v31, %v31
+# CHECK: vclzb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0x53
-#CHECK: vclzf %v0, %v0
+# CHECK: vclzf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x53
-#CHECK: vclzf %v19, %v14
+# CHECK: vclzf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0x53
-#CHECK: vclzf %v31, %v31
+# CHECK: vclzf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0x53
-#CHECK: vclzg %v0, %v0
+# CHECK: vclzg %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x53
-#CHECK: vclzg %v19, %v14
+# CHECK: vclzg %v19, %v14
0xe7 0x3e 0x00 0x00 0x38 0x53
-#CHECK: vclzg %v31, %v31
+# CHECK: vclzg %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0x53
-#CHECK: vclzh %v0, %v0
+# CHECK: vclzh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x53
-#CHECK: vclzh %v19, %v14
+# CHECK: vclzh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0x53
-#CHECK: vclzh %v31, %v31
+# CHECK: vclzh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0x53
-#CHECK: vctz %v0, %v0, 11
+# CHECK: vctz %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x52
-#CHECK: vctz %v19, %v14, 11
+# CHECK: vctz %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0x52
-#CHECK: vctz %v31, %v31, 11
+# CHECK: vctz %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0x52
-#CHECK: vctzb %v0, %v0
+# CHECK: vctzb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x52
-#CHECK: vctzb %v19, %v14
+# CHECK: vctzb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0x52
-#CHECK: vctzb %v31, %v31
+# CHECK: vctzb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0x52
-#CHECK: vctzf %v0, %v0
+# CHECK: vctzf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x52
-#CHECK: vctzf %v19, %v14
+# CHECK: vctzf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0x52
-#CHECK: vctzf %v31, %v31
+# CHECK: vctzf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0x52
-#CHECK: vctzg %v0, %v0
+# CHECK: vctzg %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x52
-#CHECK: vctzg %v19, %v14
+# CHECK: vctzg %v19, %v14
0xe7 0x3e 0x00 0x00 0x38 0x52
-#CHECK: vctzg %v31, %v31
+# CHECK: vctzg %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0x52
-#CHECK: vctzh %v0, %v0
+# CHECK: vctzh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x52
-#CHECK: vctzh %v19, %v14
+# CHECK: vctzh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0x52
-#CHECK: vctzh %v31, %v31
+# CHECK: vctzh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0x52
-#CHECK: vec %v0, %v0, 11
+# CHECK: vec %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xdb
-#CHECK: vec %v19, %v14, 11
+# CHECK: vec %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xdb
-#CHECK: vec %v31, %v31, 11
+# CHECK: vec %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xdb
-#CHECK: vecb %v0, %v0
+# CHECK: vecb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xdb
-#CHECK: vecb %v19, %v14
+# CHECK: vecb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xdb
-#CHECK: vecb %v31, %v31
+# CHECK: vecb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xdb
-#CHECK: vecf %v0, %v0
+# CHECK: vecf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xdb
-#CHECK: vecf %v19, %v14
+# CHECK: vecf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xdb
-#CHECK: vecf %v31, %v31
+# CHECK: vecf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xdb
-#CHECK: vecg %v0, %v0
+# CHECK: vecg %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xdb
-#CHECK: vecg %v19, %v14
+# CHECK: vecg %v19, %v14
0xe7 0x3e 0x00 0x00 0x38 0xdb
-#CHECK: vecg %v31, %v31
+# CHECK: vecg %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0xdb
-#CHECK: vech %v0, %v0
+# CHECK: vech %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xdb
-#CHECK: vech %v19, %v14
+# CHECK: vech %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xdb
-#CHECK: vech %v31, %v31
+# CHECK: vech %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xdb
-#CHECK: vecl %v0, %v0, 11
+# CHECK: vecl %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xd9
-#CHECK: vecl %v19, %v14, 11
+# CHECK: vecl %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xd9
-#CHECK: vecl %v31, %v31, 11
+# CHECK: vecl %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xd9
-#CHECK: veclb %v0, %v0
+# CHECK: veclb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xd9
-#CHECK: veclb %v19, %v14
+# CHECK: veclb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xd9
-#CHECK: veclb %v31, %v31
+# CHECK: veclb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xd9
-#CHECK: veclf %v0, %v0
+# CHECK: veclf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xd9
-#CHECK: veclf %v19, %v14
+# CHECK: veclf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xd9
-#CHECK: veclf %v31, %v31
+# CHECK: veclf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xd9
-#CHECK: veclg %v0, %v0
+# CHECK: veclg %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xd9
-#CHECK: veclg %v19, %v14
+# CHECK: veclg %v19, %v14
0xe7 0x3e 0x00 0x00 0x38 0xd9
-#CHECK: veclg %v31, %v31
+# CHECK: veclg %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0xd9
-#CHECK: veclh %v0, %v0
+# CHECK: veclh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xd9
-#CHECK: veclh %v19, %v14
+# CHECK: veclh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xd9
-#CHECK: veclh %v31, %v31
+# CHECK: veclh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xd9
-#CHECK: verim %v0, %v0, %v0, 0, 11
+# CHECK: verim %v0, %v0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x72
-#CHECK: verim %v3, %v20, %v5, 103, 11
+# CHECK: verim %v3, %v20, %v5, 103, 11
0xe7 0x34 0x50 0x67 0xb4 0x72
-#CHECK: verim %v31, %v31, %v31, 255, 11
+# CHECK: verim %v31, %v31, %v31, 255, 11
0xe7 0xff 0xf0 0xff 0xbe 0x72
-#CHECK: verimb %v0, %v0, %v0, 0
+# CHECK: verimb %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x72
-#CHECK: verimb %v3, %v20, %v5, 103
+# CHECK: verimb %v3, %v20, %v5, 103
0xe7 0x34 0x50 0x67 0x04 0x72
-#CHECK: verimb %v31, %v31, %v31, 255
+# CHECK: verimb %v31, %v31, %v31, 255
0xe7 0xff 0xf0 0xff 0x0e 0x72
-#CHECK: verimf %v0, %v0, %v0, 0
+# CHECK: verimf %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x72
-#CHECK: verimf %v3, %v20, %v5, 103
+# CHECK: verimf %v3, %v20, %v5, 103
0xe7 0x34 0x50 0x67 0x24 0x72
-#CHECK: verimf %v31, %v31, %v31, 255
+# CHECK: verimf %v31, %v31, %v31, 255
0xe7 0xff 0xf0 0xff 0x2e 0x72
-#CHECK: verimg %v0, %v0, %v0, 0
+# CHECK: verimg %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x72
-#CHECK: verimg %v3, %v20, %v5, 103
+# CHECK: verimg %v3, %v20, %v5, 103
0xe7 0x34 0x50 0x67 0x34 0x72
-#CHECK: verimg %v31, %v31, %v31, 255
+# CHECK: verimg %v31, %v31, %v31, 255
0xe7 0xff 0xf0 0xff 0x3e 0x72
-#CHECK: verimh %v0, %v0, %v0, 0
+# CHECK: verimh %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x72
-#CHECK: verimh %v3, %v20, %v5, 103
+# CHECK: verimh %v3, %v20, %v5, 103
0xe7 0x34 0x50 0x67 0x14 0x72
-#CHECK: verimh %v31, %v31, %v31, 255
+# CHECK: verimh %v31, %v31, %v31, 255
0xe7 0xff 0xf0 0xff 0x1e 0x72
-#CHECK: verllv %v0, %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0x73
-
-#CHECK: verllv %v18, %v3, %v20, 11
-0xe7 0x23 0x40 0x00 0xba 0x73
-
-#CHECK: verllv %v31, %v31, %v31, 11
-0xe7 0xff 0xf0 0x00 0xbe 0x73
-
-#CHECK: verllvb %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x00 0x73
-
-#CHECK: verllvb %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x0a 0x73
-
-#CHECK: verllvb %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x0e 0x73
-
-#CHECK: verllvf %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0x73
-
-#CHECK: verllvf %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x2a 0x73
-
-#CHECK: verllvf %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x2e 0x73
-
-#CHECK: verllvg %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x30 0x73
-
-#CHECK: verllvg %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x3a 0x73
-
-#CHECK: verllvg %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x3e 0x73
-
-#CHECK: verllvh %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x10 0x73
-
-#CHECK: verllvh %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x1a 0x73
-
-#CHECK: verllvh %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x1e 0x73
-
-#CHECK: verll %v0, %v0, 0, 11
+# CHECK: verll %v0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x33
-#CHECK: verll %v12, %v18, 1110(%r3), 11
+# CHECK: verll %v12, %v18, 1110(%r3), 11
0xe7 0xc2 0x34 0x56 0xb4 0x33
-#CHECK: verll %v31, %v31, 4095(%r15), 11
+# CHECK: verll %v31, %v31, 4095(%r15), 11
0xe7 0xff 0xff 0xff 0xbc 0x33
-#CHECK: verllb %v0, %v0, 0
+# CHECK: verllb %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x33
-#CHECK: verllb %v12, %v18, 1110(%r3)
+# CHECK: verllb %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x04 0x33
-#CHECK: verllb %v31, %v31, 4095(%r15)
+# CHECK: verllb %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x0c 0x33
-#CHECK: verllf %v0, %v0, 0
+# CHECK: verllf %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x33
-#CHECK: verllf %v12, %v18, 1110(%r3)
+# CHECK: verllf %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x24 0x33
-#CHECK: verllf %v31, %v31, 4095(%r15)
+# CHECK: verllf %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x2c 0x33
-#CHECK: verllg %v0, %v0, 0
+# CHECK: verllg %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x33
-#CHECK: verllg %v12, %v18, 1110(%r3)
+# CHECK: verllg %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x34 0x33
-#CHECK: verllg %v31, %v31, 4095(%r15)
+# CHECK: verllg %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x3c 0x33
-#CHECK: verllh %v0, %v0, 0
+# CHECK: verllh %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x33
-#CHECK: verllh %v12, %v18, 1110(%r3)
+# CHECK: verllh %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x14 0x33
-#CHECK: verllh %v31, %v31, 4095(%r15)
+# CHECK: verllh %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x1c 0x33
-#CHECK: veslv %v0, %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0x70
+# CHECK: verllv %v0, %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0x73
-#CHECK: veslv %v18, %v3, %v20, 11
-0xe7 0x23 0x40 0x00 0xba 0x70
+# CHECK: verllv %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0x00 0xba 0x73
-#CHECK: veslv %v31, %v31, %v31, 11
-0xe7 0xff 0xf0 0x00 0xbe 0x70
+# CHECK: verllv %v31, %v31, %v31, 11
+0xe7 0xff 0xf0 0x00 0xbe 0x73
-#CHECK: veslvb %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x00 0x70
+# CHECK: verllvb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x73
-#CHECK: veslvb %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x0a 0x70
+# CHECK: verllvb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x73
-#CHECK: veslvb %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x0e 0x70
+# CHECK: verllvb %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x0e 0x73
-#CHECK: veslvf %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0x70
+# CHECK: verllvf %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0x73
-#CHECK: veslvf %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x2a 0x70
+# CHECK: verllvf %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0x73
-#CHECK: veslvf %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x2e 0x70
+# CHECK: verllvf %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x2e 0x73
-#CHECK: veslvg %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x30 0x70
+# CHECK: verllvg %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0x73
-#CHECK: veslvg %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x3a 0x70
+# CHECK: verllvg %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x3a 0x73
-#CHECK: veslvg %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x3e 0x70
+# CHECK: verllvg %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x3e 0x73
-#CHECK: veslvh %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x10 0x70
+# CHECK: verllvh %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x10 0x73
-#CHECK: veslvh %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x1a 0x70
+# CHECK: verllvh %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x1a 0x73
-#CHECK: veslvh %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x1e 0x70
+# CHECK: verllvh %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x1e 0x73
-#CHECK: vesl %v0, %v0, 0, 11
+# CHECK: vesl %v0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x30
-#CHECK: vesl %v12, %v18, 1110(%r3), 11
+# CHECK: vesl %v12, %v18, 1110(%r3), 11
0xe7 0xc2 0x34 0x56 0xb4 0x30
-#CHECK: vesl %v31, %v31, 4095(%r15), 11
+# CHECK: vesl %v31, %v31, 4095(%r15), 11
0xe7 0xff 0xff 0xff 0xbc 0x30
-#CHECK: veslb %v0, %v0, 0
+# CHECK: veslb %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x30
-#CHECK: veslb %v12, %v18, 1110(%r3)
+# CHECK: veslb %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x04 0x30
-#CHECK: veslb %v31, %v31, 4095(%r15)
+# CHECK: veslb %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x0c 0x30
-#CHECK: veslf %v0, %v0, 0
+# CHECK: veslf %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x30
-#CHECK: veslf %v12, %v18, 1110(%r3)
+# CHECK: veslf %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x24 0x30
-#CHECK: veslf %v31, %v31, 4095(%r15)
+# CHECK: veslf %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x2c 0x30
-#CHECK: veslg %v0, %v0, 0
+# CHECK: veslg %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x30
-#CHECK: veslg %v12, %v18, 1110(%r3)
+# CHECK: veslg %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x34 0x30
-#CHECK: veslg %v31, %v31, 4095(%r15)
+# CHECK: veslg %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x3c 0x30
-#CHECK: veslh %v0, %v0, 0
+# CHECK: veslh %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x30
-#CHECK: veslh %v12, %v18, 1110(%r3)
+# CHECK: veslh %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x14 0x30
-#CHECK: veslh %v31, %v31, 4095(%r15)
+# CHECK: veslh %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x1c 0x30
-#CHECK: vesrav %v0, %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0x7a
+# CHECK: veslv %v0, %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0x70
-#CHECK: vesrav %v18, %v3, %v20, 11
-0xe7 0x23 0x40 0x00 0xba 0x7a
+# CHECK: veslv %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0x00 0xba 0x70
-#CHECK: vesrav %v31, %v31, %v31, 11
-0xe7 0xff 0xf0 0x00 0xbe 0x7a
+# CHECK: veslv %v31, %v31, %v31, 11
+0xe7 0xff 0xf0 0x00 0xbe 0x70
-#CHECK: vesravb %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x00 0x7a
+# CHECK: veslvb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x70
-#CHECK: vesravb %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x0a 0x7a
+# CHECK: veslvb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x70
-#CHECK: vesravb %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x0e 0x7a
+# CHECK: veslvb %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x0e 0x70
-#CHECK: vesravf %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0x7a
+# CHECK: veslvf %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0x70
-#CHECK: vesravf %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x2a 0x7a
+# CHECK: veslvf %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0x70
-#CHECK: vesravf %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x2e 0x7a
+# CHECK: veslvf %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x2e 0x70
-#CHECK: vesravg %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x30 0x7a
+# CHECK: veslvg %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0x70
-#CHECK: vesravg %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x3a 0x7a
+# CHECK: veslvg %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x3a 0x70
-#CHECK: vesravg %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x3e 0x7a
+# CHECK: veslvg %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x3e 0x70
-#CHECK: vesravh %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x10 0x7a
+# CHECK: veslvh %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x10 0x70
-#CHECK: vesravh %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x1a 0x7a
+# CHECK: veslvh %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x1a 0x70
-#CHECK: vesravh %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x1e 0x7a
+# CHECK: veslvh %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x1e 0x70
-#CHECK: vesra %v0, %v0, 0, 11
+# CHECK: vesra %v0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x3a
-#CHECK: vesra %v12, %v18, 1110(%r3), 11
+# CHECK: vesra %v12, %v18, 1110(%r3), 11
0xe7 0xc2 0x34 0x56 0xb4 0x3a
-#CHECK: vesra %v31, %v31, 4095(%r15), 11
+# CHECK: vesra %v31, %v31, 4095(%r15), 11
0xe7 0xff 0xff 0xff 0xbc 0x3a
-#CHECK: vesrab %v0, %v0, 0
+# CHECK: vesrab %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x3a
-#CHECK: vesrab %v12, %v18, 1110(%r3)
+# CHECK: vesrab %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x04 0x3a
-#CHECK: vesrab %v31, %v31, 4095(%r15)
+# CHECK: vesrab %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x0c 0x3a
-#CHECK: vesraf %v0, %v0, 0
+# CHECK: vesraf %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x3a
-#CHECK: vesraf %v12, %v18, 1110(%r3)
+# CHECK: vesraf %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x24 0x3a
-#CHECK: vesraf %v31, %v31, 4095(%r15)
+# CHECK: vesraf %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x2c 0x3a
-#CHECK: vesrag %v0, %v0, 0
+# CHECK: vesrag %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x3a
-#CHECK: vesrag %v12, %v18, 1110(%r3)
+# CHECK: vesrag %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x34 0x3a
-#CHECK: vesrag %v31, %v31, 4095(%r15)
+# CHECK: vesrag %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x3c 0x3a
-#CHECK: vesrah %v0, %v0, 0
+# CHECK: vesrah %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x3a
-#CHECK: vesrah %v12, %v18, 1110(%r3)
+# CHECK: vesrah %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x14 0x3a
-#CHECK: vesrah %v31, %v31, 4095(%r15)
+# CHECK: vesrah %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x1c 0x3a
-#CHECK: vesrlv %v0, %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0x78
+# CHECK: vesrav %v0, %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0x7a
-#CHECK: vesrlv %v18, %v3, %v20, 11
-0xe7 0x23 0x40 0x00 0xba 0x78
+# CHECK: vesrav %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0x00 0xba 0x7a
-#CHECK: vesrlv %v31, %v31, %v31, 11
-0xe7 0xff 0xf0 0x00 0xbe 0x78
+# CHECK: vesrav %v31, %v31, %v31, 11
+0xe7 0xff 0xf0 0x00 0xbe 0x7a
-#CHECK: vesrlvb %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x00 0x78
+# CHECK: vesravb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x7a
-#CHECK: vesrlvb %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x0a 0x78
+# CHECK: vesravb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x7a
-#CHECK: vesrlvb %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x0e 0x78
+# CHECK: vesravb %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x0e 0x7a
-#CHECK: vesrlvf %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0x78
+# CHECK: vesravf %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0x7a
-#CHECK: vesrlvf %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x2a 0x78
+# CHECK: vesravf %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0x7a
-#CHECK: vesrlvf %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x2e 0x78
+# CHECK: vesravf %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x2e 0x7a
-#CHECK: vesrlvg %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x30 0x78
+# CHECK: vesravg %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0x7a
-#CHECK: vesrlvg %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x3a 0x78
+# CHECK: vesravg %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x3a 0x7a
-#CHECK: vesrlvg %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x3e 0x78
+# CHECK: vesravg %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x3e 0x7a
-#CHECK: vesrlvh %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x10 0x78
+# CHECK: vesravh %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x10 0x7a
-#CHECK: vesrlvh %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x1a 0x78
+# CHECK: vesravh %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x1a 0x7a
-#CHECK: vesrlvh %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x1e 0x78
+# CHECK: vesravh %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x1e 0x7a
-#CHECK: vesrl %v0, %v0, 0, 11
+# CHECK: vesrl %v0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x38
-#CHECK: vesrl %v12, %v18, 1110(%r3), 11
+# CHECK: vesrl %v12, %v18, 1110(%r3), 11
0xe7 0xc2 0x34 0x56 0xb4 0x38
-#CHECK: vesrl %v31, %v31, 4095(%r15), 11
+# CHECK: vesrl %v31, %v31, 4095(%r15), 11
0xe7 0xff 0xff 0xff 0xbc 0x38
-#CHECK: vesrlb %v0, %v0, 0
+# CHECK: vesrlb %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x38
-#CHECK: vesrlb %v12, %v18, 1110(%r3)
+# CHECK: vesrlb %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x04 0x38
-#CHECK: vesrlb %v31, %v31, 4095(%r15)
+# CHECK: vesrlb %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x0c 0x38
-#CHECK: vesrlf %v0, %v0, 0
+# CHECK: vesrlf %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x38
-#CHECK: vesrlf %v12, %v18, 1110(%r3)
+# CHECK: vesrlf %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x24 0x38
-#CHECK: vesrlf %v31, %v31, 4095(%r15)
+# CHECK: vesrlf %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x2c 0x38
-#CHECK: vesrlg %v0, %v0, 0
+# CHECK: vesrlg %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x38
-#CHECK: vesrlg %v12, %v18, 1110(%r3)
+# CHECK: vesrlg %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x34 0x38
-#CHECK: vesrlg %v31, %v31, 4095(%r15)
+# CHECK: vesrlg %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x3c 0x38
-#CHECK: vesrlh %v0, %v0, 0
+# CHECK: vesrlh %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x38
-#CHECK: vesrlh %v12, %v18, 1110(%r3)
+# CHECK: vesrlh %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x14 0x38
-#CHECK: vesrlh %v31, %v31, 4095(%r15)
+# CHECK: vesrlh %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x1c 0x38
-#CHECK: vfa %v0, %v0, %v0, 11, 9
+# CHECK: vesrlv %v0, %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0x78
+
+# CHECK: vesrlv %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0x00 0xba 0x78
+
+# CHECK: vesrlv %v31, %v31, %v31, 11
+0xe7 0xff 0xf0 0x00 0xbe 0x78
+
+# CHECK: vesrlvb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x78
+
+# CHECK: vesrlvb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x78
+
+# CHECK: vesrlvb %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x0e 0x78
+
+# CHECK: vesrlvf %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0x78
+
+# CHECK: vesrlvf %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0x78
+
+# CHECK: vesrlvf %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x2e 0x78
+
+# CHECK: vesrlvg %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0x78
+
+# CHECK: vesrlvg %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x3a 0x78
+
+# CHECK: vesrlvg %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x3e 0x78
+
+# CHECK: vesrlvh %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x10 0x78
+
+# CHECK: vesrlvh %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x1a 0x78
+
+# CHECK: vesrlvh %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x1e 0x78
+
+# CHECK: vfa %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xe3
-#CHECK: vfa %v18, %v3, %v20, 11, 9
+# CHECK: vfa %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x09 0xba 0xe3
-#CHECK: vfa %v31, %v31, %v31, 11, 9
+# CHECK: vfa %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x09 0xbe 0xe3
-#CHECK: vfadb %v0, %v0, %v0
+# CHECK: vfadb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xe3
-#CHECK: vfadb %v18, %v3, %v20
+# CHECK: vfadb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xe3
-#CHECK: vfadb %v31, %v31, %v31
+# CHECK: vfadb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xe3
-#CHECK: vfae %v0, %v0, %v0, 11, 0
+# CHECK: vfae %v0, %v0, %v0, 11, 0
0xe7 0x00 0x00 0x00 0xb0 0x82
-#CHECK: vfae %v0, %v0, %v0, 11, 12
+# CHECK: vfae %v0, %v0, %v0, 11, 12
0xe7 0x00 0x00 0xc0 0xb0 0x82
-#CHECK: vfae %v18, %v3, %v20, 11, 0
+# CHECK: vfae %v18, %v3, %v20, 11, 0
0xe7 0x23 0x40 0x00 0xba 0x82
-#CHECK: vfae %v31, %v31, %v31, 11, 4
+# CHECK: vfae %v31, %v31, %v31, 11, 4
0xe7 0xff 0xf0 0x40 0xbe 0x82
-#CHECK: vfaeb %v0, %v0, %v0, 0
+# CHECK: vfaeb %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x82
-#CHECK: vfaeb %v0, %v0, %v0, 12
+# CHECK: vfaeb %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x00 0x82
-#CHECK: vfaeb %v18, %v3, %v20, 0
+# CHECK: vfaeb %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x0a 0x82
-#CHECK: vfaeb %v31, %v31, %v31, 4
+# CHECK: vfaeb %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x40 0x0e 0x82
-#CHECK: vfaebs %v31, %v31, %v31, 8
+# CHECK: vfaebs %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0x90 0x0e 0x82
-#CHECK: vfaezb %v31, %v31, %v31, 4
+# CHECK: vfaezb %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x60 0x0e 0x82
-#CHECK: vfaezbs %v31, %v31, %v31, 8
+# CHECK: vfaezbs %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0xb0 0x0e 0x82
-#CHECK: vfaef %v0, %v0, %v0, 0
+# CHECK: vfaef %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x82
-#CHECK: vfaef %v0, %v0, %v0, 12
+# CHECK: vfaef %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x20 0x82
-#CHECK: vfaef %v18, %v3, %v20, 0
+# CHECK: vfaef %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x2a 0x82
-#CHECK: vfaef %v31, %v31, %v31, 4
+# CHECK: vfaef %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x40 0x2e 0x82
-#CHECK: vfaefs %v31, %v31, %v31, 8
+# CHECK: vfaefs %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0x90 0x2e 0x82
-#CHECK: vfaezf %v31, %v31, %v31, 4
+# CHECK: vfaezf %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x60 0x2e 0x82
-#CHECK: vfaezfs %v31, %v31, %v31, 8
+# CHECK: vfaezfs %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0xb0 0x2e 0x82
-#CHECK: vfaeh %v0, %v0, %v0, 0
+# CHECK: vfaeh %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x82
-#CHECK: vfaeh %v0, %v0, %v0, 12
+# CHECK: vfaeh %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x10 0x82
-#CHECK: vfaeh %v18, %v3, %v20, 0
+# CHECK: vfaeh %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x1a 0x82
-#CHECK: vfaeh %v31, %v31, %v31, 4
+# CHECK: vfaeh %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x40 0x1e 0x82
-#CHECK: vfaehs %v31, %v31, %v31, 8
+# CHECK: vfaehs %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0x90 0x1e 0x82
-#CHECK: vfaezh %v31, %v31, %v31, 4
+# CHECK: vfaezh %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x60 0x1e 0x82
-#CHECK: vfaezhs %v31, %v31, %v31, 8
+# CHECK: vfaezhs %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0xb0 0x1e 0x82
-#CHECK: vfce %v0, %v0, %v0, 11, 9
+# CHECK: vfce %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xe8
-#CHECK: vfce %v18, %v3, %v20, 11, 9
+# CHECK: vfce %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x09 0xba 0xe8
-#CHECK: vfce %v31, %v31, %v31, 11, 9
+# CHECK: vfce %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x09 0xbe 0xe8
-#CHECK: vfcedb %v0, %v0, %v0
+# CHECK: vfcedb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xe8
-#CHECK: vfcedb %v18, %v3, %v20
+# CHECK: vfcedb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xe8
-#CHECK: vfcedb %v31, %v31, %v31
+# CHECK: vfcedb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xe8
-#CHECK: vfcedbs %v0, %v0, %v0
+# CHECK: vfcedbs %v0, %v0, %v0
0xe7 0x00 0x00 0x10 0x30 0xe8
-#CHECK: vfcedbs %v18, %v3, %v20
+# CHECK: vfcedbs %v18, %v3, %v20
0xe7 0x23 0x40 0x10 0x3a 0xe8
-#CHECK: vfcedbs %v31, %v31, %v31
+# CHECK: vfcedbs %v31, %v31, %v31
0xe7 0xff 0xf0 0x10 0x3e 0xe8
-#CHECK: vfch %v0, %v0, %v0, 11, 9
+# CHECK: vfch %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xeb
-#CHECK: vfch %v18, %v3, %v20, 11, 9
+# CHECK: vfch %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x09 0xba 0xeb
-#CHECK: vfch %v31, %v31, %v31, 11, 9
+# CHECK: vfch %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x09 0xbe 0xeb
-#CHECK: vfchdb %v0, %v0, %v0
+# CHECK: vfchdb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xeb
-#CHECK: vfchdb %v18, %v3, %v20
+# CHECK: vfchdb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xeb
-#CHECK: vfchdb %v31, %v31, %v31
+# CHECK: vfchdb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xeb
-#CHECK: vfchdbs %v0, %v0, %v0
+# CHECK: vfchdbs %v0, %v0, %v0
0xe7 0x00 0x00 0x10 0x30 0xeb
-#CHECK: vfchdbs %v18, %v3, %v20
+# CHECK: vfchdbs %v18, %v3, %v20
0xe7 0x23 0x40 0x10 0x3a 0xeb
-#CHECK: vfchdbs %v31, %v31, %v31
+# CHECK: vfchdbs %v31, %v31, %v31
0xe7 0xff 0xf0 0x10 0x3e 0xeb
-#CHECK: vfche %v0, %v0, %v0, 11, 9
+# CHECK: vfche %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xea
-#CHECK: vfche %v18, %v3, %v20, 11, 9
+# CHECK: vfche %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x09 0xba 0xea
-#CHECK: vfche %v31, %v31, %v31, 11, 9
+# CHECK: vfche %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x09 0xbe 0xea
-#CHECK: vfchedb %v0, %v0, %v0
+# CHECK: vfchedb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xea
-#CHECK: vfchedb %v18, %v3, %v20
+# CHECK: vfchedb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xea
-#CHECK: vfchedb %v31, %v31, %v31
+# CHECK: vfchedb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xea
-#CHECK: vfchedbs %v0, %v0, %v0
+# CHECK: vfchedbs %v0, %v0, %v0
0xe7 0x00 0x00 0x10 0x30 0xea
-#CHECK: vfchedbs %v18, %v3, %v20
+# CHECK: vfchedbs %v18, %v3, %v20
0xe7 0x23 0x40 0x10 0x3a 0xea
-#CHECK: vfchedbs %v31, %v31, %v31
+# CHECK: vfchedbs %v31, %v31, %v31
0xe7 0xff 0xf0 0x10 0x3e 0xea
-#CHECK: vfd %v0, %v0, %v0, 11, 9
+# CHECK: vfd %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xe5
-#CHECK: vfd %v18, %v3, %v20, 11, 9
+# CHECK: vfd %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x09 0xba 0xe5
-#CHECK: vfd %v31, %v31, %v31, 11, 9
+# CHECK: vfd %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x09 0xbe 0xe5
-#CHECK: vfddb %v0, %v0, %v0
+# CHECK: vfddb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xe5
-#CHECK: vfddb %v18, %v3, %v20
+# CHECK: vfddb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xe5
-#CHECK: vfddb %v31, %v31, %v31
+# CHECK: vfddb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xe5
-#CHECK: vfee %v0, %v0, %v0, 11, 0
+# CHECK: vfee %v0, %v0, %v0, 11, 0
0xe7 0x00 0x00 0x00 0xb0 0x80
-#CHECK: vfee %v0, %v0, %v0, 11, 12
+# CHECK: vfee %v0, %v0, %v0, 11, 12
0xe7 0x00 0x00 0xc0 0xb0 0x80
-#CHECK: vfee %v18, %v3, %v20, 11, 0
+# CHECK: vfee %v18, %v3, %v20, 11, 0
0xe7 0x23 0x40 0x00 0xba 0x80
-#CHECK: vfee %v31, %v31, %v31, 11, 0
+# CHECK: vfee %v31, %v31, %v31, 11, 0
0xe7 0xff 0xf0 0x00 0xbe 0x80
-#CHECK: vfeeb %v0, %v0, %v0, 0
+# CHECK: vfeeb %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x80
-#CHECK: vfeeb %v0, %v0, %v0, 12
+# CHECK: vfeeb %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x00 0x80
-#CHECK: vfeeb %v18, %v3, %v20, 0
+# CHECK: vfeeb %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x0a 0x80
-#CHECK: vfeebs %v7, %v24, %v9
+# CHECK: vfeeb %v31, %v31, %v31, 0
+0xe7 0xff 0xf0 0x00 0x0e 0x80
+
+# CHECK: vfeebs %v7, %v24, %v9
0xe7 0x78 0x90 0x10 0x04 0x80
-#CHECK: vfeezb %v18, %v3, %v20
+# CHECK: vfeezb %v18, %v3, %v20
0xe7 0x23 0x40 0x20 0x0a 0x80
-#CHECK: vfeezbs %v7, %v24, %v9
+# CHECK: vfeezbs %v7, %v24, %v9
0xe7 0x78 0x90 0x30 0x04 0x80
-#CHECK: vfeeb %v31, %v31, %v31, 0
-0xe7 0xff 0xf0 0x00 0x0e 0x80
-
-#CHECK: vfeef %v0, %v0, %v0, 0
+# CHECK: vfeef %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x80
-#CHECK: vfeef %v0, %v0, %v0, 12
+# CHECK: vfeef %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x20 0x80
-#CHECK: vfeef %v18, %v3, %v20, 0
+# CHECK: vfeef %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x2a 0x80
-#CHECK: vfeefs %v7, %v24, %v9
+# CHECK: vfeef %v31, %v31, %v31, 0
+0xe7 0xff 0xf0 0x00 0x2e 0x80
+
+# CHECK: vfeefs %v7, %v24, %v9
0xe7 0x78 0x90 0x10 0x24 0x80
-#CHECK: vfeezf %v18, %v3, %v20
+# CHECK: vfeezf %v18, %v3, %v20
0xe7 0x23 0x40 0x20 0x2a 0x80
-#CHECK: vfeezfs %v7, %v24, %v9
+# CHECK: vfeezfs %v7, %v24, %v9
0xe7 0x78 0x90 0x30 0x24 0x80
-#CHECK: vfeef %v31, %v31, %v31, 0
-0xe7 0xff 0xf0 0x00 0x2e 0x80
-
-#CHECK: vfeeh %v0, %v0, %v0, 0
+# CHECK: vfeeh %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x80
-#CHECK: vfeeh %v0, %v0, %v0, 12
+# CHECK: vfeeh %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x10 0x80
-#CHECK: vfeeh %v18, %v3, %v20, 0
+# CHECK: vfeeh %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x1a 0x80
-#CHECK: vfeehs %v7, %v24, %v9
+# CHECK: vfeeh %v31, %v31, %v31, 0
+0xe7 0xff 0xf0 0x00 0x1e 0x80
+
+# CHECK: vfeehs %v7, %v24, %v9
0xe7 0x78 0x90 0x10 0x14 0x80
-#CHECK: vfeezh %v18, %v3, %v20
+# CHECK: vfeezh %v18, %v3, %v20
0xe7 0x23 0x40 0x20 0x1a 0x80
-#CHECK: vfeezhs %v7, %v24, %v9
+# CHECK: vfeezhs %v7, %v24, %v9
0xe7 0x78 0x90 0x30 0x14 0x80
-#CHECK: vfeeh %v31, %v31, %v31, 0
-0xe7 0xff 0xf0 0x00 0x1e 0x80
-
-#CHECK: vfene %v0, %v0, %v0, 11, 0
+# CHECK: vfene %v0, %v0, %v0, 11, 0
0xe7 0x00 0x00 0x00 0xb0 0x81
-#CHECK: vfene %v0, %v0, %v0, 11, 12
+# CHECK: vfene %v0, %v0, %v0, 11, 12
0xe7 0x00 0x00 0xc0 0xb0 0x81
-#CHECK: vfene %v18, %v3, %v20, 11, 0
+# CHECK: vfene %v18, %v3, %v20, 11, 0
0xe7 0x23 0x40 0x00 0xba 0x81
-#CHECK: vfene %v31, %v31, %v31, 11, 0
+# CHECK: vfene %v31, %v31, %v31, 11, 0
0xe7 0xff 0xf0 0x00 0xbe 0x81
-#CHECK: vfeneb %v0, %v0, %v0, 0
+# CHECK: vfeneb %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x81
-#CHECK: vfeneb %v0, %v0, %v0, 12
+# CHECK: vfeneb %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x00 0x81
-#CHECK: vfeneb %v18, %v3, %v20, 0
+# CHECK: vfeneb %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x0a 0x81
-#CHECK: vfenebs %v7, %v24, %v9
+# CHECK: vfenebs %v7, %v24, %v9
0xe7 0x78 0x90 0x10 0x04 0x81
-#CHECK: vfenezb %v18, %v3, %v20
+# CHECK: vfeneb %v31, %v31, %v31, 0
+0xe7 0xff 0xf0 0x00 0x0e 0x81
+
+# CHECK: vfenezb %v18, %v3, %v20
0xe7 0x23 0x40 0x20 0x0a 0x81
-#CHECK: vfenezbs %v7, %v24, %v9
+# CHECK: vfenezbs %v7, %v24, %v9
0xe7 0x78 0x90 0x30 0x04 0x81
-#CHECK: vfeneb %v31, %v31, %v31, 0
-0xe7 0xff 0xf0 0x00 0x0e 0x81
-
-#CHECK: vfenef %v0, %v0, %v0, 0
+# CHECK: vfenef %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x81
-#CHECK: vfenef %v0, %v0, %v0, 12
+# CHECK: vfenef %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x20 0x81
-#CHECK: vfenef %v18, %v3, %v20, 0
+# CHECK: vfenef %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x2a 0x81
-#CHECK: vfenefs %v7, %v24, %v9
+# CHECK: vfenef %v31, %v31, %v31, 0
+0xe7 0xff 0xf0 0x00 0x2e 0x81
+
+# CHECK: vfenefs %v7, %v24, %v9
0xe7 0x78 0x90 0x10 0x24 0x81
-#CHECK: vfenezf %v18, %v3, %v20
+# CHECK: vfenezf %v18, %v3, %v20
0xe7 0x23 0x40 0x20 0x2a 0x81
-#CHECK: vfenezfs %v7, %v24, %v9
+# CHECK: vfenezfs %v7, %v24, %v9
0xe7 0x78 0x90 0x30 0x24 0x81
-#CHECK: vfenef %v31, %v31, %v31, 0
-0xe7 0xff 0xf0 0x00 0x2e 0x81
-
-#CHECK: vfeneh %v0, %v0, %v0, 0
+# CHECK: vfeneh %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x81
-#CHECK: vfeneh %v0, %v0, %v0, 12
+# CHECK: vfeneh %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x10 0x81
-#CHECK: vfeneh %v18, %v3, %v20, 0
+# CHECK: vfeneh %v18, %v3, %v20, 0
0xe7 0x23 0x40 0x00 0x1a 0x81
-#CHECK: vfenehs %v7, %v24, %v9
+# CHECK: vfeneh %v31, %v31, %v31, 0
+0xe7 0xff 0xf0 0x00 0x1e 0x81
+
+# CHECK: vfenehs %v7, %v24, %v9
0xe7 0x78 0x90 0x10 0x14 0x81
-#CHECK: vfenezh %v18, %v3, %v20
+# CHECK: vfenezh %v18, %v3, %v20
0xe7 0x23 0x40 0x20 0x1a 0x81
-#CHECK: vfenezhs %v7, %v24, %v9
+# CHECK: vfenezhs %v7, %v24, %v9
0xe7 0x78 0x90 0x30 0x14 0x81
-#CHECK: vfeneh %v31, %v31, %v31, 0
-0xe7 0xff 0xf0 0x00 0x1e 0x81
-
-#CHECK: vfi %v0, %v0, 11, 0, 0
+# CHECK: vfi %v0, %v0, 11, 0, 0
0xe7 0x00 0x00 0x00 0xb0 0xc7
-#CHECK: vfi %v19, %v14, 11, 4, 10
+# CHECK: vfi %v19, %v14, 11, 4, 10
0xe7 0x3e 0x00 0xa4 0xb8 0xc7
-#CHECK: vfi %v31, %v31, 11, 7, 15
+# CHECK: vfi %v31, %v31, 11, 7, 15
0xe7 0xff 0x00 0xf7 0xbc 0xc7
-#CHECK: vfidb %v0, %v0, 0, 0
+# CHECK: vfidb %v0, %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0xc7
-#CHECK: vfidb %v19, %v14, 4, 10
+# CHECK: vfidb %v19, %v14, 4, 10
0xe7 0x3e 0x00 0xa4 0x38 0xc7
-#CHECK: vfidb %v31, %v31, 7, 15
+# CHECK: vfidb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xf7 0x3c 0xc7
-#CHECK: vistr %v0, %v0, 11, 0
-0xe7 0x00 0x00 0x00 0xb0 0x5c
-
-#CHECK: vistr %v0, %v0, 11, 12
-0xe7 0x00 0x00 0xc0 0xb0 0x5c
-
-#CHECK: vistr %v18, %v3, 11, 0
-0xe7 0x23 0x00 0x00 0xb8 0x5c
-
-#CHECK: vistr %v31, %v31, 11, 0
-0xe7 0xff 0x00 0x00 0xbc 0x5c
-
-#CHECK: vistrb %v0, %v0, 0
-0xe7 0x00 0x00 0x00 0x00 0x5c
-
-#CHECK: vistrb %v0, %v0, 12
-0xe7 0x00 0x00 0xc0 0x00 0x5c
-
-#CHECK: vistrb %v18, %v3, 0
-0xe7 0x23 0x00 0x00 0x08 0x5c
-
-#CHECK: vistrbs %v7, %v24
-0xe7 0x78 0x00 0x10 0x04 0x5c
+# CHECK: vflcdb %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0xcc
-#CHECK: vistrb %v31, %v31, 0
-0xe7 0xff 0x00 0x00 0x0c 0x5c
+# CHECK: vflcdb %v19, %v14
+0xe7 0x3e 0x00 0x00 0x38 0xcc
-#CHECK: vistrf %v0, %v0, 0
-0xe7 0x00 0x00 0x00 0x20 0x5c
+# CHECK: vflcdb %v31, %v31
+0xe7 0xff 0x00 0x00 0x3c 0xcc
-#CHECK: vistrf %v0, %v0, 12
-0xe7 0x00 0x00 0xc0 0x20 0x5c
+# CHECK: vflndb %v0, %v0
+0xe7 0x00 0x00 0x10 0x30 0xcc
-#CHECK: vistrf %v18, %v3, 0
-0xe7 0x23 0x00 0x00 0x28 0x5c
+# CHECK: vflndb %v19, %v14
+0xe7 0x3e 0x00 0x10 0x38 0xcc
-#CHECK: vistrfs %v7, %v24
-0xe7 0x78 0x00 0x10 0x24 0x5c
+# CHECK: vflndb %v31, %v31
+0xe7 0xff 0x00 0x10 0x3c 0xcc
-#CHECK: vistrf %v31, %v31, 0
-0xe7 0xff 0x00 0x00 0x2c 0x5c
+# CHECK: vflpdb %v0, %v0
+0xe7 0x00 0x00 0x20 0x30 0xcc
-#CHECK: vistrh %v0, %v0, 0
-0xe7 0x00 0x00 0x00 0x10 0x5c
+# CHECK: vflpdb %v19, %v14
+0xe7 0x3e 0x00 0x20 0x38 0xcc
-#CHECK: vistrh %v0, %v0, 12
-0xe7 0x00 0x00 0xc0 0x10 0x5c
+# CHECK: vflpdb %v31, %v31
+0xe7 0xff 0x00 0x20 0x3c 0xcc
-#CHECK: vistrh %v18, %v3, 0
-0xe7 0x23 0x00 0x00 0x18 0x5c
+# CHECK: vfm %v0, %v0, %v0, 11, 9
+0xe7 0x00 0x00 0x09 0xb0 0xe7
-#CHECK: vistrhs %v7, %v24
-0xe7 0x78 0x00 0x10 0x14 0x5c
+# CHECK: vfm %v18, %v3, %v20, 11, 9
+0xe7 0x23 0x40 0x09 0xba 0xe7
-#CHECK: vistrh %v31, %v31, 0
-0xe7 0xff 0x00 0x00 0x1c 0x5c
+# CHECK: vfm %v31, %v31, %v31, 11, 9
+0xe7 0xff 0xf0 0x09 0xbe 0xe7
-#CHECK: vfma %v0, %v0, %v0, %v0, 9, 11
+# CHECK: vfma %v0, %v0, %v0, %v0, 9, 11
0xe7 0x00 0x0b 0x09 0x00 0x8f
-#CHECK: vfma %v3, %v20, %v5, %v22, 9, 11
+# CHECK: vfma %v3, %v20, %v5, %v22, 9, 11
0xe7 0x34 0x5b 0x09 0x65 0x8f
-#CHECK: vfma %v31, %v31, %v31, %v31, 9, 11
+# CHECK: vfma %v31, %v31, %v31, %v31, 9, 11
0xe7 0xff 0xfb 0x09 0xff 0x8f
-#CHECK: vfmadb %v0, %v0, %v0, %v0
+# CHECK: vfmadb %v0, %v0, %v0, %v0
0xe7 0x00 0x03 0x00 0x00 0x8f
-#CHECK: vfmadb %v3, %v20, %v5, %v22
+# CHECK: vfmadb %v3, %v20, %v5, %v22
0xe7 0x34 0x53 0x00 0x65 0x8f
-#CHECK: vfmadb %v31, %v31, %v31, %v31
+# CHECK: vfmadb %v31, %v31, %v31, %v31
0xe7 0xff 0xf3 0x00 0xff 0x8f
-#CHECK: vfm %v0, %v0, %v0, 11, 9
-0xe7 0x00 0x00 0x09 0xb0 0xe7
-
-#CHECK: vfm %v18, %v3, %v20, 11, 9
-0xe7 0x23 0x40 0x09 0xba 0xe7
-
-#CHECK: vfm %v31, %v31, %v31, 11, 9
-0xe7 0xff 0xf0 0x09 0xbe 0xe7
-
-#CHECK: vfmdb %v0, %v0, %v0
+# CHECK: vfmdb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xe7
-#CHECK: vfmdb %v18, %v3, %v20
+# CHECK: vfmdb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xe7
-#CHECK: vfmdb %v31, %v31, %v31
+# CHECK: vfmdb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xe7
-#CHECK: vfms %v0, %v0, %v0, %v0, 9, 11
+# CHECK: vfms %v0, %v0, %v0, %v0, 9, 11
0xe7 0x00 0x0b 0x09 0x00 0x8e
-#CHECK: vfms %v3, %v20, %v5, %v22, 9, 11
+# CHECK: vfms %v3, %v20, %v5, %v22, 9, 11
0xe7 0x34 0x5b 0x09 0x65 0x8e
-#CHECK: vfms %v31, %v31, %v31, %v31, 9, 11
+# CHECK: vfms %v31, %v31, %v31, %v31, 9, 11
0xe7 0xff 0xfb 0x09 0xff 0x8e
-#CHECK: vfmsdb %v0, %v0, %v0, %v0
+# CHECK: vfmsdb %v0, %v0, %v0, %v0
0xe7 0x00 0x03 0x00 0x00 0x8e
-#CHECK: vfmsdb %v3, %v20, %v5, %v22
+# CHECK: vfmsdb %v3, %v20, %v5, %v22
0xe7 0x34 0x53 0x00 0x65 0x8e
-#CHECK: vfmsdb %v31, %v31, %v31, %v31
+# CHECK: vfmsdb %v31, %v31, %v31, %v31
0xe7 0xff 0xf3 0x00 0xff 0x8e
-#CHECK: vfs %v0, %v0, %v0, 11, 9
+# CHECK: vfpso %v0, %v0, 11, 9, 7
+0xe7 0x00 0x00 0x79 0xb0 0xcc
+
+# CHECK: vfpso %v19, %v14, 11, 9, 7
+0xe7 0x3e 0x00 0x79 0xb8 0xcc
+
+# CHECK: vfpso %v31, %v31, 11, 9, 7
+0xe7 0xff 0x00 0x79 0xbc 0xcc
+
+# CHECK: vfpsodb %v0, %v0, 7
+0xe7 0x00 0x00 0x70 0x30 0xcc
+
+# CHECK: vfpsodb %v19, %v14, 7
+0xe7 0x3e 0x00 0x70 0x38 0xcc
+
+# CHECK: vfpsodb %v31, %v31, 7
+0xe7 0xff 0x00 0x70 0x3c 0xcc
+
+# CHECK: vfs %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xe2
-#CHECK: vfs %v18, %v3, %v20, 11, 9
+# CHECK: vfs %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x09 0xba 0xe2
-#CHECK: vfs %v31, %v31, %v31, 11, 9
+# CHECK: vfs %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x09 0xbe 0xe2
-#CHECK: vfsdb %v0, %v0, %v0
+# CHECK: vfsdb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xe2
-#CHECK: vfsdb %v18, %v3, %v20
+# CHECK: vfsdb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xe2
-#CHECK: vfsdb %v31, %v31, %v31
+# CHECK: vfsdb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xe2
-#CHECK: vzero %v0
-0xe7 0x00 0x00 0x00 0x00 0x44
+# CHECK: vfsq %v0, %v0, 11, 9
+0xe7 0x00 0x00 0x09 0xb0 0xce
+
+# CHECK: vfsq %v19, %v14, 11, 9
+0xe7 0x3e 0x00 0x09 0xb8 0xce
-#CHECK: vgbm %v0, 1
+# CHECK: vfsq %v31, %v31, 11, 9
+0xe7 0xff 0x00 0x09 0xbc 0xce
+
+# CHECK: vfsqdb %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0xce
+
+# CHECK: vfsqdb %v19, %v14
+0xe7 0x3e 0x00 0x00 0x38 0xce
+
+# CHECK: vfsqdb %v31, %v31
+0xe7 0xff 0x00 0x00 0x3c 0xce
+
+# CHECK: vftci %v0, %v0, 0, 11, 9
+0xe7 0x00 0x00 0x09 0xb0 0x4a
+
+# CHECK: vftci %v19, %v4, 1383, 11, 9
+0xe7 0x34 0x56 0x79 0xb8 0x4a
+
+# CHECK: vftci %v31, %v31, 4095, 11, 9
+0xe7 0xff 0xff 0xf9 0xbc 0x4a
+
+# CHECK: vftcidb %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x30 0x4a
+
+# CHECK: vftcidb %v19, %v4, 1383
+0xe7 0x34 0x56 0x70 0x38 0x4a
+
+# CHECK: vftcidb %v31, %v31, 4095
+0xe7 0xff 0xff 0xf0 0x3c 0x4a
+
+# CHECK: vgbm %v0, 1
0xe7 0x00 0x00 0x01 0x00 0x44
-#CHECK: vgbm %v0, 65534
+# CHECK: vgbm %v0, 65534
0xe7 0x00 0xff 0xfe 0x00 0x44
-#CHECK: vone %v0
-0xe7 0x00 0xff 0xff 0x00 0x44
-
-#CHECK: vgbm %v17, 4660
+# CHECK: vgbm %v17, 4660
0xe7 0x10 0x12 0x34 0x08 0x44
-#CHECK: vone %v31
+# CHECK: vzero %v0
+0xe7 0x00 0x00 0x00 0x00 0x44
+
+# CHECK: vone %v0
+0xe7 0x00 0xff 0xff 0x00 0x44
+
+# CHECK: vone %v31
0xe7 0xf0 0xff 0xff 0x08 0x44
-#CHECK: vgef %v0, 0(%v0), 0
+# CHECK: vgef %v0, 0(%v0), 0
0xe7 0x00 0x00 0x00 0x00 0x13
-#CHECK: vgef %v10, 1000(%v19,%r7), 2
+# CHECK: vgef %v10, 1000(%v19,%r7), 2
0xe7 0xa3 0x73 0xe8 0x24 0x13
-#CHECK: vgef %v31, 4095(%v31,%r15), 3
+# CHECK: vgef %v31, 4095(%v31,%r15), 3
0xe7 0xff 0xff 0xff 0x3c 0x13
-#CHECK: vgeg %v0, 0(%v0), 0
+# CHECK: vgeg %v0, 0(%v0), 0
0xe7 0x00 0x00 0x00 0x00 0x12
-#CHECK: vgeg %v10, 1000(%v19,%r7), 1
+# CHECK: vgeg %v10, 1000(%v19,%r7), 1
0xe7 0xa3 0x73 0xe8 0x14 0x12
-#CHECK: vgeg %v31, 4095(%v31,%r15), 1
+# CHECK: vgeg %v31, 4095(%v31,%r15), 1
0xe7 0xff 0xff 0xff 0x1c 0x12
-#CHECK: vgfma %v0, %v0, %v0, %v0, 11
+# CHECK: vgfm %v0, %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0xb4
+
+# CHECK: vgfm %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0x00 0xba 0xb4
+
+# CHECK: vgfm %v31, %v31, %v31, 11
+0xe7 0xff 0xf0 0x00 0xbe 0xb4
+
+# CHECK: vgfma %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xbc
-#CHECK: vgfma %v3, %v20, %v5, %v22, 11
+# CHECK: vgfma %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xbc
-#CHECK: vgfma %v31, %v31, %v31, %v31, 11
+# CHECK: vgfma %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xbc
-#CHECK: vgfmab %v0, %v0, %v0, %v0
+# CHECK: vgfmab %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xbc
-#CHECK: vgfmab %v3, %v20, %v5, %v22
+# CHECK: vgfmab %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xbc
-#CHECK: vgfmab %v31, %v31, %v31, %v31
+# CHECK: vgfmab %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xbc
-#CHECK: vgfmaf %v0, %v0, %v0, %v0
+# CHECK: vgfmaf %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xbc
-#CHECK: vgfmaf %v3, %v20, %v5, %v22
+# CHECK: vgfmaf %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xbc
-#CHECK: vgfmaf %v31, %v31, %v31, %v31
+# CHECK: vgfmaf %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xbc
-#CHECK: vgfmag %v0, %v0, %v0, %v0
+# CHECK: vgfmag %v0, %v0, %v0, %v0
0xe7 0x00 0x03 0x00 0x00 0xbc
-#CHECK: vgfmag %v3, %v20, %v5, %v22
+# CHECK: vgfmag %v3, %v20, %v5, %v22
0xe7 0x34 0x53 0x00 0x65 0xbc
-#CHECK: vgfmag %v31, %v31, %v31, %v31
+# CHECK: vgfmag %v31, %v31, %v31, %v31
0xe7 0xff 0xf3 0x00 0xff 0xbc
-#CHECK: vgfmah %v0, %v0, %v0, %v0
+# CHECK: vgfmah %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xbc
-#CHECK: vgfmah %v3, %v20, %v5, %v22
+# CHECK: vgfmah %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xbc
-#CHECK: vgfmah %v31, %v31, %v31, %v31
+# CHECK: vgfmah %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xbc
-#CHECK: vgfm %v0, %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0xb4
-
-#CHECK: vgfm %v18, %v3, %v20, 11
-0xe7 0x23 0x40 0x00 0xba 0xb4
-
-#CHECK: vgfm %v31, %v31, %v31, 11
-0xe7 0xff 0xf0 0x00 0xbe 0xb4
-
-#CHECK: vgfmb %v0, %v0, %v0
+# CHECK: vgfmb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xb4
-#CHECK: vgfmb %v18, %v3, %v20
+# CHECK: vgfmb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xb4
-#CHECK: vgfmb %v31, %v31, %v31
+# CHECK: vgfmb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xb4
-#CHECK: vgfmf %v0, %v0, %v0
+# CHECK: vgfmf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xb4
-#CHECK: vgfmf %v18, %v3, %v20
+# CHECK: vgfmf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xb4
-#CHECK: vgfmf %v31, %v31, %v31
+# CHECK: vgfmf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xb4
-#CHECK: vgfmg %v0, %v0, %v0
+# CHECK: vgfmg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xb4
-#CHECK: vgfmg %v18, %v3, %v20
+# CHECK: vgfmg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xb4
-#CHECK: vgfmg %v31, %v31, %v31
+# CHECK: vgfmg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xb4
-#CHECK: vgfmh %v0, %v0, %v0
+# CHECK: vgfmh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xb4
-#CHECK: vgfmh %v18, %v3, %v20
+# CHECK: vgfmh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xb4
-#CHECK: vgfmh %v31, %v31, %v31
+# CHECK: vgfmh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xb4
-#CHECK: vgm %v0, 0, 0, 11
+# CHECK: vgm %v0, 0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x46
-#CHECK: vgm %v22, 55, 66, 11
+# CHECK: vgm %v22, 55, 66, 11
0xe7 0x60 0x37 0x42 0xb8 0x46
-#CHECK: vgm %v31, 255, 255, 11
+# CHECK: vgm %v31, 255, 255, 11
0xe7 0xf0 0xff 0xff 0xb8 0x46
-#CHECK: vgmb %v0, 0, 0
+# CHECK: vgmb %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x46
-#CHECK: vgmb %v22, 55, 66
+# CHECK: vgmb %v22, 55, 66
0xe7 0x60 0x37 0x42 0x08 0x46
-#CHECK: vgmb %v31, 255, 255
+# CHECK: vgmb %v31, 255, 255
0xe7 0xf0 0xff 0xff 0x08 0x46
-#CHECK: vgmf %v0, 0, 0
+# CHECK: vgmf %v0, 0, 0
0xe7 0x00 0x00 0x00 0x20 0x46
-#CHECK: vgmf %v22, 55, 66
+# CHECK: vgmf %v22, 55, 66
0xe7 0x60 0x37 0x42 0x28 0x46
-#CHECK: vgmf %v31, 255, 255
+# CHECK: vgmf %v31, 255, 255
0xe7 0xf0 0xff 0xff 0x28 0x46
-#CHECK: vgmg %v0, 0, 0
+# CHECK: vgmg %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0x46
-#CHECK: vgmg %v22, 55, 66
+# CHECK: vgmg %v22, 55, 66
0xe7 0x60 0x37 0x42 0x38 0x46
-#CHECK: vgmg %v31, 255, 255
+# CHECK: vgmg %v31, 255, 255
0xe7 0xf0 0xff 0xff 0x38 0x46
-#CHECK: vgmh %v0, 0, 0
+# CHECK: vgmh %v0, 0, 0
0xe7 0x00 0x00 0x00 0x10 0x46
-#CHECK: vgmh %v22, 55, 66
+# CHECK: vgmh %v22, 55, 66
0xe7 0x60 0x37 0x42 0x18 0x46
-#CHECK: vgmh %v31, 255, 255
+# CHECK: vgmh %v31, 255, 255
0xe7 0xf0 0xff 0xff 0x18 0x46
-#CHECK: vl %v0, 0
+# CHECK: vistr %v0, %v0, 11, 0
+0xe7 0x00 0x00 0x00 0xb0 0x5c
+
+# CHECK: vistr %v0, %v0, 11, 12
+0xe7 0x00 0x00 0xc0 0xb0 0x5c
+
+# CHECK: vistr %v18, %v3, 11, 0
+0xe7 0x23 0x00 0x00 0xb8 0x5c
+
+# CHECK: vistr %v31, %v31, 11, 0
+0xe7 0xff 0x00 0x00 0xbc 0x5c
+
+# CHECK: vistrb %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x00 0x5c
+
+# CHECK: vistrb %v0, %v0, 12
+0xe7 0x00 0x00 0xc0 0x00 0x5c
+
+# CHECK: vistrb %v18, %v3, 0
+0xe7 0x23 0x00 0x00 0x08 0x5c
+
+# CHECK: vistrb %v31, %v31, 0
+0xe7 0xff 0x00 0x00 0x0c 0x5c
+
+# CHECK: vistrbs %v7, %v24
+0xe7 0x78 0x00 0x10 0x04 0x5c
+
+# CHECK: vistrf %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x20 0x5c
+
+# CHECK: vistrf %v0, %v0, 12
+0xe7 0x00 0x00 0xc0 0x20 0x5c
+
+# CHECK: vistrf %v18, %v3, 0
+0xe7 0x23 0x00 0x00 0x28 0x5c
+
+# CHECK: vistrf %v31, %v31, 0
+0xe7 0xff 0x00 0x00 0x2c 0x5c
+
+# CHECK: vistrfs %v7, %v24
+0xe7 0x78 0x00 0x10 0x24 0x5c
+
+# CHECK: vistrh %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x10 0x5c
+
+# CHECK: vistrh %v0, %v0, 12
+0xe7 0x00 0x00 0xc0 0x10 0x5c
+
+# CHECK: vistrh %v18, %v3, 0
+0xe7 0x23 0x00 0x00 0x18 0x5c
+
+# CHECK: vistrh %v31, %v31, 0
+0xe7 0xff 0x00 0x00 0x1c 0x5c
+
+# CHECK: vistrhs %v7, %v24
+0xe7 0x78 0x00 0x10 0x14 0x5c
+
+# CHECK: vl %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x06
-#CHECK: vl %v17, 2475(%r7,%r8)
+# CHECK: vl %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x08 0x06
-#CHECK: vl %v31, 4095(%r15,%r15)
+# CHECK: vl %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x08 0x06
-#CHECK: vlbb %v0, 0, 0
+# CHECK: vlbb %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x07
-#CHECK: vlbb %v17, 2475(%r7,%r8), 12
+# CHECK: vlbb %v17, 2475(%r7,%r8), 12
0xe7 0x17 0x89 0xab 0xc8 0x07
-#CHECK: vlbb %v31, 4095(%r15,%r15), 15
+# CHECK: vlbb %v31, 4095(%r15,%r15), 15
0xe7 0xff 0xff 0xff 0xf8 0x07
-#CHECK: vlc %v0, %v0, 11
+# CHECK: vlc %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xde
-#CHECK: vlc %v19, %v14, 11
+# CHECK: vlc %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xde
-#CHECK: vlc %v31, %v31, 11
+# CHECK: vlc %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xde
-#CHECK: vlcb %v0, %v0
+# CHECK: vlcb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xde
-#CHECK: vlcb %v19, %v14
+# CHECK: vlcb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xde
-#CHECK: vlcb %v31, %v31
+# CHECK: vlcb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xde
-#CHECK: vlcf %v0, %v0
+# CHECK: vlcf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xde
-#CHECK: vlcf %v19, %v14
+# CHECK: vlcf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xde
-#CHECK: vlcf %v31, %v31
+# CHECK: vlcf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xde
-#CHECK: vlcg %v0, %v0
+# CHECK: vlcg %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xde
-#CHECK: vlcg %v19, %v14
+# CHECK: vlcg %v19, %v14
0xe7 0x3e 0x00 0x00 0x38 0xde
-#CHECK: vlcg %v31, %v31
+# CHECK: vlcg %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0xde
-#CHECK: vlch %v0, %v0
+# CHECK: vlch %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xde
-#CHECK: vlch %v19, %v14
+# CHECK: vlch %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xde
-#CHECK: vlch %v31, %v31
+# CHECK: vlch %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xde
-#CHECK: vlde %v0, %v0, 11, 9
+# CHECK: vlde %v0, %v0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xc4
-#CHECK: vlde %v19, %v14, 11, 9
+# CHECK: vlde %v19, %v14, 11, 9
0xe7 0x3e 0x00 0x09 0xb8 0xc4
-#CHECK: vlde %v31, %v31, 11, 9
+# CHECK: vlde %v31, %v31, 11, 9
0xe7 0xff 0x00 0x09 0xbc 0xc4
-#CHECK: vldeb %v0, %v0
+# CHECK: vldeb %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xc4
-#CHECK: vldeb %v19, %v14
+# CHECK: vldeb %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xc4
-#CHECK: vldeb %v31, %v31
+# CHECK: vldeb %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xc4
-#CHECK: vleb %v0, 0, 0
+# CHECK: vleb %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x00
-#CHECK: vleb %v17, 2475(%r7,%r8), 12
+# CHECK: vleb %v17, 2475(%r7,%r8), 12
0xe7 0x17 0x89 0xab 0xc8 0x00
-#CHECK: vleb %v31, 4095(%r15,%r15), 15
+# CHECK: vleb %v31, 4095(%r15,%r15), 15
0xe7 0xff 0xff 0xff 0xf8 0x00
-#CHECK: vled %v0, %v0, 11, 0, 0
+# CHECK: vled %v0, %v0, 11, 0, 0
0xe7 0x00 0x00 0x00 0xb0 0xc5
-#CHECK: vled %v19, %v14, 11, 4, 10
+# CHECK: vled %v19, %v14, 11, 4, 10
0xe7 0x3e 0x00 0xa4 0xb8 0xc5
-#CHECK: vled %v31, %v31, 11, 7, 15
+# CHECK: vled %v31, %v31, 11, 7, 15
0xe7 0xff 0x00 0xf7 0xbc 0xc5
-#CHECK: vledb %v0, %v0, 0, 0
+# CHECK: vledb %v0, %v0, 0, 0
0xe7 0x00 0x00 0x00 0x30 0xc5
-#CHECK: vledb %v19, %v14, 4, 10
+# CHECK: vledb %v19, %v14, 4, 10
0xe7 0x3e 0x00 0xa4 0x38 0xc5
-#CHECK: vledb %v31, %v31, 7, 15
+# CHECK: vledb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xf7 0x3c 0xc5
-#CHECK: vlef %v0, 0, 0
+# CHECK: vlef %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x03
-#CHECK: vlef %v17, 2475(%r7,%r8), 2
+# CHECK: vlef %v17, 2475(%r7,%r8), 2
0xe7 0x17 0x89 0xab 0x28 0x03
-#CHECK: vlef %v31, 4095(%r15,%r15), 3
+# CHECK: vlef %v31, 4095(%r15,%r15), 3
0xe7 0xff 0xff 0xff 0x38 0x03
-#CHECK: vleg %v0, 0, 0
+# CHECK: vleg %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x02
-#CHECK: vleg %v17, 2475(%r7,%r8), 1
+# CHECK: vleg %v17, 2475(%r7,%r8), 1
0xe7 0x17 0x89 0xab 0x18 0x02
-#CHECK: vleg %v31, 4095(%r15,%r15), 1
+# CHECK: vleg %v31, 4095(%r15,%r15), 1
0xe7 0xff 0xff 0xff 0x18 0x02
-#CHECK: vleh %v0, 0, 0
+# CHECK: vleh %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x01
-#CHECK: vleh %v17, 2475(%r7,%r8), 5
+# CHECK: vleh %v17, 2475(%r7,%r8), 5
0xe7 0x17 0x89 0xab 0x58 0x01
-#CHECK: vleh %v31, 4095(%r15,%r15), 7
+# CHECK: vleh %v31, 4095(%r15,%r15), 7
0xe7 0xff 0xff 0xff 0x78 0x01
-#CHECK: vleib %v0, 0, 0
+# CHECK: vleib %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x40
-#CHECK: vleib %v23, -30293, 12
+# CHECK: vleib %v23, -30293, 12
0xe7 0x70 0x89 0xab 0xc8 0x40
-#CHECK: vleib %v31, -1, 15
+# CHECK: vleib %v31, -1, 15
0xe7 0xf0 0xff 0xff 0xf8 0x40
-#CHECK: vleif %v0, 0, 0
+# CHECK: vleif %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x43
-#CHECK: vleif %v23, -30293, 2
+# CHECK: vleif %v23, -30293, 2
0xe7 0x70 0x89 0xab 0x28 0x43
-#CHECK: vleif %v31, -1, 3
+# CHECK: vleif %v31, -1, 3
0xe7 0xf0 0xff 0xff 0x38 0x43
-#CHECK: vleig %v0, 0, 0
+# CHECK: vleig %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x42
-#CHECK: vleig %v23, -30293, 1
+# CHECK: vleig %v23, -30293, 1
0xe7 0x70 0x89 0xab 0x18 0x42
-#CHECK: vleig %v31, -1, 1
+# CHECK: vleig %v31, -1, 1
0xe7 0xf0 0xff 0xff 0x18 0x42
-#CHECK: vleih %v0, 0, 0
+# CHECK: vleih %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x41
-#CHECK: vleih %v23, -30293, 5
+# CHECK: vleih %v23, -30293, 5
0xe7 0x70 0x89 0xab 0x58 0x41
-#CHECK: vleih %v31, -1, 7
+# CHECK: vleih %v31, -1, 7
0xe7 0xf0 0xff 0xff 0x78 0x41
-#CHECK: vfpso %v0, %v0, 11, 9, 7
-0xe7 0x00 0x00 0x79 0xb0 0xcc
-
-#CHECK: vfpso %v19, %v14, 11, 9, 7
-0xe7 0x3e 0x00 0x79 0xb8 0xcc
-
-#CHECK: vfpso %v31, %v31, 11, 9, 7
-0xe7 0xff 0x00 0x79 0xbc 0xcc
-
-#CHECK: vfpsodb %v0, %v0, 7
-0xe7 0x00 0x00 0x70 0x30 0xcc
-
-#CHECK: vfpsodb %v19, %v14, 7
-0xe7 0x3e 0x00 0x70 0x38 0xcc
-
-#CHECK: vfpsodb %v31, %v31, 7
-0xe7 0xff 0x00 0x70 0x3c 0xcc
-
-#CHECK: vflcdb %v0, %v0
-0xe7 0x00 0x00 0x00 0x30 0xcc
-
-#CHECK: vflcdb %v19, %v14
-0xe7 0x3e 0x00 0x00 0x38 0xcc
-
-#CHECK: vflcdb %v31, %v31
-0xe7 0xff 0x00 0x00 0x3c 0xcc
-
-#CHECK: vflndb %v0, %v0
-0xe7 0x00 0x00 0x10 0x30 0xcc
-
-#CHECK: vflndb %v19, %v14
-0xe7 0x3e 0x00 0x10 0x38 0xcc
-
-#CHECK: vflndb %v31, %v31
-0xe7 0xff 0x00 0x10 0x3c 0xcc
-
-#CHECK: vflpdb %v0, %v0
-0xe7 0x00 0x00 0x20 0x30 0xcc
-
-#CHECK: vflpdb %v19, %v14
-0xe7 0x3e 0x00 0x20 0x38 0xcc
-
-#CHECK: vflpdb %v31, %v31
-0xe7 0xff 0x00 0x20 0x3c 0xcc
-
-#CHECK: vlgv %r0, %v0, 0, 11
+# CHECK: vlgv %r0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x21
-#CHECK: vlgv %r2, %v19, 1383(%r4), 11
+# CHECK: vlgv %r2, %v19, 1383(%r4), 11
0xe7 0x23 0x45 0x67 0xb4 0x21
-#CHECK: vlgv %r15, %v31, 4095(%r15), 11
+# CHECK: vlgv %r15, %v31, 4095(%r15), 11
0xe7 0xff 0xff 0xff 0xb4 0x21
-#CHECK: vlgvb %r0, %v0, 0
+# CHECK: vlgvb %r0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x21
-#CHECK: vlgvb %r2, %v19, 1383(%r4)
+# CHECK: vlgvb %r2, %v19, 1383(%r4)
0xe7 0x23 0x45 0x67 0x04 0x21
-#CHECK: vlgvb %r15, %v31, 4095(%r15)
+# CHECK: vlgvb %r15, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x04 0x21
-#CHECK: vlgvf %r0, %v0, 0
+# CHECK: vlgvf %r0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x21
-#CHECK: vlgvf %r2, %v19, 1383(%r4)
+# CHECK: vlgvf %r2, %v19, 1383(%r4)
0xe7 0x23 0x45 0x67 0x24 0x21
-#CHECK: vlgvf %r15, %v31, 4095(%r15)
+# CHECK: vlgvf %r15, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x24 0x21
-#CHECK: vlgvg %r0, %v0, 0
+# CHECK: vlgvg %r0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x21
-#CHECK: vlgvg %r2, %v19, 1383(%r4)
+# CHECK: vlgvg %r2, %v19, 1383(%r4)
0xe7 0x23 0x45 0x67 0x34 0x21
-#CHECK: vlgvg %r15, %v31, 4095(%r15)
+# CHECK: vlgvg %r15, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x34 0x21
-#CHECK: vlgvh %r0, %v0, 0
+# CHECK: vlgvh %r0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x21
-#CHECK: vlgvh %r2, %v19, 1383(%r4)
+# CHECK: vlgvh %r2, %v19, 1383(%r4)
0xe7 0x23 0x45 0x67 0x14 0x21
-#CHECK: vlgvh %r15, %v31, 4095(%r15)
+# CHECK: vlgvh %r15, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x14 0x21
-#CHECK: vfsq %v0, %v0, 11, 9
-0xe7 0x00 0x00 0x09 0xb0 0xce
-
-#CHECK: vfsq %v19, %v14, 11, 9
-0xe7 0x3e 0x00 0x09 0xb8 0xce
-
-#CHECK: vfsq %v31, %v31, 11, 9
-0xe7 0xff 0x00 0x09 0xbc 0xce
-
-#CHECK: vfsqdb %v0, %v0
-0xe7 0x00 0x00 0x00 0x30 0xce
-
-#CHECK: vfsqdb %v19, %v14
-0xe7 0x3e 0x00 0x00 0x38 0xce
-
-#CHECK: vfsqdb %v31, %v31
-0xe7 0xff 0x00 0x00 0x3c 0xce
-
-#CHECK: vftci %v0, %v0, 0, 11, 9
-0xe7 0x00 0x00 0x09 0xb0 0x4a
-
-#CHECK: vftci %v19, %v4, 1383, 11, 9
-0xe7 0x34 0x56 0x79 0xb8 0x4a
-
-#CHECK: vftci %v31, %v31, 4095, 11, 9
-0xe7 0xff 0xff 0xf9 0xbc 0x4a
-
-#CHECK: vftcidb %v0, %v0, 0
-0xe7 0x00 0x00 0x00 0x30 0x4a
-
-#CHECK: vftcidb %v19, %v4, 1383
-0xe7 0x34 0x56 0x70 0x38 0x4a
-
-#CHECK: vftcidb %v31, %v31, 4095
-0xe7 0xff 0xff 0xf0 0x3c 0x4a
-
-#CHECK: vll %v0, %r0, 0
+# CHECK: vll %v0, %r0, 0
0xe7 0x00 0x00 0x00 0x00 0x37
-#CHECK: vll %v18, %r3, 1383(%r4)
+# CHECK: vll %v18, %r3, 1383(%r4)
0xe7 0x23 0x45 0x67 0x08 0x37
-#CHECK: vll %v31, %r15, 4095(%r15)
+# CHECK: vll %v31, %r15, 4095(%r15)
0xe7 0xff 0xff 0xff 0x08 0x37
-#CHECK: vllez %v0, 0, 11
+# CHECK: vllez %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x04
-#CHECK: vllez %v17, 2475(%r7,%r8), 11
+# CHECK: vllez %v17, 2475(%r7,%r8), 11
0xe7 0x17 0x89 0xab 0xb8 0x04
-#CHECK: vllez %v31, 4095(%r15,%r15), 11
+# CHECK: vllez %v31, 4095(%r15,%r15), 11
0xe7 0xff 0xff 0xff 0xb8 0x04
-#CHECK: vllezb %v0, 0
+# CHECK: vllezb %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x04
-#CHECK: vllezb %v17, 2475(%r7,%r8)
+# CHECK: vllezb %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x08 0x04
-#CHECK: vllezb %v31, 4095(%r15,%r15)
+# CHECK: vllezb %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x08 0x04
-#CHECK: vllezf %v0, 0
+# CHECK: vllezf %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x04
-#CHECK: vllezf %v17, 2475(%r7,%r8)
+# CHECK: vllezf %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x28 0x04
-#CHECK: vllezf %v31, 4095(%r15,%r15)
+# CHECK: vllezf %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x28 0x04
-#CHECK: vllezg %v0, 0
+# CHECK: vllezg %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x04
-#CHECK: vllezg %v17, 2475(%r7,%r8)
+# CHECK: vllezg %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x38 0x04
-#CHECK: vllezg %v31, 4095(%r15,%r15)
+# CHECK: vllezg %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x38 0x04
-#CHECK: vllezh %v0, 0
+# CHECK: vllezh %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x04
-#CHECK: vllezh %v17, 2475(%r7,%r8)
+# CHECK: vllezh %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x18 0x04
-#CHECK: vllezh %v31, 4095(%r15,%r15)
+# CHECK: vllezh %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x18 0x04
-#CHECK: vlm %v0, %v0, 0
+# CHECK: vlm %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x36
-#CHECK: vlm %v12, %v18, 1110(%r3)
+# CHECK: vlm %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x04 0x36
-#CHECK: vlm %v31, %v31, 4095(%r15)
+# CHECK: vlm %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x0c 0x36
-#CHECK: vlp %v0, %v0, 11
+# CHECK: vlp %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xdf
-#CHECK: vlp %v19, %v14, 11
+# CHECK: vlp %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xdf
-#CHECK: vlp %v31, %v31, 11
+# CHECK: vlp %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xdf
-#CHECK: vlpb %v0, %v0
+# CHECK: vlpb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xdf
-#CHECK: vlpb %v19, %v14
+# CHECK: vlpb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xdf
-#CHECK: vlpb %v31, %v31
+# CHECK: vlpb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xdf
-#CHECK: vlpf %v0, %v0
+# CHECK: vlpf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xdf
-#CHECK: vlpf %v19, %v14
+# CHECK: vlpf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xdf
-#CHECK: vlpf %v31, %v31
+# CHECK: vlpf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xdf
-#CHECK: vlpg %v0, %v0
+# CHECK: vlpg %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xdf
-#CHECK: vlpg %v19, %v14
+# CHECK: vlpg %v19, %v14
0xe7 0x3e 0x00 0x00 0x38 0xdf
-#CHECK: vlpg %v31, %v31
+# CHECK: vlpg %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0xdf
-#CHECK: vlph %v0, %v0
+# CHECK: vlph %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xdf
-#CHECK: vlph %v19, %v14
+# CHECK: vlph %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xdf
-#CHECK: vlph %v31, %v31
+# CHECK: vlph %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xdf
-#CHECK: vlr %v0, %v0
+# CHECK: vlr %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x56
-#CHECK: vlr %v19, %v14
+# CHECK: vlr %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0x56
-#CHECK: vlr %v31, %v31
+# CHECK: vlr %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0x56
-#CHECK: vlrep %v0, 0, 11
+# CHECK: vlrep %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x05
-#CHECK: vlrep %v17, 2475(%r7,%r8), 11
+# CHECK: vlrep %v17, 2475(%r7,%r8), 11
0xe7 0x17 0x89 0xab 0xb8 0x05
-#CHECK: vlrep %v31, 4095(%r15,%r15), 11
+# CHECK: vlrep %v31, 4095(%r15,%r15), 11
0xe7 0xff 0xff 0xff 0xb8 0x05
-#CHECK: vlrepb %v0, 0
+# CHECK: vlrepb %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x05
-#CHECK: vlrepb %v17, 2475(%r7,%r8)
+# CHECK: vlrepb %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x08 0x05
-#CHECK: vlrepb %v31, 4095(%r15,%r15)
+# CHECK: vlrepb %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x08 0x05
-#CHECK: vlrepf %v0, 0
+# CHECK: vlrepf %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x05
-#CHECK: vlrepf %v17, 2475(%r7,%r8)
+# CHECK: vlrepf %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x28 0x05
-#CHECK: vlrepf %v31, 4095(%r15,%r15)
+# CHECK: vlrepf %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x28 0x05
-#CHECK: vlrepg %v0, 0
+# CHECK: vlrepg %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x05
-#CHECK: vlrepg %v17, 2475(%r7,%r8)
+# CHECK: vlrepg %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x38 0x05
-#CHECK: vlrepg %v31, 4095(%r15,%r15)
+# CHECK: vlrepg %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x38 0x05
-#CHECK: vlreph %v0, 0
+# CHECK: vlreph %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x05
-#CHECK: vlreph %v17, 2475(%r7,%r8)
+# CHECK: vlreph %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x18 0x05
-#CHECK: vlreph %v31, 4095(%r15,%r15)
+# CHECK: vlreph %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x18 0x05
-#CHECK: vlvg %v0, %r0, 0, 11
+# CHECK: vlvg %v0, %r0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x22
-#CHECK: vlvg %v18, %r3, 1383(%r4), 11
+# CHECK: vlvg %v18, %r3, 1383(%r4), 11
0xe7 0x23 0x45 0x67 0xb8 0x22
-#CHECK: vlvg %v31, %r15, 4095(%r15), 11
+# CHECK: vlvg %v31, %r15, 4095(%r15), 11
0xe7 0xff 0xff 0xff 0xb8 0x22
-#CHECK: vlvgb %v0, %r0, 0
+# CHECK: vlvgb %v0, %r0, 0
0xe7 0x00 0x00 0x00 0x00 0x22
-#CHECK: vlvgb %v18, %r3, 1383(%r4)
+# CHECK: vlvgb %v18, %r3, 1383(%r4)
0xe7 0x23 0x45 0x67 0x08 0x22
-#CHECK: vlvgb %v31, %r15, 4095(%r15)
+# CHECK: vlvgb %v31, %r15, 4095(%r15)
0xe7 0xff 0xff 0xff 0x08 0x22
-#CHECK: vlvgf %v0, %r0, 0
+# CHECK: vlvgf %v0, %r0, 0
0xe7 0x00 0x00 0x00 0x20 0x22
-#CHECK: vlvgf %v18, %r3, 1383(%r4)
+# CHECK: vlvgf %v18, %r3, 1383(%r4)
0xe7 0x23 0x45 0x67 0x28 0x22
-#CHECK: vlvgf %v31, %r15, 4095(%r15)
+# CHECK: vlvgf %v31, %r15, 4095(%r15)
0xe7 0xff 0xff 0xff 0x28 0x22
-#CHECK: vlvgg %v0, %r0, 0
+# CHECK: vlvgg %v0, %r0, 0
0xe7 0x00 0x00 0x00 0x30 0x22
-#CHECK: vlvgg %v18, %r3, 1383(%r4)
+# CHECK: vlvgg %v18, %r3, 1383(%r4)
0xe7 0x23 0x45 0x67 0x38 0x22
-#CHECK: vlvgg %v31, %r15, 4095(%r15)
+# CHECK: vlvgg %v31, %r15, 4095(%r15)
0xe7 0xff 0xff 0xff 0x38 0x22
-#CHECK: vlvgh %v0, %r0, 0
+# CHECK: vlvgh %v0, %r0, 0
0xe7 0x00 0x00 0x00 0x10 0x22
-#CHECK: vlvgh %v18, %r3, 1383(%r4)
+# CHECK: vlvgh %v18, %r3, 1383(%r4)
0xe7 0x23 0x45 0x67 0x18 0x22
-#CHECK: vlvgh %v31, %r15, 4095(%r15)
+# CHECK: vlvgh %v31, %r15, 4095(%r15)
0xe7 0xff 0xff 0xff 0x18 0x22
-#CHECK: vlvgp %v0, %r0, %r0
+# CHECK: vlvgp %v0, %r0, %r0
0xe7 0x00 0x00 0x00 0x00 0x62
-#CHECK: vlvgp %v18, %r3, %r4
+# CHECK: vlvgp %v18, %r3, %r4
0xe7 0x23 0x40 0x00 0x08 0x62
-#CHECK: vlvgp %v31, %r15, %r15
+# CHECK: vlvgp %v31, %r15, %r15
0xe7 0xff 0xf0 0x00 0x08 0x62
-#CHECK: vmae %v0, %v0, %v0, %v0, 11
+# CHECK: vmae %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xae
-#CHECK: vmae %v3, %v20, %v5, %v22, 11
+# CHECK: vmae %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xae
-#CHECK: vmae %v31, %v31, %v31, %v31, 11
+# CHECK: vmae %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xae
-#CHECK: vmaeb %v0, %v0, %v0, %v0
+# CHECK: vmaeb %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xae
-#CHECK: vmaeb %v3, %v20, %v5, %v22
+# CHECK: vmaeb %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xae
-#CHECK: vmaeb %v31, %v31, %v31, %v31
+# CHECK: vmaeb %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xae
-#CHECK: vmaef %v0, %v0, %v0, %v0
+# CHECK: vmaef %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xae
-#CHECK: vmaef %v3, %v20, %v5, %v22
+# CHECK: vmaef %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xae
-#CHECK: vmaef %v31, %v31, %v31, %v31
+# CHECK: vmaef %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xae
-#CHECK: vmaeh %v0, %v0, %v0, %v0
+# CHECK: vmaeh %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xae
-#CHECK: vmaeh %v3, %v20, %v5, %v22
+# CHECK: vmaeh %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xae
-#CHECK: vmaeh %v31, %v31, %v31, %v31
+# CHECK: vmaeh %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xae
-#CHECK: vmah %v0, %v0, %v0, %v0, 11
+# CHECK: vmah %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xab
-#CHECK: vmah %v3, %v20, %v5, %v22, 11
+# CHECK: vmah %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xab
-#CHECK: vmah %v31, %v31, %v31, %v31, 11
+# CHECK: vmah %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xab
-#CHECK: vmahb %v0, %v0, %v0, %v0
+# CHECK: vmahb %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xab
-#CHECK: vmahb %v3, %v20, %v5, %v22
+# CHECK: vmahb %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xab
-#CHECK: vmahb %v31, %v31, %v31, %v31
+# CHECK: vmahb %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xab
-#CHECK: vmahf %v0, %v0, %v0, %v0
+# CHECK: vmahf %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xab
-#CHECK: vmahf %v3, %v20, %v5, %v22
+# CHECK: vmahf %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xab
-#CHECK: vmahf %v31, %v31, %v31, %v31
+# CHECK: vmahf %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xab
-#CHECK: vmahh %v0, %v0, %v0, %v0
+# CHECK: vmahh %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xab
-#CHECK: vmahh %v3, %v20, %v5, %v22
+# CHECK: vmahh %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xab
-#CHECK: vmahh %v31, %v31, %v31, %v31
+# CHECK: vmahh %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xab
-#CHECK: vmal %v0, %v0, %v0, %v0, 11
+# CHECK: vmal %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xaa
-#CHECK: vmal %v3, %v20, %v5, %v22, 11
+# CHECK: vmal %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xaa
-#CHECK: vmal %v31, %v31, %v31, %v31, 11
+# CHECK: vmal %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xaa
-#CHECK: vmalb %v0, %v0, %v0, %v0
+# CHECK: vmalb %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xaa
-#CHECK: vmalb %v3, %v20, %v5, %v22
+# CHECK: vmalb %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xaa
-#CHECK: vmalb %v31, %v31, %v31, %v31
+# CHECK: vmalb %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xaa
-#CHECK: vmale %v0, %v0, %v0, %v0, 11
+# CHECK: vmale %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xac
-#CHECK: vmale %v3, %v20, %v5, %v22, 11
+# CHECK: vmale %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xac
-#CHECK: vmale %v31, %v31, %v31, %v31, 11
+# CHECK: vmale %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xac
-#CHECK: vmaleb %v0, %v0, %v0, %v0
+# CHECK: vmaleb %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xac
-#CHECK: vmaleb %v3, %v20, %v5, %v22
+# CHECK: vmaleb %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xac
-#CHECK: vmaleb %v31, %v31, %v31, %v31
+# CHECK: vmaleb %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xac
-#CHECK: vmalef %v0, %v0, %v0, %v0
+# CHECK: vmalef %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xac
-#CHECK: vmalef %v3, %v20, %v5, %v22
+# CHECK: vmalef %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xac
-#CHECK: vmalef %v31, %v31, %v31, %v31
+# CHECK: vmalef %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xac
-#CHECK: vmaleh %v0, %v0, %v0, %v0
+# CHECK: vmaleh %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xac
-#CHECK: vmaleh %v3, %v20, %v5, %v22
+# CHECK: vmaleh %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xac
-#CHECK: vmaleh %v31, %v31, %v31, %v31
+# CHECK: vmaleh %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xac
-#CHECK: vmalf %v0, %v0, %v0, %v0
+# CHECK: vmalf %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xaa
-#CHECK: vmalf %v3, %v20, %v5, %v22
+# CHECK: vmalf %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xaa
-#CHECK: vmalf %v31, %v31, %v31, %v31
+# CHECK: vmalf %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xaa
-#CHECK: vmalh %v0, %v0, %v0, %v0, 11
+# CHECK: vmalh %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xa9
-#CHECK: vmalh %v3, %v20, %v5, %v22, 11
+# CHECK: vmalh %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xa9
-#CHECK: vmalh %v31, %v31, %v31, %v31, 11
+# CHECK: vmalh %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xa9
-#CHECK: vmalhb %v0, %v0, %v0, %v0
+# CHECK: vmalhb %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa9
-#CHECK: vmalhb %v3, %v20, %v5, %v22
+# CHECK: vmalhb %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xa9
-#CHECK: vmalhb %v31, %v31, %v31, %v31
+# CHECK: vmalhb %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xa9
-#CHECK: vmalhf %v0, %v0, %v0, %v0
+# CHECK: vmalhf %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xa9
-#CHECK: vmalhf %v3, %v20, %v5, %v22
+# CHECK: vmalhf %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xa9
-#CHECK: vmalhf %v31, %v31, %v31, %v31
+# CHECK: vmalhf %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xa9
-#CHECK: vmalhh %v0, %v0, %v0, %v0
+# CHECK: vmalhh %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xa9
-#CHECK: vmalhh %v3, %v20, %v5, %v22
+# CHECK: vmalhh %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xa9
-#CHECK: vmalhh %v31, %v31, %v31, %v31
+# CHECK: vmalhh %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xa9
-#CHECK: vmalhw %v0, %v0, %v0, %v0
+# CHECK: vmalhw %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xaa
-#CHECK: vmalhw %v3, %v20, %v5, %v22
+# CHECK: vmalhw %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xaa
-#CHECK: vmalhw %v31, %v31, %v31, %v31
+# CHECK: vmalhw %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xaa
-#CHECK: vmalo %v0, %v0, %v0, %v0, 11
+# CHECK: vmalo %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xad
-#CHECK: vmalo %v3, %v20, %v5, %v22, 11
+# CHECK: vmalo %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xad
-#CHECK: vmalo %v31, %v31, %v31, %v31, 11
+# CHECK: vmalo %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xad
-#CHECK: vmalob %v0, %v0, %v0, %v0
+# CHECK: vmalob %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xad
-#CHECK: vmalob %v3, %v20, %v5, %v22
+# CHECK: vmalob %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xad
-#CHECK: vmalob %v31, %v31, %v31, %v31
+# CHECK: vmalob %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xad
-#CHECK: vmalof %v0, %v0, %v0, %v0
+# CHECK: vmalof %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xad
-#CHECK: vmalof %v3, %v20, %v5, %v22
+# CHECK: vmalof %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xad
-#CHECK: vmalof %v31, %v31, %v31, %v31
+# CHECK: vmalof %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xad
-#CHECK: vmaloh %v0, %v0, %v0, %v0
+# CHECK: vmaloh %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xad
-#CHECK: vmaloh %v3, %v20, %v5, %v22
+# CHECK: vmaloh %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xad
-#CHECK: vmaloh %v31, %v31, %v31, %v31
+# CHECK: vmaloh %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xad
-#CHECK: vmao %v0, %v0, %v0, %v0, 11
+# CHECK: vmao %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xaf
-#CHECK: vmao %v3, %v20, %v5, %v22, 11
+# CHECK: vmao %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xaf
-#CHECK: vmao %v31, %v31, %v31, %v31, 11
+# CHECK: vmao %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xaf
-#CHECK: vmaob %v0, %v0, %v0, %v0
+# CHECK: vmaob %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xaf
-#CHECK: vmaob %v3, %v20, %v5, %v22
+# CHECK: vmaob %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0xaf
-#CHECK: vmaob %v31, %v31, %v31, %v31
+# CHECK: vmaob %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0xaf
-#CHECK: vmaof %v0, %v0, %v0, %v0
+# CHECK: vmaof %v0, %v0, %v0, %v0
0xe7 0x00 0x02 0x00 0x00 0xaf
-#CHECK: vmaof %v3, %v20, %v5, %v22
+# CHECK: vmaof %v3, %v20, %v5, %v22
0xe7 0x34 0x52 0x00 0x65 0xaf
-#CHECK: vmaof %v31, %v31, %v31, %v31
+# CHECK: vmaof %v31, %v31, %v31, %v31
0xe7 0xff 0xf2 0x00 0xff 0xaf
-#CHECK: vmaoh %v0, %v0, %v0, %v0
+# CHECK: vmaoh %v0, %v0, %v0, %v0
0xe7 0x00 0x01 0x00 0x00 0xaf
-#CHECK: vmaoh %v3, %v20, %v5, %v22
+# CHECK: vmaoh %v3, %v20, %v5, %v22
0xe7 0x34 0x51 0x00 0x65 0xaf
-#CHECK: vmaoh %v31, %v31, %v31, %v31
+# CHECK: vmaoh %v31, %v31, %v31, %v31
0xe7 0xff 0xf1 0x00 0xff 0xaf
-#CHECK: vme %v0, %v0, %v0, 11
+# CHECK: vme %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa6
-#CHECK: vme %v18, %v3, %v20, 11
+# CHECK: vme %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa6
-#CHECK: vme %v31, %v31, %v31, 11
+# CHECK: vme %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa6
-#CHECK: vmeb %v0, %v0, %v0
+# CHECK: vmeb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa6
-#CHECK: vmeb %v18, %v3, %v20
+# CHECK: vmeb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa6
-#CHECK: vmeb %v31, %v31, %v31
+# CHECK: vmeb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa6
-#CHECK: vmef %v0, %v0, %v0
+# CHECK: vmef %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xa6
-#CHECK: vmef %v18, %v3, %v20
+# CHECK: vmef %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xa6
-#CHECK: vmef %v31, %v31, %v31
+# CHECK: vmef %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xa6
-#CHECK: vmeh %v0, %v0, %v0
+# CHECK: vmeh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa6
-#CHECK: vmeh %v18, %v3, %v20
+# CHECK: vmeh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa6
-#CHECK: vmeh %v31, %v31, %v31
+# CHECK: vmeh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa6
-#CHECK: vmh %v0, %v0, %v0, 11
+# CHECK: vmh %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa3
-#CHECK: vmh %v18, %v3, %v20, 11
+# CHECK: vmh %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa3
-#CHECK: vmh %v31, %v31, %v31, 11
+# CHECK: vmh %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa3
-#CHECK: vmhb %v0, %v0, %v0
+# CHECK: vmhb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa3
-#CHECK: vmhb %v18, %v3, %v20
+# CHECK: vmhb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa3
-#CHECK: vmhb %v31, %v31, %v31
+# CHECK: vmhb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa3
-#CHECK: vmhf %v0, %v0, %v0
+# CHECK: vmhf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xa3
-#CHECK: vmhf %v18, %v3, %v20
+# CHECK: vmhf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xa3
-#CHECK: vmhf %v31, %v31, %v31
+# CHECK: vmhf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xa3
-#CHECK: vmhh %v0, %v0, %v0
+# CHECK: vmhh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa3
-#CHECK: vmhh %v18, %v3, %v20
+# CHECK: vmhh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa3
-#CHECK: vmhh %v31, %v31, %v31
+# CHECK: vmhh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa3
-#CHECK: vml %v0, %v0, %v0, 11
+# CHECK: vml %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa2
-#CHECK: vml %v18, %v3, %v20, 11
+# CHECK: vml %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa2
-#CHECK: vml %v31, %v31, %v31, 11
+# CHECK: vml %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa2
-#CHECK: vmlb %v0, %v0, %v0
+# CHECK: vmlb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa2
-#CHECK: vmlb %v18, %v3, %v20
+# CHECK: vmlb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa2
-#CHECK: vmlb %v31, %v31, %v31
+# CHECK: vmlb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa2
-#CHECK: vmlf %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0xa2
-
-#CHECK: vmlf %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x2a 0xa2
-
-#CHECK: vmlf %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x2e 0xa2
-
-#CHECK: vmle %v0, %v0, %v0, 11
+# CHECK: vmle %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa4
-#CHECK: vmle %v18, %v3, %v20, 11
+# CHECK: vmle %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa4
-#CHECK: vmle %v31, %v31, %v31, 11
+# CHECK: vmle %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa4
-#CHECK: vmleb %v0, %v0, %v0
+# CHECK: vmleb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa4
-#CHECK: vmleb %v18, %v3, %v20
+# CHECK: vmleb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa4
-#CHECK: vmleb %v31, %v31, %v31
+# CHECK: vmleb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa4
-#CHECK: vmlef %v0, %v0, %v0
+# CHECK: vmlef %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xa4
-#CHECK: vmlef %v18, %v3, %v20
+# CHECK: vmlef %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xa4
-#CHECK: vmlef %v31, %v31, %v31
+# CHECK: vmlef %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xa4
-#CHECK: vmleh %v0, %v0, %v0
+# CHECK: vmleh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa4
-#CHECK: vmleh %v18, %v3, %v20
+# CHECK: vmleh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa4
-#CHECK: vmleh %v31, %v31, %v31
+# CHECK: vmleh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa4
-#CHECK: vmlh %v0, %v0, %v0, 11
+# CHECK: vmlf %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xa2
+
+# CHECK: vmlf %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xa2
+
+# CHECK: vmlf %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x2e 0xa2
+
+# CHECK: vmlh %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa1
-#CHECK: vmlh %v18, %v3, %v20, 11
+# CHECK: vmlh %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa1
-#CHECK: vmlh %v31, %v31, %v31, 11
+# CHECK: vmlh %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa1
-#CHECK: vmlhb %v0, %v0, %v0
+# CHECK: vmlhb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa1
-#CHECK: vmlhb %v18, %v3, %v20
+# CHECK: vmlhb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa1
-#CHECK: vmlhb %v31, %v31, %v31
+# CHECK: vmlhb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa1
-#CHECK: vmlhf %v0, %v0, %v0
+# CHECK: vmlhf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xa1
-#CHECK: vmlhf %v18, %v3, %v20
+# CHECK: vmlhf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xa1
-#CHECK: vmlhf %v31, %v31, %v31
+# CHECK: vmlhf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xa1
-#CHECK: vmlhh %v0, %v0, %v0
+# CHECK: vmlhh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa1
-#CHECK: vmlhh %v18, %v3, %v20
+# CHECK: vmlhh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa1
-#CHECK: vmlhh %v31, %v31, %v31
+# CHECK: vmlhh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa1
-#CHECK: vmlhw %v0, %v0, %v0
+# CHECK: vmlhw %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa2
-#CHECK: vmlhw %v18, %v3, %v20
+# CHECK: vmlhw %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa2
-#CHECK: vmlhw %v31, %v31, %v31
+# CHECK: vmlhw %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa2
-#CHECK: vmlo %v0, %v0, %v0, 11
+# CHECK: vmlo %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa5
-#CHECK: vmlo %v18, %v3, %v20, 11
+# CHECK: vmlo %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa5
-#CHECK: vmlo %v31, %v31, %v31, 11
+# CHECK: vmlo %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa5
-#CHECK: vmlob %v0, %v0, %v0
+# CHECK: vmlob %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa5
-#CHECK: vmlob %v18, %v3, %v20
+# CHECK: vmlob %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa5
-#CHECK: vmlob %v31, %v31, %v31
+# CHECK: vmlob %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa5
-#CHECK: vmlof %v0, %v0, %v0
+# CHECK: vmlof %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xa5
-#CHECK: vmlof %v18, %v3, %v20
+# CHECK: vmlof %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xa5
-#CHECK: vmlof %v31, %v31, %v31
+# CHECK: vmlof %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xa5
-#CHECK: vmloh %v0, %v0, %v0
+# CHECK: vmloh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa5
-#CHECK: vmloh %v18, %v3, %v20
+# CHECK: vmloh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa5
-#CHECK: vmloh %v31, %v31, %v31
+# CHECK: vmloh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa5
-#CHECK: vmn %v0, %v0, %v0, 11
+# CHECK: vmn %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xfe
-#CHECK: vmn %v18, %v3, %v20, 11
+# CHECK: vmn %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xfe
-#CHECK: vmn %v31, %v31, %v31, 11
+# CHECK: vmn %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xfe
-#CHECK: vmnb %v0, %v0, %v0
+# CHECK: vmnb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xfe
-#CHECK: vmnb %v18, %v3, %v20
+# CHECK: vmnb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xfe
-#CHECK: vmnb %v31, %v31, %v31
+# CHECK: vmnb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xfe
-#CHECK: vmnf %v0, %v0, %v0
+# CHECK: vmnf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xfe
-#CHECK: vmnf %v18, %v3, %v20
+# CHECK: vmnf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xfe
-#CHECK: vmnf %v31, %v31, %v31
+# CHECK: vmnf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xfe
-#CHECK: vmng %v0, %v0, %v0
+# CHECK: vmng %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xfe
-#CHECK: vmng %v18, %v3, %v20
+# CHECK: vmng %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xfe
-#CHECK: vmng %v31, %v31, %v31
+# CHECK: vmng %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xfe
-#CHECK: vmnh %v0, %v0, %v0
+# CHECK: vmnh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xfe
-#CHECK: vmnh %v18, %v3, %v20
+# CHECK: vmnh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xfe
-#CHECK: vmnh %v31, %v31, %v31
+# CHECK: vmnh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xfe
-#CHECK: vmnl %v0, %v0, %v0, 11
+# CHECK: vmnl %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xfc
-#CHECK: vmnl %v18, %v3, %v20, 11
+# CHECK: vmnl %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xfc
-#CHECK: vmnl %v31, %v31, %v31, 11
+# CHECK: vmnl %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xfc
-#CHECK: vmnlb %v0, %v0, %v0
+# CHECK: vmnlb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xfc
-#CHECK: vmnlb %v18, %v3, %v20
+# CHECK: vmnlb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xfc
-#CHECK: vmnlb %v31, %v31, %v31
+# CHECK: vmnlb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xfc
-#CHECK: vmnlf %v0, %v0, %v0
+# CHECK: vmnlf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xfc
-#CHECK: vmnlf %v18, %v3, %v20
+# CHECK: vmnlf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xfc
-#CHECK: vmnlf %v31, %v31, %v31
+# CHECK: vmnlf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xfc
-#CHECK: vmnlg %v0, %v0, %v0
+# CHECK: vmnlg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xfc
-#CHECK: vmnlg %v18, %v3, %v20
+# CHECK: vmnlg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xfc
-#CHECK: vmnlg %v31, %v31, %v31
+# CHECK: vmnlg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xfc
-#CHECK: vmnlh %v0, %v0, %v0
+# CHECK: vmnlh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xfc
-#CHECK: vmnlh %v18, %v3, %v20
+# CHECK: vmnlh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xfc
-#CHECK: vmnlh %v31, %v31, %v31
+# CHECK: vmnlh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xfc
-#CHECK: vmo %v0, %v0, %v0, 11
+# CHECK: vmo %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xa7
-#CHECK: vmo %v18, %v3, %v20, 11
+# CHECK: vmo %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xa7
-#CHECK: vmo %v31, %v31, %v31, 11
+# CHECK: vmo %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xa7
-#CHECK: vmob %v0, %v0, %v0
+# CHECK: vmob %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xa7
-#CHECK: vmob %v18, %v3, %v20
+# CHECK: vmob %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xa7
-#CHECK: vmob %v31, %v31, %v31
+# CHECK: vmob %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xa7
-#CHECK: vmof %v0, %v0, %v0
+# CHECK: vmof %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xa7
-#CHECK: vmof %v18, %v3, %v20
+# CHECK: vmof %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xa7
-#CHECK: vmof %v31, %v31, %v31
+# CHECK: vmof %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xa7
-#CHECK: vmoh %v0, %v0, %v0
+# CHECK: vmoh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xa7
-#CHECK: vmoh %v18, %v3, %v20
+# CHECK: vmoh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xa7
-#CHECK: vmoh %v31, %v31, %v31
+# CHECK: vmoh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xa7
-#CHECK: vmrh %v0, %v0, %v0, 11
+# CHECK: vmrh %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x61
-#CHECK: vmrh %v18, %v3, %v20, 11
+# CHECK: vmrh %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0x61
-#CHECK: vmrh %v31, %v31, %v31, 11
+# CHECK: vmrh %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0x61
-#CHECK: vmrhb %v0, %v0, %v0
+# CHECK: vmrhb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x61
-#CHECK: vmrhb %v18, %v3, %v20
+# CHECK: vmrhb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x61
-#CHECK: vmrhb %v31, %v31, %v31
+# CHECK: vmrhb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x61
-#CHECK: vmrhf %v0, %v0, %v0
+# CHECK: vmrhf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x61
-#CHECK: vmrhf %v18, %v3, %v20
+# CHECK: vmrhf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0x61
-#CHECK: vmrhf %v31, %v31, %v31
+# CHECK: vmrhf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0x61
-#CHECK: vmrhg %v0, %v0, %v0
+# CHECK: vmrhg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x61
-#CHECK: vmrhg %v18, %v3, %v20
+# CHECK: vmrhg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0x61
-#CHECK: vmrhg %v31, %v31, %v31
+# CHECK: vmrhg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0x61
-#CHECK: vmrhh %v0, %v0, %v0
+# CHECK: vmrhh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x61
-#CHECK: vmrhh %v18, %v3, %v20
+# CHECK: vmrhh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0x61
-#CHECK: vmrhh %v31, %v31, %v31
+# CHECK: vmrhh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0x61
-#CHECK: vmrl %v0, %v0, %v0, 11
+# CHECK: vmrl %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x60
-#CHECK: vmrl %v18, %v3, %v20, 11
+# CHECK: vmrl %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0x60
-#CHECK: vmrl %v31, %v31, %v31, 11
+# CHECK: vmrl %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0x60
-#CHECK: vmrlb %v0, %v0, %v0
+# CHECK: vmrlb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x60
-#CHECK: vmrlb %v18, %v3, %v20
+# CHECK: vmrlb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x60
-#CHECK: vmrlb %v31, %v31, %v31
+# CHECK: vmrlb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x60
-#CHECK: vmrlf %v0, %v0, %v0
+# CHECK: vmrlf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x60
-#CHECK: vmrlf %v18, %v3, %v20
+# CHECK: vmrlf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0x60
-#CHECK: vmrlf %v31, %v31, %v31
+# CHECK: vmrlf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0x60
-#CHECK: vmrlg %v0, %v0, %v0
+# CHECK: vmrlg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x60
-#CHECK: vmrlg %v18, %v3, %v20
+# CHECK: vmrlg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0x60
-#CHECK: vmrlg %v31, %v31, %v31
+# CHECK: vmrlg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0x60
-#CHECK: vmrlh %v0, %v0, %v0
+# CHECK: vmrlh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x60
-#CHECK: vmrlh %v18, %v3, %v20
+# CHECK: vmrlh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0x60
-#CHECK: vmrlh %v31, %v31, %v31
+# CHECK: vmrlh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0x60
-#CHECK: vmx %v0, %v0, %v0, 11
+# CHECK: vmx %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xff
-#CHECK: vmx %v18, %v3, %v20, 11
+# CHECK: vmx %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xff
-#CHECK: vmx %v31, %v31, %v31, 11
+# CHECK: vmx %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xff
-#CHECK: vmxb %v0, %v0, %v0
+# CHECK: vmxb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xff
-#CHECK: vmxb %v18, %v3, %v20
+# CHECK: vmxb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xff
-#CHECK: vmxb %v31, %v31, %v31
+# CHECK: vmxb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xff
-#CHECK: vmxf %v0, %v0, %v0
+# CHECK: vmxf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xff
-#CHECK: vmxf %v18, %v3, %v20
+# CHECK: vmxf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xff
-#CHECK: vmxf %v31, %v31, %v31
+# CHECK: vmxf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xff
-#CHECK: vmxg %v0, %v0, %v0
+# CHECK: vmxg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xff
-#CHECK: vmxg %v18, %v3, %v20
+# CHECK: vmxg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xff
-#CHECK: vmxg %v31, %v31, %v31
+# CHECK: vmxg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xff
-#CHECK: vmxh %v0, %v0, %v0
+# CHECK: vmxh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xff
-#CHECK: vmxh %v18, %v3, %v20
+# CHECK: vmxh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xff
-#CHECK: vmxh %v31, %v31, %v31
+# CHECK: vmxh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xff
-#CHECK: vmxl %v0, %v0, %v0, 11
+# CHECK: vmxl %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xfd
-#CHECK: vmxl %v18, %v3, %v20, 11
+# CHECK: vmxl %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xfd
-#CHECK: vmxl %v31, %v31, %v31, 11
+# CHECK: vmxl %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xfd
-#CHECK: vmxlb %v0, %v0, %v0
+# CHECK: vmxlb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xfd
-#CHECK: vmxlb %v18, %v3, %v20
+# CHECK: vmxlb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xfd
-#CHECK: vmxlb %v31, %v31, %v31
+# CHECK: vmxlb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xfd
-#CHECK: vmxlf %v0, %v0, %v0
+# CHECK: vmxlf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xfd
-#CHECK: vmxlf %v18, %v3, %v20
+# CHECK: vmxlf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xfd
-#CHECK: vmxlf %v31, %v31, %v31
+# CHECK: vmxlf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xfd
-#CHECK: vmxlg %v0, %v0, %v0
+# CHECK: vmxlg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xfd
-#CHECK: vmxlg %v18, %v3, %v20
+# CHECK: vmxlg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xfd
-#CHECK: vmxlg %v31, %v31, %v31
+# CHECK: vmxlg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xfd
-#CHECK: vmxlh %v0, %v0, %v0
+# CHECK: vmxlh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xfd
-#CHECK: vmxlh %v18, %v3, %v20
+# CHECK: vmxlh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xfd
-#CHECK: vmxlh %v31, %v31, %v31
+# CHECK: vmxlh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xfd
-#CHECK: vn %v0, %v0, %v0
+# CHECK: vn %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x68
-#CHECK: vn %v18, %v3, %v20
+# CHECK: vn %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x68
-#CHECK: vn %v31, %v31, %v31
+# CHECK: vn %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x68
-#CHECK: vnc %v0, %v0, %v0
+# CHECK: vnc %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x69
-#CHECK: vnc %v18, %v3, %v20
+# CHECK: vnc %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x69
-#CHECK: vnc %v31, %v31, %v31
+# CHECK: vnc %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x69
-#CHECK: vno %v0, %v0, %v0
+# CHECK: vno %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x6b
-#CHECK: vno %v18, %v3, %v20
+# CHECK: vno %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x6b
-#CHECK: vno %v31, %v31, %v31
+# CHECK: vno %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x6b
-#CHECK: vo %v0, %v0, %v0
+# CHECK: vo %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x6a
-#CHECK: vo %v18, %v3, %v20
+# CHECK: vo %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x6a
-#CHECK: vo %v31, %v31, %v31
+# CHECK: vo %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x6a
-#CHECK: vpdi %v0, %v0, %v0, 0
+# CHECK: vpdi %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x84
-#CHECK: vpdi %v3, %v20, %v5, 4
+# CHECK: vpdi %v3, %v20, %v5, 4
0xe7 0x34 0x50 0x00 0x44 0x84
-#CHECK: vpdi %v31, %v31, %v31, 15
+# CHECK: vpdi %v31, %v31, %v31, 15
0xe7 0xff 0xf0 0x00 0xfe 0x84
-#CHECK: vperm %v0, %v0, %v0, %v0
+# CHECK: vperm %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x8c
-#CHECK: vperm %v3, %v20, %v5, %v22
+# CHECK: vperm %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0x8c
-#CHECK: vperm %v31, %v31, %v31, %v31
+# CHECK: vperm %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0x8c
-#CHECK: vpk %v0, %v0, %v0, 11
+# CHECK: vpk %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x94
-#CHECK: vpk %v18, %v3, %v20, 11
+# CHECK: vpk %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0x94
-#CHECK: vpk %v31, %v31, %v31, 11
+# CHECK: vpk %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0x94
-#CHECK: vpkf %v0, %v0, %v0
+# CHECK: vpkf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x94
-#CHECK: vpkf %v18, %v3, %v20
+# CHECK: vpkf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0x94
-#CHECK: vpkf %v31, %v31, %v31
+# CHECK: vpkf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0x94
-#CHECK: vpkg %v0, %v0, %v0
+# CHECK: vpkg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x94
-#CHECK: vpkg %v18, %v3, %v20
+# CHECK: vpkg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0x94
-#CHECK: vpkg %v31, %v31, %v31
+# CHECK: vpkg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0x94
-#CHECK: vpkh %v0, %v0, %v0
+# CHECK: vpkh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x94
-#CHECK: vpkh %v18, %v3, %v20
+# CHECK: vpkh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0x94
-#CHECK: vpkh %v31, %v31, %v31
+# CHECK: vpkh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0x94
-#CHECK: vpkls %v0, %v0, %v0, 11, 9
+# CHECK: vpkls %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x90 0xb0 0x95
-#CHECK: vpkls %v18, %v3, %v20, 11, 9
+# CHECK: vpkls %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x90 0xba 0x95
-#CHECK: vpkls %v31, %v31, %v31, 11, 9
+# CHECK: vpkls %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x90 0xbe 0x95
-#CHECK: vpklsf %v0, %v0, %v0
+# CHECK: vpklsf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x95
-#CHECK: vpklsf %v18, %v3, %v20
+# CHECK: vpklsf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0x95
-#CHECK: vpklsfs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x24 0x95
-
-#CHECK: vpklsf %v31, %v31, %v31
+# CHECK: vpklsf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0x95
-#CHECK: vpklsg %v0, %v0, %v0
+# CHECK: vpklsfs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x24 0x95
+
+# CHECK: vpklsg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x95
-#CHECK: vpklsg %v18, %v3, %v20
+# CHECK: vpklsg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0x95
-#CHECK: vpklsgs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x34 0x95
-
-#CHECK: vpklsg %v31, %v31, %v31
+# CHECK: vpklsg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0x95
-#CHECK: vpklsh %v0, %v0, %v0
+# CHECK: vpklsgs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x34 0x95
+
+# CHECK: vpklsh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x95
-#CHECK: vpklsh %v18, %v3, %v20
+# CHECK: vpklsh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0x95
-#CHECK: vpklshs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x14 0x95
-
-#CHECK: vpklsh %v31, %v31, %v31
+# CHECK: vpklsh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0x95
-#CHECK: vpks %v0, %v0, %v0, 11, 9
+# CHECK: vpklshs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x14 0x95
+
+# CHECK: vpks %v0, %v0, %v0, 11, 9
0xe7 0x00 0x00 0x90 0xb0 0x97
-#CHECK: vpks %v18, %v3, %v20, 11, 9
+# CHECK: vpks %v18, %v3, %v20, 11, 9
0xe7 0x23 0x40 0x90 0xba 0x97
-#CHECK: vpks %v31, %v31, %v31, 11, 9
+# CHECK: vpks %v31, %v31, %v31, 11, 9
0xe7 0xff 0xf0 0x90 0xbe 0x97
-#CHECK: vpksf %v0, %v0, %v0
+# CHECK: vpksf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x97
-#CHECK: vpksf %v18, %v3, %v20
+# CHECK: vpksf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0x97
-#CHECK: vpksfs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x24 0x97
-
-#CHECK: vpksf %v31, %v31, %v31
+# CHECK: vpksf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0x97
-#CHECK: vpksg %v0, %v0, %v0
+# CHECK: vpksfs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x24 0x97
+
+# CHECK: vpksg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x97
-#CHECK: vpksg %v18, %v3, %v20
+# CHECK: vpksg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0x97
-#CHECK: vpksgs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x34 0x97
-
-#CHECK: vpksg %v31, %v31, %v31
+# CHECK: vpksg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0x97
-#CHECK: vpksh %v0, %v0, %v0
+# CHECK: vpksgs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x34 0x97
+
+# CHECK: vpksh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x97
-#CHECK: vpksh %v18, %v3, %v20
+# CHECK: vpksh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0x97
-#CHECK: vpkshs %v7, %v24, %v9
-0xe7 0x78 0x90 0x10 0x14 0x97
-
-#CHECK: vpksh %v31, %v31, %v31
+# CHECK: vpksh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0x97
-#CHECK: vpopct %v0, %v0, 0
+# CHECK: vpkshs %v7, %v24, %v9
+0xe7 0x78 0x90 0x10 0x14 0x97
+
+# CHECK: vpopct %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x50
-#CHECK: vpopct %v19, %v14, 0
+# CHECK: vpopct %v19, %v14, 0
0xe7 0x3e 0x00 0x00 0x08 0x50
-#CHECK: vpopct %v31, %v31
+# CHECK: vpopct %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0x50
-#CHECK: vrep %v0, %v0, 0, 11
+# CHECK: vrep %v0, %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x4d
-#CHECK: vrep %v19, %v4, 22136, 11
+# CHECK: vrep %v19, %v4, 22136, 11
0xe7 0x34 0x56 0x78 0xb8 0x4d
-#CHECK: vrep %v31, %v31, 65535, 11
+# CHECK: vrep %v31, %v31, 65535, 11
0xe7 0xff 0xff 0xff 0xbc 0x4d
-#CHECK: vrepb %v0, %v0, 0
+# CHECK: vrepb %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x4d
-#CHECK: vrepb %v19, %v4, 22136
+# CHECK: vrepb %v19, %v4, 22136
0xe7 0x34 0x56 0x78 0x08 0x4d
-#CHECK: vrepb %v31, %v31, 65535
+# CHECK: vrepb %v31, %v31, 65535
0xe7 0xff 0xff 0xff 0x0c 0x4d
-#CHECK: vrepf %v0, %v0, 0
+# CHECK: vrepf %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x4d
-#CHECK: vrepf %v19, %v4, 22136
+# CHECK: vrepf %v19, %v4, 22136
0xe7 0x34 0x56 0x78 0x28 0x4d
-#CHECK: vrepf %v31, %v31, 65535
+# CHECK: vrepf %v31, %v31, 65535
0xe7 0xff 0xff 0xff 0x2c 0x4d
-#CHECK: vrepg %v0, %v0, 0
+# CHECK: vrepg %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x4d
-#CHECK: vrepg %v19, %v4, 22136
+# CHECK: vrepg %v19, %v4, 22136
0xe7 0x34 0x56 0x78 0x38 0x4d
-#CHECK: vrepg %v31, %v31, 65535
+# CHECK: vrepg %v31, %v31, 65535
0xe7 0xff 0xff 0xff 0x3c 0x4d
-#CHECK: vreph %v0, %v0, 0
+# CHECK: vreph %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x4d
-#CHECK: vreph %v19, %v4, 22136
+# CHECK: vreph %v19, %v4, 22136
0xe7 0x34 0x56 0x78 0x18 0x4d
-#CHECK: vreph %v31, %v31, 65535
+# CHECK: vreph %v31, %v31, 65535
0xe7 0xff 0xff 0xff 0x1c 0x4d
-#CHECK: vrepi %v0, 0, 11
+# CHECK: vrepi %v0, 0, 11
0xe7 0x00 0x00 0x00 0xb0 0x45
-#CHECK: vrepi %v23, -30293, 11
+# CHECK: vrepi %v23, -30293, 11
0xe7 0x70 0x89 0xab 0xb8 0x45
-#CHECK: vrepi %v31, -1, 11
+# CHECK: vrepi %v31, -1, 11
0xe7 0xf0 0xff 0xff 0xb8 0x45
-#CHECK: vrepib %v0, 0
+# CHECK: vrepib %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x45
-#CHECK: vrepib %v23, -30293
+# CHECK: vrepib %v23, -30293
0xe7 0x70 0x89 0xab 0x08 0x45
-#CHECK: vrepib %v31, -1
+# CHECK: vrepib %v31, -1
0xe7 0xf0 0xff 0xff 0x08 0x45
-#CHECK: vrepif %v0, 0
+# CHECK: vrepif %v0, 0
0xe7 0x00 0x00 0x00 0x20 0x45
-#CHECK: vrepif %v23, -30293
+# CHECK: vrepif %v23, -30293
0xe7 0x70 0x89 0xab 0x28 0x45
-#CHECK: vrepif %v31, -1
+# CHECK: vrepif %v31, -1
0xe7 0xf0 0xff 0xff 0x28 0x45
-#CHECK: vrepig %v0, 0
+# CHECK: vrepig %v0, 0
0xe7 0x00 0x00 0x00 0x30 0x45
-#CHECK: vrepig %v23, -30293
+# CHECK: vrepig %v23, -30293
0xe7 0x70 0x89 0xab 0x38 0x45
-#CHECK: vrepig %v31, -1
+# CHECK: vrepig %v31, -1
0xe7 0xf0 0xff 0xff 0x38 0x45
-#CHECK: vrepih %v0, 0
+# CHECK: vrepih %v0, 0
0xe7 0x00 0x00 0x00 0x10 0x45
-#CHECK: vrepih %v23, -30293
+# CHECK: vrepih %v23, -30293
0xe7 0x70 0x89 0xab 0x18 0x45
-#CHECK: vrepih %v31, -1
+# CHECK: vrepih %v31, -1
0xe7 0xf0 0xff 0xff 0x18 0x45
-#CHECK: vs %v0, %v0, %v0, 11
+# CHECK: vs %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xf7
-#CHECK: vs %v18, %v3, %v20, 11
+# CHECK: vs %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xf7
-#CHECK: vs %v31, %v31, %v31, 11
+# CHECK: vs %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xf7
-#CHECK: vsb %v0, %v0, %v0
+# CHECK: vsb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf7
-#CHECK: vsb %v18, %v3, %v20
+# CHECK: vsb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf7
-#CHECK: vsb %v31, %v31, %v31
+# CHECK: vsb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf7
-#CHECK: vsbi %v0, %v0, %v0, %v0, 11
-0xe7 0x00 0x0b 0x00 0x00 0xbf
-
-#CHECK: vsbi %v3, %v20, %v5, %v22, 11
-0xe7 0x34 0x5b 0x00 0x65 0xbf
-
-#CHECK: vsbi %v31, %v31, %v31, %v31, 11
-0xe7 0xff 0xfb 0x00 0xff 0xbf
-
-#CHECK: vsbiq %v0, %v0, %v0, %v0
-0xe7 0x00 0x04 0x00 0x00 0xbf
-
-#CHECK: vsbiq %v3, %v20, %v5, %v22
-0xe7 0x34 0x54 0x00 0x65 0xbf
-
-#CHECK: vsbiq %v31, %v31, %v31, %v31
-0xe7 0xff 0xf4 0x00 0xff 0xbf
-
-#CHECK: vsbcbi %v0, %v0, %v0, %v0, 11
+# CHECK: vsbcbi %v0, %v0, %v0, %v0, 11
0xe7 0x00 0x0b 0x00 0x00 0xbd
-#CHECK: vsbcbi %v3, %v20, %v5, %v22, 11
+# CHECK: vsbcbi %v3, %v20, %v5, %v22, 11
0xe7 0x34 0x5b 0x00 0x65 0xbd
-#CHECK: vsbcbi %v31, %v31, %v31, %v31, 11
+# CHECK: vsbcbi %v31, %v31, %v31, %v31, 11
0xe7 0xff 0xfb 0x00 0xff 0xbd
-#CHECK: vsbcbiq %v0, %v0, %v0, %v0
+# CHECK: vsbcbiq %v0, %v0, %v0, %v0
0xe7 0x00 0x04 0x00 0x00 0xbd
-#CHECK: vsbcbiq %v3, %v20, %v5, %v22
+# CHECK: vsbcbiq %v3, %v20, %v5, %v22
0xe7 0x34 0x54 0x00 0x65 0xbd
-#CHECK: vsbcbiq %v31, %v31, %v31, %v31
+# CHECK: vsbcbiq %v31, %v31, %v31, %v31
0xe7 0xff 0xf4 0x00 0xff 0xbd
-#CHECK: vscbi %v0, %v0, %v0, 11
+# CHECK: vsbi %v0, %v0, %v0, %v0, 11
+0xe7 0x00 0x0b 0x00 0x00 0xbf
+
+# CHECK: vsbi %v3, %v20, %v5, %v22, 11
+0xe7 0x34 0x5b 0x00 0x65 0xbf
+
+# CHECK: vsbi %v31, %v31, %v31, %v31, 11
+0xe7 0xff 0xfb 0x00 0xff 0xbf
+
+# CHECK: vsbiq %v0, %v0, %v0, %v0
+0xe7 0x00 0x04 0x00 0x00 0xbf
+
+# CHECK: vsbiq %v3, %v20, %v5, %v22
+0xe7 0x34 0x54 0x00 0x65 0xbf
+
+# CHECK: vsbiq %v31, %v31, %v31, %v31
+0xe7 0xff 0xf4 0x00 0xff 0xbf
+
+# CHECK: vscbi %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xf5
-#CHECK: vscbi %v18, %v3, %v20, 11
+# CHECK: vscbi %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0xf5
-#CHECK: vscbi %v31, %v31, %v31, 11
+# CHECK: vscbi %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0xf5
-#CHECK: vscbib %v0, %v0, %v0
+# CHECK: vscbib %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xf5
-#CHECK: vscbib %v18, %v3, %v20
+# CHECK: vscbib %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0xf5
-#CHECK: vscbib %v31, %v31, %v31
+# CHECK: vscbib %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0xf5
-#CHECK: vscbif %v0, %v0, %v0
+# CHECK: vscbif %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf5
-#CHECK: vscbif %v18, %v3, %v20
+# CHECK: vscbif %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf5
-#CHECK: vscbif %v31, %v31, %v31
+# CHECK: vscbif %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf5
-#CHECK: vscbig %v0, %v0, %v0
+# CHECK: vscbig %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf5
-#CHECK: vscbig %v18, %v3, %v20
+# CHECK: vscbig %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf5
-#CHECK: vscbig %v31, %v31, %v31
+# CHECK: vscbig %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf5
-#CHECK: vscbih %v0, %v0, %v0
+# CHECK: vscbih %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf5
-#CHECK: vscbih %v18, %v3, %v20
+# CHECK: vscbih %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf5
-#CHECK: vscbih %v31, %v31, %v31
+# CHECK: vscbih %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf5
-#CHECK: vscbiq %v0, %v0, %v0
+# CHECK: vscbiq %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x40 0xf5
-#CHECK: vscbiq %v18, %v3, %v20
+# CHECK: vscbiq %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x4a 0xf5
-#CHECK: vscbiq %v31, %v31, %v31
+# CHECK: vscbiq %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x4e 0xf5
-#CHECK: vscef %v0, 0(%v0), 0
+# CHECK: vscef %v0, 0(%v0), 0
0xe7 0x00 0x00 0x00 0x00 0x1b
-#CHECK: vscef %v10, 1000(%v19,%r7), 2
+# CHECK: vscef %v10, 1000(%v19,%r7), 2
0xe7 0xa3 0x73 0xe8 0x24 0x1b
-#CHECK: vscef %v31, 4095(%v31,%r15), 3
+# CHECK: vscef %v31, 4095(%v31,%r15), 3
0xe7 0xff 0xff 0xff 0x3c 0x1b
-#CHECK: vsceg %v0, 0(%v0), 0
+# CHECK: vsceg %v0, 0(%v0), 0
0xe7 0x00 0x00 0x00 0x00 0x1a
-#CHECK: vsceg %v10, 1000(%v19,%r7), 1
+# CHECK: vsceg %v10, 1000(%v19,%r7), 1
0xe7 0xa3 0x73 0xe8 0x14 0x1a
-#CHECK: vsceg %v31, 4095(%v31,%r15), 1
+# CHECK: vsceg %v31, 4095(%v31,%r15), 1
0xe7 0xff 0xff 0xff 0x1c 0x1a
-#CHECK: vseg %v0, %v0, 11
+# CHECK: vseg %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x5f
-#CHECK: vseg %v19, %v14, 11
+# CHECK: vseg %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0x5f
-#CHECK: vseg %v31, %v31, 11
+# CHECK: vseg %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0x5f
-#CHECK: vsegb %v0, %v0
+# CHECK: vsegb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x5f
-#CHECK: vsegb %v19, %v14
+# CHECK: vsegb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0x5f
-#CHECK: vsegb %v31, %v31
+# CHECK: vsegb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0x5f
-#CHECK: vsegf %v0, %v0
+# CHECK: vsegf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x5f
-#CHECK: vsegf %v19, %v14
+# CHECK: vsegf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0x5f
-#CHECK: vsegf %v31, %v31
+# CHECK: vsegf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0x5f
-#CHECK: vsegh %v0, %v0
+# CHECK: vsegh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x5f
-#CHECK: vsegh %v19, %v14
+# CHECK: vsegh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0x5f
-#CHECK: vsegh %v31, %v31
+# CHECK: vsegh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0x5f
-#CHECK: vsel %v0, %v0, %v0, %v0
+# CHECK: vsel %v0, %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x8d
-#CHECK: vsel %v3, %v20, %v5, %v22
+# CHECK: vsel %v3, %v20, %v5, %v22
0xe7 0x34 0x50 0x00 0x65 0x8d
-#CHECK: vsel %v31, %v31, %v31, %v31
+# CHECK: vsel %v31, %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0xff 0x8d
-#CHECK: vsf %v0, %v0, %v0
+# CHECK: vsf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xf7
-#CHECK: vsf %v18, %v3, %v20
+# CHECK: vsf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0xf7
-#CHECK: vsf %v31, %v31, %v31
+# CHECK: vsf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0xf7
-#CHECK: vsg %v0, %v0, %v0
+# CHECK: vsg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0xf7
-#CHECK: vsg %v18, %v3, %v20
+# CHECK: vsg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0xf7
-#CHECK: vsg %v31, %v31, %v31
+# CHECK: vsg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0xf7
-#CHECK: vsh %v0, %v0, %v0
+# CHECK: vsh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xf7
-#CHECK: vsh %v18, %v3, %v20
+# CHECK: vsh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0xf7
-#CHECK: vsh %v31, %v31, %v31
+# CHECK: vsh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0xf7
-#CHECK: vsl %v0, %v0, %v0
+# CHECK: vsl %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x74
-#CHECK: vsl %v18, %v3, %v20
+# CHECK: vsl %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x74
-#CHECK: vsl %v31, %v31, %v31
+# CHECK: vsl %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x74
-#CHECK: vslb %v0, %v0, %v0
+# CHECK: vslb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x75
-#CHECK: vslb %v18, %v3, %v20
+# CHECK: vslb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x75
-#CHECK: vslb %v31, %v31, %v31
+# CHECK: vslb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x75
-#CHECK: vsldb %v0, %v0, %v0, 0
+# CHECK: vsldb %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x77
-#CHECK: vsldb %v3, %v20, %v5, 103
+# CHECK: vsldb %v3, %v20, %v5, 103
0xe7 0x34 0x50 0x67 0x04 0x77
-#CHECK: vsldb %v31, %v31, %v31, 255
+# CHECK: vsldb %v31, %v31, %v31, 255
0xe7 0xff 0xf0 0xff 0x0e 0x77
-#CHECK: vsq %v0, %v0, %v0
+# CHECK: vsq %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x40 0xf7
-#CHECK: vsq %v18, %v3, %v20
+# CHECK: vsq %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x4a 0xf7
-#CHECK: vsq %v31, %v31, %v31
+# CHECK: vsq %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x4e 0xf7
-#CHECK: vsra %v0, %v0, %v0
+# CHECK: vsra %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x7e
-#CHECK: vsra %v18, %v3, %v20
+# CHECK: vsra %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x7e
-#CHECK: vsra %v31, %v31, %v31
+# CHECK: vsra %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x7e
-#CHECK: vsrab %v0, %v0, %v0
+# CHECK: vsrab %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x7f
-#CHECK: vsrab %v18, %v3, %v20
+# CHECK: vsrab %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x7f
-#CHECK: vsrab %v31, %v31, %v31
+# CHECK: vsrab %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x7f
-#CHECK: vsrl %v0, %v0, %v0
+# CHECK: vsrl %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x7c
-#CHECK: vsrl %v18, %v3, %v20
+# CHECK: vsrl %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x7c
-#CHECK: vsrl %v31, %v31, %v31
+# CHECK: vsrl %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x7c
-#CHECK: vsrlb %v0, %v0, %v0
+# CHECK: vsrlb %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x7d
-#CHECK: vsrlb %v18, %v3, %v20
+# CHECK: vsrlb %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x7d
-#CHECK: vsrlb %v31, %v31, %v31
+# CHECK: vsrlb %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x7d
-#CHECK: vst %v0, 0
+# CHECK: vst %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x0E
-#CHECK: vst %v17, 2475(%r7,%r8)
+# CHECK: vst %v17, 2475(%r7,%r8)
0xe7 0x17 0x89 0xab 0x08 0x0E
-#CHECK: vst %v31, 4095(%r15,%r15)
+# CHECK: vst %v31, 4095(%r15,%r15)
0xe7 0xff 0xff 0xff 0x08 0x0E
-#CHECK: vsteb %v0, 0, 0
+# CHECK: vsteb %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x08
-#CHECK: vsteb %v17, 2475(%r7,%r8), 12
+# CHECK: vsteb %v17, 2475(%r7,%r8), 12
0xe7 0x17 0x89 0xab 0xc8 0x08
-#CHECK: vsteb %v31, 4095(%r15,%r15), 15
+# CHECK: vsteb %v31, 4095(%r15,%r15), 15
0xe7 0xff 0xff 0xff 0xf8 0x08
-#CHECK: vstef %v0, 0, 0
+# CHECK: vstef %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x0b
-#CHECK: vstef %v17, 2475(%r7,%r8), 2
+# CHECK: vstef %v17, 2475(%r7,%r8), 2
0xe7 0x17 0x89 0xab 0x28 0x0b
-#CHECK: vstef %v31, 4095(%r15,%r15), 3
+# CHECK: vstef %v31, 4095(%r15,%r15), 3
0xe7 0xff 0xff 0xff 0x38 0x0b
-#CHECK: vsteg %v0, 0, 0
+# CHECK: vsteg %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x0a
-#CHECK: vsteg %v17, 2475(%r7,%r8), 1
+# CHECK: vsteg %v17, 2475(%r7,%r8), 1
0xe7 0x17 0x89 0xab 0x18 0x0a
-#CHECK: vsteg %v31, 4095(%r15,%r15), 1
+# CHECK: vsteg %v31, 4095(%r15,%r15), 1
0xe7 0xff 0xff 0xff 0x18 0x0a
-#CHECK: vsteh %v0, 0, 0
+# CHECK: vsteh %v0, 0, 0
0xe7 0x00 0x00 0x00 0x00 0x09
-#CHECK: vsteh %v17, 2475(%r7,%r8), 5
+# CHECK: vsteh %v17, 2475(%r7,%r8), 5
0xe7 0x17 0x89 0xab 0x58 0x09
-#CHECK: vsteh %v31, 4095(%r15,%r15), 7
+# CHECK: vsteh %v31, 4095(%r15,%r15), 7
0xe7 0xff 0xff 0xff 0x78 0x09
-#CHECK: vstl %v0, %r0, 0
+# CHECK: vstl %v0, %r0, 0
0xe7 0x00 0x00 0x00 0x00 0x3f
-#CHECK: vstl %v18, %r3, 1383(%r4)
+# CHECK: vstl %v18, %r3, 1383(%r4)
0xe7 0x23 0x45 0x67 0x08 0x3f
-#CHECK: vstl %v31, %r15, 4095(%r15)
+# CHECK: vstl %v31, %r15, 4095(%r15)
0xe7 0xff 0xff 0xff 0x08 0x3f
-#CHECK: vstm %v0, %v0, 0
+# CHECK: vstm %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x3e
-#CHECK: vstm %v12, %v18, 1110(%r3)
+# CHECK: vstm %v12, %v18, 1110(%r3)
0xe7 0xc2 0x34 0x56 0x04 0x3e
-#CHECK: vstm %v31, %v31, 4095(%r15)
+# CHECK: vstm %v31, %v31, 4095(%r15)
0xe7 0xff 0xff 0xff 0x0c 0x3e
-#CHECK: vstrc %v0, %v0, %v0, %v0, 11, 0
+# CHECK: vstrc %v0, %v0, %v0, %v0, 11, 0
0xe7 0x00 0x0b 0x00 0x00 0x8a
-#CHECK: vstrc %v0, %v0, %v0, %v0, 11, 12
+# CHECK: vstrc %v0, %v0, %v0, %v0, 11, 12
0xe7 0x00 0x0b 0xc0 0x00 0x8a
-#CHECK: vstrc %v18, %v3, %v20, %v5, 11, 0
+# CHECK: vstrc %v18, %v3, %v20, %v5, 11, 0
0xe7 0x23 0x4b 0x00 0x5a 0x8a
-#CHECK: vstrc %v31, %v31, %v31, %v31, 11, 4
+# CHECK: vstrc %v31, %v31, %v31, %v31, 11, 4
0xe7 0xff 0xfb 0x40 0xff 0x8a
-#CHECK: vstrcb %v0, %v0, %v0, %v0, 0
+# CHECK: vstrcb %v0, %v0, %v0, %v0, 0
0xe7 0x00 0x00 0x00 0x00 0x8a
-#CHECK: vstrcb %v0, %v0, %v0, %v0, 12
+# CHECK: vstrcb %v0, %v0, %v0, %v0, 12
0xe7 0x00 0x00 0xc0 0x00 0x8a
-#CHECK: vstrcb %v18, %v3, %v20, %v5, 0
+# CHECK: vstrcb %v18, %v3, %v20, %v5, 0
0xe7 0x23 0x40 0x00 0x5a 0x8a
-#CHECK: vstrcb %v31, %v31, %v31, %v31, 4
+# CHECK: vstrcb %v31, %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x40 0xff 0x8a
-#CHECK: vstrcbs %v31, %v31, %v31, %v31, 8
+# CHECK: vstrcbs %v31, %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0x90 0xff 0x8a
-#CHECK: vstrczb %v31, %v31, %v31, %v31, 4
+# CHECK: vstrczb %v31, %v31, %v31, %v31, 4
0xe7 0xff 0xf0 0x60 0xff 0x8a
-#CHECK: vstrczbs %v31, %v31, %v31, %v31, 8
+# CHECK: vstrczbs %v31, %v31, %v31, %v31, 8
0xe7 0xff 0xf0 0xb0 0xff 0x8a
-#CHECK: vstrcf %v0, %v0, %v0, %v0, 0
+# CHECK: vstrcf %v0, %v0, %v0, %v0, 0
0xe7 0x00 0x02 0x00 0x00 0x8a
-#CHECK: vstrcf %v0, %v0, %v0, %v0, 12
+# CHECK: vstrcf %v0, %v0, %v0, %v0, 12
0xe7 0x00 0x02 0xc0 0x00 0x8a
-#CHECK: vstrcf %v18, %v3, %v20, %v5, 0
+# CHECK: vstrcf %v18, %v3, %v20, %v5, 0
0xe7 0x23 0x42 0x00 0x5a 0x8a
-#CHECK: vstrcf %v31, %v31, %v31, %v31, 4
+# CHECK: vstrcf %v31, %v31, %v31, %v31, 4
0xe7 0xff 0xf2 0x40 0xff 0x8a
-#CHECK: vstrcfs %v31, %v31, %v31, %v31, 8
+# CHECK: vstrcfs %v31, %v31, %v31, %v31, 8
0xe7 0xff 0xf2 0x90 0xff 0x8a
-#CHECK: vstrczf %v31, %v31, %v31, %v31, 4
+# CHECK: vstrczf %v31, %v31, %v31, %v31, 4
0xe7 0xff 0xf2 0x60 0xff 0x8a
-#CHECK: vstrczfs %v31, %v31, %v31, %v31, 8
+# CHECK: vstrczfs %v31, %v31, %v31, %v31, 8
0xe7 0xff 0xf2 0xb0 0xff 0x8a
-#CHECK: vstrch %v0, %v0, %v0, %v0, 0
+# CHECK: vstrch %v0, %v0, %v0, %v0, 0
0xe7 0x00 0x01 0x00 0x00 0x8a
-#CHECK: vstrch %v0, %v0, %v0, %v0, 12
+# CHECK: vstrch %v0, %v0, %v0, %v0, 12
0xe7 0x00 0x01 0xc0 0x00 0x8a
-#CHECK: vstrch %v18, %v3, %v20, %v5, 0
+# CHECK: vstrch %v18, %v3, %v20, %v5, 0
0xe7 0x23 0x41 0x00 0x5a 0x8a
-#CHECK: vstrch %v31, %v31, %v31, %v31, 4
+# CHECK: vstrch %v31, %v31, %v31, %v31, 4
0xe7 0xff 0xf1 0x40 0xff 0x8a
-#CHECK: vstrchs %v31, %v31, %v31, %v31, 8
+# CHECK: vstrchs %v31, %v31, %v31, %v31, 8
0xe7 0xff 0xf1 0x90 0xff 0x8a
-#CHECK: vstrczh %v31, %v31, %v31, %v31, 4
+# CHECK: vstrczh %v31, %v31, %v31, %v31, 4
0xe7 0xff 0xf1 0x60 0xff 0x8a
-#CHECK: vstrczhs %v31, %v31, %v31, %v31, 8
+# CHECK: vstrczhs %v31, %v31, %v31, %v31, 8
0xe7 0xff 0xf1 0xb0 0xff 0x8a
-#CHECK: vsumg %v0, %v0, %v0, 11
+# CHECK: vsum %v0, %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0x64
+
+# CHECK: vsum %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0x00 0xba 0x64
+
+# CHECK: vsum %v31, %v31, %v31, 11
+0xe7 0xff 0xf0 0x00 0xbe 0x64
+
+# CHECK: vsumb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x64
+
+# CHECK: vsumb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x64
+
+# CHECK: vsumb %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x0e 0x64
+
+# CHECK: vsumg %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x65
-#CHECK: vsumg %v18, %v3, %v20, 11
+# CHECK: vsumg %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0x65
-#CHECK: vsumg %v31, %v31, %v31, 11
+# CHECK: vsumg %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0x65
-#CHECK: vsumgh %v0, %v0, %v0
+# CHECK: vsumgf %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0x65
+
+# CHECK: vsumgf %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0x65
+
+# CHECK: vsumgf %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x2e 0x65
+
+# CHECK: vsumgh %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0x65
-#CHECK: vsumgh %v18, %v3, %v20
+# CHECK: vsumgh %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x1a 0x65
-#CHECK: vsumgh %v31, %v31, %v31
+# CHECK: vsumgh %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x1e 0x65
-#CHECK: vsumgf %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0x65
+# CHECK: vsumh %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x10 0x64
-#CHECK: vsumgf %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x2a 0x65
+# CHECK: vsumh %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x1a 0x64
-#CHECK: vsumgf %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x2e 0x65
+# CHECK: vsumh %v31, %v31, %v31
+0xe7 0xff 0xf0 0x00 0x1e 0x64
-#CHECK: vsumq %v0, %v0, %v0, 11
+# CHECK: vsumq %v0, %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0x67
-#CHECK: vsumq %v18, %v3, %v20, 11
+# CHECK: vsumq %v18, %v3, %v20, 11
0xe7 0x23 0x40 0x00 0xba 0x67
-#CHECK: vsumq %v31, %v31, %v31, 11
+# CHECK: vsumq %v31, %v31, %v31, 11
0xe7 0xff 0xf0 0x00 0xbe 0x67
-#CHECK: vsumqf %v0, %v0, %v0
+# CHECK: vsumqf %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0x67
-#CHECK: vsumqf %v18, %v3, %v20
+# CHECK: vsumqf %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x2a 0x67
-#CHECK: vsumqf %v31, %v31, %v31
+# CHECK: vsumqf %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x2e 0x67
-#CHECK: vsumqg %v0, %v0, %v0
+# CHECK: vsumqg %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x30 0x67
-#CHECK: vsumqg %v18, %v3, %v20
+# CHECK: vsumqg %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x3a 0x67
-#CHECK: vsumqg %v31, %v31, %v31
+# CHECK: vsumqg %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x3e 0x67
-#CHECK: vsum %v0, %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0x64
-
-#CHECK: vsum %v18, %v3, %v20, 11
-0xe7 0x23 0x40 0x00 0xba 0x64
-
-#CHECK: vsum %v31, %v31, %v31, 11
-0xe7 0xff 0xf0 0x00 0xbe 0x64
-
-#CHECK: vsumb %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x00 0x64
-
-#CHECK: vsumb %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x0a 0x64
-
-#CHECK: vsumb %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x0e 0x64
-
-#CHECK: vsumh %v0, %v0, %v0
-0xe7 0x00 0x00 0x00 0x10 0x64
-
-#CHECK: vsumh %v18, %v3, %v20
-0xe7 0x23 0x40 0x00 0x1a 0x64
-
-#CHECK: vsumh %v31, %v31, %v31
-0xe7 0xff 0xf0 0x00 0x1e 0x64
-
-#CHECK: vtm %v0, %v0
+# CHECK: vtm %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xd8
-#CHECK: vtm %v19, %v14
+# CHECK: vtm %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xd8
-#CHECK: vtm %v31, %v31
+# CHECK: vtm %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xd8
-#CHECK: vuph %v0, %v0, 11
+# CHECK: vuph %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xd7
-#CHECK: vuph %v19, %v14, 11
+# CHECK: vuph %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xd7
-#CHECK: vuph %v31, %v31, 11
+# CHECK: vuph %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xd7
-#CHECK: vuphb %v0, %v0
+# CHECK: vuphb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xd7
-#CHECK: vuphb %v19, %v14
+# CHECK: vuphb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xd7
-#CHECK: vuphb %v31, %v31
+# CHECK: vuphb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xd7
-#CHECK: vuphf %v0, %v0
+# CHECK: vuphf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xd7
-#CHECK: vuphf %v19, %v14
+# CHECK: vuphf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xd7
-#CHECK: vuphf %v31, %v31
+# CHECK: vuphf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xd7
-#CHECK: vuphh %v0, %v0
+# CHECK: vuphh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xd7
-#CHECK: vuphh %v19, %v14
+# CHECK: vuphh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xd7
-#CHECK: vuphh %v31, %v31
+# CHECK: vuphh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xd7
-#CHECK: vuplh %v0, %v0, 11
+# CHECK: vupl %v0, %v0, 11
+0xe7 0x00 0x00 0x00 0xb0 0xd6
+
+# CHECK: vupl %v19, %v14, 11
+0xe7 0x3e 0x00 0x00 0xb8 0xd6
+
+# CHECK: vupl %v31, %v31, 11
+0xe7 0xff 0x00 0x00 0xbc 0xd6
+
+# CHECK: vuplb %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0xd6
+
+# CHECK: vuplb %v19, %v14
+0xe7 0x3e 0x00 0x00 0x08 0xd6
+
+# CHECK: vuplb %v31, %v31
+0xe7 0xff 0x00 0x00 0x0c 0xd6
+
+# CHECK: vuplf %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xd6
+
+# CHECK: vuplf %v19, %v14
+0xe7 0x3e 0x00 0x00 0x28 0xd6
+
+# CHECK: vuplf %v31, %v31
+0xe7 0xff 0x00 0x00 0x2c 0xd6
+
+# CHECK: vuplh %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xd5
-#CHECK: vuplh %v19, %v14, 11
+# CHECK: vuplh %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xd5
-#CHECK: vuplh %v31, %v31, 11
+# CHECK: vuplh %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xd5
-#CHECK: vuplhb %v0, %v0
+# CHECK: vuplhb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xd5
-#CHECK: vuplhb %v19, %v14
+# CHECK: vuplhb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xd5
-#CHECK: vuplhb %v31, %v31
+# CHECK: vuplhb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xd5
-#CHECK: vuplhf %v0, %v0
+# CHECK: vuplhf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xd5
-#CHECK: vuplhf %v19, %v14
+# CHECK: vuplhf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xd5
-#CHECK: vuplhf %v31, %v31
+# CHECK: vuplhf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xd5
-#CHECK: vuplhh %v0, %v0
+# CHECK: vuplhh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xd5
-#CHECK: vuplhh %v19, %v14
+# CHECK: vuplhh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xd5
-#CHECK: vuplhh %v31, %v31
+# CHECK: vuplhh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xd5
-#CHECK: vupl %v0, %v0, 11
-0xe7 0x00 0x00 0x00 0xb0 0xd6
-
-#CHECK: vupl %v19, %v14, 11
-0xe7 0x3e 0x00 0x00 0xb8 0xd6
-
-#CHECK: vupl %v31, %v31, 11
-0xe7 0xff 0x00 0x00 0xbc 0xd6
-
-#CHECK: vuplb %v0, %v0
-0xe7 0x00 0x00 0x00 0x00 0xd6
-
-#CHECK: vuplb %v19, %v14
-0xe7 0x3e 0x00 0x00 0x08 0xd6
-
-#CHECK: vuplb %v31, %v31
-0xe7 0xff 0x00 0x00 0x0c 0xd6
-
-#CHECK: vuplf %v0, %v0
-0xe7 0x00 0x00 0x00 0x20 0xd6
-
-#CHECK: vuplf %v19, %v14
-0xe7 0x3e 0x00 0x00 0x28 0xd6
-
-#CHECK: vuplf %v31, %v31
-0xe7 0xff 0x00 0x00 0x2c 0xd6
-
-#CHECK: vuplhw %v0, %v0
+# CHECK: vuplhw %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xd6
-#CHECK: vuplhw %v19, %v14
+# CHECK: vuplhw %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xd6
-#CHECK: vuplhw %v31, %v31
+# CHECK: vuplhw %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xd6
-#CHECK: vupll %v0, %v0, 11
+# CHECK: vupll %v0, %v0, 11
0xe7 0x00 0x00 0x00 0xb0 0xd4
-#CHECK: vupll %v19, %v14, 11
+# CHECK: vupll %v19, %v14, 11
0xe7 0x3e 0x00 0x00 0xb8 0xd4
-#CHECK: vupll %v31, %v31, 11
+# CHECK: vupll %v31, %v31, 11
0xe7 0xff 0x00 0x00 0xbc 0xd4
-#CHECK: vupllb %v0, %v0
+# CHECK: vupllb %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0xd4
-#CHECK: vupllb %v19, %v14
+# CHECK: vupllb %v19, %v14
0xe7 0x3e 0x00 0x00 0x08 0xd4
-#CHECK: vupllb %v31, %v31
+# CHECK: vupllb %v31, %v31
0xe7 0xff 0x00 0x00 0x0c 0xd4
-#CHECK: vupllf %v0, %v0
+# CHECK: vupllf %v0, %v0
0xe7 0x00 0x00 0x00 0x20 0xd4
-#CHECK: vupllf %v19, %v14
+# CHECK: vupllf %v19, %v14
0xe7 0x3e 0x00 0x00 0x28 0xd4
-#CHECK: vupllf %v31, %v31
+# CHECK: vupllf %v31, %v31
0xe7 0xff 0x00 0x00 0x2c 0xd4
-#CHECK: vupllh %v0, %v0
+# CHECK: vupllh %v0, %v0
0xe7 0x00 0x00 0x00 0x10 0xd4
-#CHECK: vupllh %v19, %v14
+# CHECK: vupllh %v19, %v14
0xe7 0x3e 0x00 0x00 0x18 0xd4
-#CHECK: vupllh %v31, %v31
+# CHECK: vupllh %v31, %v31
0xe7 0xff 0x00 0x00 0x1c 0xd4
-#CHECK: vx %v0, %v0, %v0
+# CHECK: vx %v0, %v0, %v0
0xe7 0x00 0x00 0x00 0x00 0x6d
-#CHECK: vx %v18, %v3, %v20
+# CHECK: vx %v18, %v3, %v20
0xe7 0x23 0x40 0x00 0x0a 0x6d
-#CHECK: vx %v31, %v31, %v31
+# CHECK: vx %v31, %v31, %v31
0xe7 0xff 0xf0 0x00 0x0e 0x6d
-#CHECK: wcdgb %f0, %f0, 0, 0
+# CHECK: wcdgb %f0, %f0, 0, 0
0xe7 0x00 0x00 0x08 0x30 0xc3
-#CHECK: wcdgb %v19, %f14, 4, 10
+# CHECK: wcdgb %v19, %f14, 4, 10
0xe7 0x3e 0x00 0xac 0x38 0xc3
-#CHECK: wcdgb %v31, %v31, 7, 15
+# CHECK: wcdgb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xff 0x3c 0xc3
-#CHECK: wcdlgb %f0, %f0, 0, 0
+# CHECK: wcdlgb %f0, %f0, 0, 0
0xe7 0x00 0x00 0x08 0x30 0xc1
-#CHECK: wcdlgb %v19, %f14, 4, 10
+# CHECK: wcdlgb %v19, %f14, 4, 10
0xe7 0x3e 0x00 0xac 0x38 0xc1
-#CHECK: wcdlgb %v31, %v31, 7, 15
+# CHECK: wcdlgb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xff 0x3c 0xc1
-#CHECK: wcgdb %f0, %f0, 0, 0
+# CHECK: wcgdb %f0, %f0, 0, 0
0xe7 0x00 0x00 0x08 0x30 0xc2
-#CHECK: wcgdb %v19, %f14, 4, 10
+# CHECK: wcgdb %v19, %f14, 4, 10
0xe7 0x3e 0x00 0xac 0x38 0xc2
-#CHECK: wcgdb %v31, %v31, 7, 15
+# CHECK: wcgdb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xff 0x3c 0xc2
-#CHECK: wclgdb %f0, %f0, 0, 0
+# CHECK: wclgdb %f0, %f0, 0, 0
0xe7 0x00 0x00 0x08 0x30 0xc0
-#CHECK: wclgdb %v19, %f14, 4, 10
+# CHECK: wclgdb %v19, %f14, 4, 10
0xe7 0x3e 0x00 0xac 0x38 0xc0
-#CHECK: wclgdb %v31, %v31, 7, 15
+# CHECK: wclgdb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xff 0x3c 0xc0
-#CHECK: wfadb %f0, %f0, %f0
+# CHECK: wfadb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xe3
-#CHECK: wfadb %v18, %f3, %v20
+# CHECK: wfadb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xe3
-#CHECK: wfadb %v31, %v31, %v31
+# CHECK: wfadb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xe3
-#CHECK: wfc %f0, %f0, 11, 9
+# CHECK: wfc %f0, %f0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xcb
-#CHECK: wfc %v19, %f14, 11, 9
+# CHECK: wfc %v19, %f14, 11, 9
0xe7 0x3e 0x00 0x09 0xb8 0xcb
-#CHECK: wfc %v31, %v31, 11, 9
+# CHECK: wfc %v31, %v31, 11, 9
0xe7 0xff 0x00 0x09 0xbc 0xcb
-#CHECK: wfcdb %f0, %f0
+# CHECK: wfcdb %f0, %f0
0xe7 0x00 0x00 0x00 0x30 0xcb
-#CHECK: wfcdb %v19, %f14
+# CHECK: wfcdb %v19, %f14
0xe7 0x3e 0x00 0x00 0x38 0xcb
-#CHECK: wfcdb %v31, %v31
+# CHECK: wfcdb %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0xcb
-#CHECK: wfcedb %f0, %f0, %f0
+# CHECK: wfcedb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xe8
-#CHECK: wfcedb %v18, %f3, %v20
+# CHECK: wfcedb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xe8
-#CHECK: wfcedb %v31, %v31, %v31
+# CHECK: wfcedb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xe8
-#CHECK: wfcedbs %f0, %f0, %f0
+# CHECK: wfcedbs %f0, %f0, %f0
0xe7 0x00 0x00 0x18 0x30 0xe8
-#CHECK: wfcedbs %v18, %f3, %v20
+# CHECK: wfcedbs %v18, %f3, %v20
0xe7 0x23 0x40 0x18 0x3a 0xe8
-#CHECK: wfcedbs %v31, %v31, %v31
+# CHECK: wfcedbs %v31, %v31, %v31
0xe7 0xff 0xf0 0x18 0x3e 0xe8
-#CHECK: wfchdb %f0, %f0, %f0
+# CHECK: wfchdb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xeb
-#CHECK: wfchdb %v18, %f3, %v20
+# CHECK: wfchdb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xeb
-#CHECK: wfchdb %v31, %v31, %v31
+# CHECK: wfchdb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xeb
-#CHECK: wfchdbs %f0, %f0, %f0
+# CHECK: wfchdbs %f0, %f0, %f0
0xe7 0x00 0x00 0x18 0x30 0xeb
-#CHECK: wfchdbs %v18, %f3, %v20
+# CHECK: wfchdbs %v18, %f3, %v20
0xe7 0x23 0x40 0x18 0x3a 0xeb
-#CHECK: wfchdbs %v31, %v31, %v31
+# CHECK: wfchdbs %v31, %v31, %v31
0xe7 0xff 0xf0 0x18 0x3e 0xeb
-#CHECK: wfchedb %f0, %f0, %f0
+# CHECK: wfchedb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xea
-#CHECK: wfchedb %v18, %f3, %v20
+# CHECK: wfchedb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xea
-#CHECK: wfchedb %v31, %v31, %v31
+# CHECK: wfchedb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xea
-#CHECK: wfchedbs %f0, %f0, %f0
+# CHECK: wfchedbs %f0, %f0, %f0
0xe7 0x00 0x00 0x18 0x30 0xea
-#CHECK: wfchedbs %v18, %f3, %v20
+# CHECK: wfchedbs %v18, %f3, %v20
0xe7 0x23 0x40 0x18 0x3a 0xea
-#CHECK: wfchedbs %v31, %v31, %v31
+# CHECK: wfchedbs %v31, %v31, %v31
0xe7 0xff 0xf0 0x18 0x3e 0xea
-#CHECK: wfddb %f0, %f0, %f0
+# CHECK: wfddb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xe5
-#CHECK: wfddb %v18, %f3, %v20
+# CHECK: wfddb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xe5
-#CHECK: wfddb %v31, %v31, %v31
+# CHECK: wfddb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xe5
-#CHECK: wfidb %f0, %f0, 0, 0
+# CHECK: wfidb %f0, %f0, 0, 0
0xe7 0x00 0x00 0x08 0x30 0xc7
-#CHECK: wfidb %v19, %f14, 4, 10
+# CHECK: wfidb %v19, %f14, 4, 10
0xe7 0x3e 0x00 0xac 0x38 0xc7
-#CHECK: wfidb %v31, %v31, 7, 15
+# CHECK: wfidb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xff 0x3c 0xc7
-#CHECK: wfk %f0, %f0, 11, 9
+# CHECK: wfk %f0, %f0, 11, 9
0xe7 0x00 0x00 0x09 0xb0 0xca
-#CHECK: wfk %v19, %f14, 11, 9
+# CHECK: wfk %v19, %f14, 11, 9
0xe7 0x3e 0x00 0x09 0xb8 0xca
-#CHECK: wfk %v31, %v31, 11, 9
+# CHECK: wfk %v31, %v31, 11, 9
0xe7 0xff 0x00 0x09 0xbc 0xca
-#CHECK: wfkdb %f0, %f0
+# CHECK: wfkdb %f0, %f0
0xe7 0x00 0x00 0x00 0x30 0xca
-#CHECK: wfkdb %v19, %f14
+# CHECK: wfkdb %v19, %f14
0xe7 0x3e 0x00 0x00 0x38 0xca
-#CHECK: wfkdb %v31, %v31
+# CHECK: wfkdb %v31, %v31
0xe7 0xff 0x00 0x00 0x3c 0xca
-#CHECK: wfpsodb %f0, %f0, 7
-0xe7 0x00 0x00 0x78 0x30 0xcc
-
-#CHECK: wfpsodb %v19, %f14, 7
-0xe7 0x3e 0x00 0x78 0x38 0xcc
-
-#CHECK: wfpsodb %v31, %v31, 7
-0xe7 0xff 0x00 0x78 0x3c 0xcc
-
-#CHECK: wflcdb %f0, %f0
+# CHECK: wflcdb %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xcc
-#CHECK: wflcdb %v19, %f14
+# CHECK: wflcdb %v19, %f14
0xe7 0x3e 0x00 0x08 0x38 0xcc
-#CHECK: wflcdb %v31, %v31
+# CHECK: wflcdb %v31, %v31
0xe7 0xff 0x00 0x08 0x3c 0xcc
-#CHECK: wflndb %f0, %f0
+# CHECK: wflndb %f0, %f0
0xe7 0x00 0x00 0x18 0x30 0xcc
-#CHECK: wflndb %v19, %f14
+# CHECK: wflndb %v19, %f14
0xe7 0x3e 0x00 0x18 0x38 0xcc
-#CHECK: wflndb %v31, %v31
+# CHECK: wflndb %v31, %v31
0xe7 0xff 0x00 0x18 0x3c 0xcc
-#CHECK: wflpdb %f0, %f0
+# CHECK: wflpdb %f0, %f0
0xe7 0x00 0x00 0x28 0x30 0xcc
-#CHECK: wflpdb %v19, %f14
+# CHECK: wflpdb %v19, %f14
0xe7 0x3e 0x00 0x28 0x38 0xcc
-#CHECK: wflpdb %v31, %v31
+# CHECK: wflpdb %v31, %v31
0xe7 0xff 0x00 0x28 0x3c 0xcc
-#CHECK: wfmadb %f0, %f0, %f0, %f0
+# CHECK: wfmadb %f0, %f0, %f0, %f0
0xe7 0x00 0x03 0x08 0x00 0x8f
-#CHECK: wfmadb %f3, %v20, %f5, %v22
+# CHECK: wfmadb %f3, %v20, %f5, %v22
0xe7 0x34 0x53 0x08 0x65 0x8f
-#CHECK: wfmadb %v31, %v31, %v31, %v31
+# CHECK: wfmadb %v31, %v31, %v31, %v31
0xe7 0xff 0xf3 0x08 0xff 0x8f
-#CHECK: wfmdb %f0, %f0, %f0
+# CHECK: wfmdb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xe7
-#CHECK: wfmdb %v18, %f3, %v20
+# CHECK: wfmdb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xe7
-#CHECK: wfmdb %v31, %v31, %v31
+# CHECK: wfmdb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xe7
-#CHECK: wfmsdb %f0, %f0, %f0, %f0
+# CHECK: wfmsdb %f0, %f0, %f0, %f0
0xe7 0x00 0x03 0x08 0x00 0x8e
-#CHECK: wfmsdb %f3, %v20, %f5, %v22
+# CHECK: wfmsdb %f3, %v20, %f5, %v22
0xe7 0x34 0x53 0x08 0x65 0x8e
-#CHECK: wfmsdb %v31, %v31, %v31, %v31
+# CHECK: wfmsdb %v31, %v31, %v31, %v31
0xe7 0xff 0xf3 0x08 0xff 0x8e
-#CHECK: wfsdb %f0, %f0, %f0
+# CHECK: wfpsodb %f0, %f0, 7
+0xe7 0x00 0x00 0x78 0x30 0xcc
+
+# CHECK: wfpsodb %v19, %f14, 7
+0xe7 0x3e 0x00 0x78 0x38 0xcc
+
+# CHECK: wfpsodb %v31, %v31, 7
+0xe7 0xff 0x00 0x78 0x3c 0xcc
+
+# CHECK: wfsdb %f0, %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xe2
-#CHECK: wfsdb %v18, %f3, %v20
+# CHECK: wfsdb %v18, %f3, %v20
0xe7 0x23 0x40 0x08 0x3a 0xe2
-#CHECK: wfsdb %v31, %v31, %v31
+# CHECK: wfsdb %v31, %v31, %v31
0xe7 0xff 0xf0 0x08 0x3e 0xe2
-#CHECK: wfsqdb %f0, %f0
+# CHECK: wfsqdb %f0, %f0
0xe7 0x00 0x00 0x08 0x30 0xce
-#CHECK: wfsqdb %v19, %f14
+# CHECK: wfsqdb %v19, %f14
0xe7 0x3e 0x00 0x08 0x38 0xce
-#CHECK: wfsqdb %v31, %v31
+# CHECK: wfsqdb %v31, %v31
0xe7 0xff 0x00 0x08 0x3c 0xce
-#CHECK: wftcidb %f0, %f0, 0
+# CHECK: wftcidb %f0, %f0, 0
0xe7 0x00 0x00 0x08 0x30 0x4a
-#CHECK: wftcidb %v19, %f4, 1383
+# CHECK: wftcidb %v19, %f4, 1383
0xe7 0x34 0x56 0x78 0x38 0x4a
-#CHECK: wftcidb %v31, %v31, 4095
+# CHECK: wftcidb %v31, %v31, 4095
0xe7 0xff 0xff 0xf8 0x3c 0x4a
-#CHECK: wldeb %f0, %f0
+# CHECK: wldeb %f0, %f0
0xe7 0x00 0x00 0x08 0x20 0xc4
-#CHECK: wldeb %v19, %f14
+# CHECK: wldeb %v19, %f14
0xe7 0x3e 0x00 0x08 0x28 0xc4
-#CHECK: wldeb %v31, %v31
+# CHECK: wldeb %v31, %v31
0xe7 0xff 0x00 0x08 0x2c 0xc4
-#CHECK: wledb %f0, %f0, 0, 0
+# CHECK: wledb %f0, %f0, 0, 0
0xe7 0x00 0x00 0x08 0x30 0xc5
-#CHECK: wledb %v19, %f14, 4, 10
+# CHECK: wledb %v19, %f14, 4, 10
0xe7 0x3e 0x00 0xac 0x38 0xc5
-#CHECK: wledb %v31, %v31, 7, 15
+# CHECK: wledb %v31, %v31, 7, 15
0xe7 0xff 0x00 0xff 0x3c 0xc5
-#CHECK: lochi %r11, 42, 0
-0xec 0xb0 0x00 0x2a 0x00 0x42
-
-#CHECK: lochio %r11, 42
-0xec 0xb1 0x00 0x2a 0x00 0x42
-
-#CHECK: lochih %r11, 42
-0xec 0xb2 0x00 0x2a 0x00 0x42
-
-#CHECK: lochinle %r11, 42
-0xec 0xb3 0x00 0x2a 0x00 0x42
-
-#CHECK: lochil %r11, -1
-0xec 0xb4 0xff 0xff 0x00 0x42
-
-#CHECK: lochinhe %r11, 42
-0xec 0xb5 0x00 0x2a 0x00 0x42
-
-#CHECK: lochilh %r11, -1
-0xec 0xb6 0xff 0xff 0x00 0x42
-
-#CHECK: lochine %r11, 0
-0xec 0xb7 0x00 0x00 0x00 0x42
-
-#CHECK: lochie %r11, 0
-0xec 0xb8 0x00 0x00 0x00 0x42
-
-#CHECK: lochinlh %r11, 42
-0xec 0xb9 0x00 0x2a 0x00 0x42
-
-#CHECK: lochihe %r11, 255
-0xec 0xba 0x00 0xff 0x00 0x42
-
-#CHECK: lochinl %r11, 255
-0xec 0xbb 0x00 0xff 0x00 0x42
-
-#CHECK: lochile %r11, 32767
-0xec 0xbc 0x7f 0xff 0x00 0x42
-
-#CHECK: lochinh %r11, 32767
-0xec 0xbd 0x7f 0xff 0x00 0x42
-
-#CHECK: lochino %r11, 32512
-0xec 0xbe 0x7f 0x00 0x00 0x42
-
-#CHECK: lochi %r11, 32512, 15
-0xec 0xbf 0x7f 0x00 0x00 0x42
-
-#CHECK: locghi %r11, 42, 0
-0xec 0xb0 0x00 0x2a 0x00 0x46
-
-#CHECK: locghio %r11, 42
-0xec 0xb1 0x00 0x2a 0x00 0x46
-
-#CHECK: locghih %r11, 42
-0xec 0xb2 0x00 0x2a 0x00 0x46
-
-#CHECK: locghinle %r11, 42
-0xec 0xb3 0x00 0x2a 0x00 0x46
-
-#CHECK: locghil %r11, -1
-0xec 0xb4 0xff 0xff 0x00 0x46
-
-#CHECK: locghinhe %r11, 42
-0xec 0xb5 0x00 0x2a 0x00 0x46
-
-#CHECK: locghilh %r11, -1
-0xec 0xb6 0xff 0xff 0x00 0x46
-
-#CHECK: locghine %r11, 0
-0xec 0xb7 0x00 0x00 0x00 0x46
-
-#CHECK: locghie %r11, 0
-0xec 0xb8 0x00 0x00 0x00 0x46
-
-#CHECK: locghinlh %r11, 42
-0xec 0xb9 0x00 0x2a 0x00 0x46
-
-#CHECK: locghihe %r11, 255
-0xec 0xba 0x00 0xff 0x00 0x46
-
-#CHECK: locghinl %r11, 255
-0xec 0xbb 0x00 0xff 0x00 0x46
-
-#CHECK: locghile %r11, 32767
-0xec 0xbc 0x7f 0xff 0x00 0x46
-
-#CHECK: locghinh %r11, 32767
-0xec 0xbd 0x7f 0xff 0x00 0x46
-
-#CHECK: locghino %r11, 32512
-0xec 0xbe 0x7f 0x00 0x00 0x46
-
-#CHECK: locghi %r11, 32512, 15
-0xec 0xbf 0x7f 0x00 0x00 0x46
-
-#CHECK: lochhi %r11, 42, 0
-0xec 0xb0 0x00 0x2a 0x00 0x4e
-
-#CHECK: lochhio %r11, 42
-0xec 0xb1 0x00 0x2a 0x00 0x4e
-
-#CHECK: lochhih %r11, 42
-0xec 0xb2 0x00 0x2a 0x00 0x4e
-
-#CHECK: lochhinle %r11, 42
-0xec 0xb3 0x00 0x2a 0x00 0x4e
-
-#CHECK: lochhil %r11, -1
-0xec 0xb4 0xff 0xff 0x00 0x4e
-
-#CHECK: lochhinhe %r11, 42
-0xec 0xb5 0x00 0x2a 0x00 0x4e
-
-#CHECK: lochhilh %r11, -1
-0xec 0xb6 0xff 0xff 0x00 0x4e
-
-#CHECK: lochhine %r11, 0
-0xec 0xb7 0x00 0x00 0x00 0x4e
-
-#CHECK: lochhie %r11, 0
-0xec 0xb8 0x00 0x00 0x00 0x4e
-
-#CHECK: lochhinlh %r11, 42
-0xec 0xb9 0x00 0x2a 0x00 0x4e
-
-#CHECK: lochhihe %r11, 255
-0xec 0xba 0x00 0xff 0x00 0x4e
-
-#CHECK: lochhinl %r11, 255
-0xec 0xbb 0x00 0xff 0x00 0x4e
-
-#CHECK: lochhile %r11, 32767
-0xec 0xbc 0x7f 0xff 0x00 0x4e
-
-#CHECK: lochhinh %r11, 32767
-0xec 0xbd 0x7f 0xff 0x00 0x4e
-
-#CHECK: lochhino %r11, 32512
-0xec 0xbe 0x7f 0x00 0x00 0x4e
-
-#CHECK: lochhi %r11, 32512, 15
-0xec 0xbf 0x7f 0x00 0x00 0x4e
-
-# CHECK: locfh %r7, 6399(%r8), 0
-0xeb 0x70 0x88 0xff 0x01 0xe0
-
-# CHECK: locfho %r7, 6399(%r8)
-0xeb 0x71 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhh %r7, 6399(%r8)
-0xeb 0x72 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhnle %r7, 6399(%r8)
-0xeb 0x73 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhl %r7, 6399(%r8)
-0xeb 0x74 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhnhe %r7, 6399(%r8)
-0xeb 0x75 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhlh %r7, 6399(%r8)
-0xeb 0x76 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhne %r7, 6399(%r8)
-0xeb 0x77 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhe %r7, 6399(%r8)
-0xeb 0x78 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhnlh %r7, 6399(%r8)
-0xeb 0x79 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhhe %r7, 6399(%r8)
-0xeb 0x7a 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhnl %r7, 6399(%r8)
-0xeb 0x7b 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhle %r7, 6399(%r8)
-0xeb 0x7c 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhnh %r7, 6399(%r8)
-0xeb 0x7d 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhno %r7, 6399(%r8)
-0xeb 0x7e 0x88 0xff 0x01 0xe0
-
-# CHECK: locfh %r7, 6399(%r8), 15
-0xeb 0x7f 0x88 0xff 0x01 0xe0
-
-# CHECK: locfhr %r11, %r3, 0
-0xb9 0xe0 0x00 0xb3
-
-# CHECK: locfhro %r11, %r3
-0xb9 0xe0 0x10 0xb3
-
-# CHECK: locfhrh %r11, %r3
-0xb9 0xe0 0x20 0xb3
-
-# CHECK: locfhrnle %r11, %r3
-0xb9 0xe0 0x30 0xb3
-
-# CHECK: locfhrl %r11, %r3
-0xb9 0xe0 0x40 0xb3
-
-# CHECK: locfhrnhe %r11, %r3
-0xb9 0xe0 0x50 0xb3
-
-# CHECK: locfhrlh %r11, %r3
-0xb9 0xe0 0x60 0xb3
-
-# CHECK: locfhrne %r11, %r3
-0xb9 0xe0 0x70 0xb3
-
-# CHECK: locfhre %r11, %r3
-0xb9 0xe0 0x80 0xb3
-
-# CHECK: locfhrnlh %r11, %r3
-0xb9 0xe0 0x90 0xb3
-
-# CHECK: locfhrhe %r11, %r3
-0xb9 0xe0 0xa0 0xb3
-
-# CHECK: locfhrnl %r11, %r3
-0xb9 0xe0 0xb0 0xb3
-
-# CHECK: locfhrle %r11, %r3
-0xb9 0xe0 0xc0 0xb3
-
-# CHECK: locfhrnh %r11, %r3
-0xb9 0xe0 0xd0 0xb3
-
-# CHECK: locfhrno %r11, %r3
-0xb9 0xe0 0xe0 0xb3
-
-# CHECK: locfhr %r11, %r3, 15
-0xb9 0xe0 0xf0 0xb3
-
-# CHECK: stocfh %r1, 2(%r3), 0
-0xeb 0x10 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfho %r1, 2(%r3)
-0xeb 0x11 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhh %r1, 2(%r3)
-0xeb 0x12 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhnle %r1, 2(%r3)
-0xeb 0x13 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhl %r1, 2(%r3)
-0xeb 0x14 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhnhe %r1, 2(%r3)
-0xeb 0x15 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhlh %r1, 2(%r3)
-0xeb 0x16 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhne %r1, 2(%r3)
-0xeb 0x17 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhe %r1, 2(%r3)
-0xeb 0x18 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhnlh %r1, 2(%r3)
-0xeb 0x19 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhhe %r1, 2(%r3)
-0xeb 0x1a 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhnl %r1, 2(%r3)
-0xeb 0x1b 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhle %r1, 2(%r3)
-0xeb 0x1c 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhnh %r1, 2(%r3)
-0xeb 0x1d 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfhno %r1, 2(%r3)
-0xeb 0x1e 0x30 0x02 0x00 0xe1
-
-# CHECK: stocfh %r1, 2(%r3), 15
-0xeb 0x1f 0x30 0x02 0x00 0xe1
-
diff --git a/test/MC/Disassembler/SystemZ/insns.txt b/test/MC/Disassembler/SystemZ/insns.txt
index 9f76b6a5fd44..dac94099f276 100644
--- a/test/MC/Disassembler/SystemZ/insns.txt
+++ b/test/MC/Disassembler/SystemZ/insns.txt
@@ -1,17 +1,26 @@
# Test instructions that don't have PC-relative operands.
# RUN: llvm-mc --disassemble %s -triple=s390x-linux-gnu -mcpu=zEC12 | FileCheck %s
-# CHECK: adbr %f0, %f0
-0xb3 0x1a 0x00 0x00
+# CHECK: a %r0, 0
+0x5a 0x00 0x00 0x00
-# CHECK: adbr %f0, %f15
-0xb3 0x1a 0x00 0x0f
+# CHECK: a %r0, 4095
+0x5a 0x00 0x0f 0xff
-# CHECK: adbr %f7, %f8
-0xb3 0x1a 0x00 0x78
+# CHECK: a %r0, 0(%r1)
+0x5a 0x00 0x10 0x00
-# CHECK: adbr %f15, %f0
-0xb3 0x1a 0x00 0xf0
+# CHECK: a %r0, 0(%r15)
+0x5a 0x00 0xf0 0x00
+
+# CHECK: a %r0, 4095(%r1,%r15)
+0x5a 0x01 0xff 0xff
+
+# CHECK: a %r0, 4095(%r15,%r1)
+0x5a 0x0f 0x1f 0xff
+
+# CHECK: a %r15, 0
+0x5a 0xf0 0x00 0x00
# CHECK: adb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x1a
@@ -34,17 +43,17 @@
# CHECK: adb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x1a
-# CHECK: aebr %f0, %f0
-0xb3 0x0a 0x00 0x00
+# CHECK: adbr %f0, %f0
+0xb3 0x1a 0x00 0x00
-# CHECK: aebr %f0, %f15
-0xb3 0x0a 0x00 0x0f
+# CHECK: adbr %f0, %f15
+0xb3 0x1a 0x00 0x0f
-# CHECK: aebr %f7, %f8
-0xb3 0x0a 0x00 0x78
+# CHECK: adbr %f7, %f8
+0xb3 0x1a 0x00 0x78
-# CHECK: aebr %f15, %f0
-0xb3 0x0a 0x00 0xf0
+# CHECK: adbr %f15, %f0
+0xb3 0x1a 0x00 0xf0
# CHECK: aeb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x0a
@@ -67,6 +76,18 @@
# CHECK: aeb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x0a
+# CHECK: aebr %f0, %f0
+0xb3 0x0a 0x00 0x00
+
+# CHECK: aebr %f0, %f15
+0xb3 0x0a 0x00 0x0f
+
+# CHECK: aebr %f7, %f8
+0xb3 0x0a 0x00 0x78
+
+# CHECK: aebr %f15, %f0
+0xb3 0x0a 0x00 0xf0
+
# CHECK: afi %r0, -2147483648
0xc2 0x09 0x80 0x00 0x00 0x00
@@ -85,35 +106,35 @@
# CHECK: afi %r15, 0
0xc2 0xf9 0x00 0x00 0x00 0x00
-# CHECK: agfi %r0, -2147483648
-0xc2 0x08 0x80 0x00 0x00 0x00
+# CHECK: ag %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x08
-# CHECK: agfi %r0, -1
-0xc2 0x08 0xff 0xff 0xff 0xff
+# CHECK: ag %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x08
-# CHECK: agfi %r0, 0
-0xc2 0x08 0x00 0x00 0x00 0x00
+# CHECK: ag %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x08
-# CHECK: agfi %r0, 1
-0xc2 0x08 0x00 0x00 0x00 0x01
+# CHECK: ag %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x08
-# CHECK: agfi %r0, 2147483647
-0xc2 0x08 0x7f 0xff 0xff 0xff
+# CHECK: ag %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x08
-# CHECK: agfi %r15, 0
-0xc2 0xf8 0x00 0x00 0x00 0x00
+# CHECK: ag %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x08
-# CHECK: agfr %r0, %r0
-0xb9 0x18 0x00 0x00
+# CHECK: ag %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x08
-# CHECK: agfr %r0, %r15
-0xb9 0x18 0x00 0x0f
+# CHECK: ag %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x08
-# CHECK: agfr %r15, %r0
-0xb9 0x18 0x00 0xf0
+# CHECK: ag %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x08
-# CHECK: agfr %r7, %r8
-0xb9 0x18 0x00 0x78
+# CHECK: ag %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x08
# CHECK: agf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x18
@@ -145,6 +166,36 @@
# CHECK: agf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x18
+# CHECK: agfi %r0, -2147483648
+0xc2 0x08 0x80 0x00 0x00 0x00
+
+# CHECK: agfi %r0, -1
+0xc2 0x08 0xff 0xff 0xff 0xff
+
+# CHECK: agfi %r0, 0
+0xc2 0x08 0x00 0x00 0x00 0x00
+
+# CHECK: agfi %r0, 1
+0xc2 0x08 0x00 0x00 0x00 0x01
+
+# CHECK: agfi %r0, 2147483647
+0xc2 0x08 0x7f 0xff 0xff 0xff
+
+# CHECK: agfi %r15, 0
+0xc2 0xf8 0x00 0x00 0x00 0x00
+
+# CHECK: agfr %r0, %r0
+0xb9 0x18 0x00 0x00
+
+# CHECK: agfr %r0, %r15
+0xb9 0x18 0x00 0x0f
+
+# CHECK: agfr %r15, %r0
+0xb9 0x18 0x00 0xf0
+
+# CHECK: agfr %r7, %r8
+0xb9 0x18 0x00 0x78
+
# CHECK: aghi %r0, -32768
0xa7 0x0b 0x80 0x00
@@ -235,35 +286,26 @@
# CHECK: agsi 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x7a
-# CHECK: ag %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x08
-
-# CHECK: ag %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x08
-
-# CHECK: ag %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x08
-
-# CHECK: ag %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x08
+# CHECK: ah %r0, 0
+0x4a 0x00 0x00 0x00
-# CHECK: ag %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x08
+# CHECK: ah %r0, 4095
+0x4a 0x00 0x0f 0xff
-# CHECK: ag %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x08
+# CHECK: ah %r0, 0(%r1)
+0x4a 0x00 0x10 0x00
-# CHECK: ag %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x08
+# CHECK: ah %r0, 0(%r15)
+0x4a 0x00 0xf0 0x00
-# CHECK: ag %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x08
+# CHECK: ah %r0, 4095(%r1,%r15)
+0x4a 0x01 0xff 0xff
-# CHECK: ag %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x08
+# CHECK: ah %r0, 4095(%r15,%r1)
+0x4a 0x0f 0x1f 0xff
-# CHECK: ag %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x08
+# CHECK: ah %r15, 0
+0x4a 0xf0 0x00 0x00
# CHECK: ahi %r0, -32768
0xa7 0x0a 0x80 0x00
@@ -298,27 +340,6 @@
# CHECK: ahik %r8, %r15, 32767
0xec 0x8f 0x7f 0xff 0x00 0xd8
-# CHECK: ah %r0, 0
-0x4a 0x00 0x00 0x00
-
-# CHECK: ah %r0, 4095
-0x4a 0x00 0x0f 0xff
-
-# CHECK: ah %r0, 0(%r1)
-0x4a 0x00 0x10 0x00
-
-# CHECK: ah %r0, 0(%r15)
-0x4a 0x00 0xf0 0x00
-
-# CHECK: ah %r0, 4095(%r1,%r15)
-0x4a 0x01 0xff 0xff
-
-# CHECK: ah %r0, 4095(%r15,%r1)
-0x4a 0x0f 0x1f 0xff
-
-# CHECK: ah %r15, 0
-0x4a 0xf0 0x00 0x00
-
# CHECK: ahy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x7a
@@ -367,17 +388,56 @@
# CHECK: aih %r15, 0
0xcc 0xf8 0x00 0x00 0x00 0x00
-# CHECK: alcgr %r0, %r0
-0xb9 0x88 0x00 0x00
+# CHECK: al %r0, 0
+0x5e 0x00 0x00 0x00
-# CHECK: alcgr %r0, %r15
-0xb9 0x88 0x00 0x0f
+# CHECK: al %r0, 4095
+0x5e 0x00 0x0f 0xff
-# CHECK: alcgr %r15, %r0
-0xb9 0x88 0x00 0xf0
+# CHECK: al %r0, 0(%r1)
+0x5e 0x00 0x10 0x00
-# CHECK: alcgr %r7, %r8
-0xb9 0x88 0x00 0x78
+# CHECK: al %r0, 0(%r15)
+0x5e 0x00 0xf0 0x00
+
+# CHECK: al %r0, 4095(%r1,%r15)
+0x5e 0x01 0xff 0xff
+
+# CHECK: al %r0, 4095(%r15,%r1)
+0x5e 0x0f 0x1f 0xff
+
+# CHECK: al %r15, 0
+0x5e 0xf0 0x00 0x00
+
+# CHECK: alc %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x98
+
+# CHECK: alc %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x98
+
+# CHECK: alc %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x98
+
+# CHECK: alc %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x98
+
+# CHECK: alc %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x98
+
+# CHECK: alc %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x98
+
+# CHECK: alc %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x98
+
+# CHECK: alc %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x98
+
+# CHECK: alc %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x98
+
+# CHECK: alc %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x98
# CHECK: alcg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x88
@@ -409,6 +469,18 @@
# CHECK: alcg %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x88
+# CHECK: alcgr %r0, %r0
+0xb9 0x88 0x00 0x00
+
+# CHECK: alcgr %r0, %r15
+0xb9 0x88 0x00 0x0f
+
+# CHECK: alcgr %r15, %r0
+0xb9 0x88 0x00 0xf0
+
+# CHECK: alcgr %r7, %r8
+0xb9 0x88 0x00 0x78
+
# CHECK: alcr %r0, %r0
0xb9 0x98 0x00 0x00
@@ -421,36 +493,6 @@
# CHECK: alcr %r7, %r8
0xb9 0x98 0x00 0x78
-# CHECK: alc %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x98
-
-# CHECK: alc %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x98
-
-# CHECK: alc %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x98
-
-# CHECK: alc %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x98
-
-# CHECK: alc %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x98
-
-# CHECK: alc %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x98
-
-# CHECK: alc %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x98
-
-# CHECK: alc %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x98
-
-# CHECK: alc %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x98
-
-# CHECK: alc %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x98
-
# CHECK: alfi %r0, 0
0xc2 0x0b 0x00 0x00 0x00 0x00
@@ -460,26 +502,35 @@
# CHECK: alfi %r15, 0
0xc2 0xfb 0x00 0x00 0x00 0x00
-# CHECK: algfi %r0, 0
-0xc2 0x0a 0x00 0x00 0x00 0x00
+# CHECK: alg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x0a
-# CHECK: algfi %r0, 4294967295
-0xc2 0x0a 0xff 0xff 0xff 0xff
+# CHECK: alg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x0a
-# CHECK: algfi %r15, 0
-0xc2 0xfa 0x00 0x00 0x00 0x00
+# CHECK: alg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x0a
-# CHECK: algfr %r0, %r0
-0xb9 0x1a 0x00 0x00
+# CHECK: alg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x0a
-# CHECK: algfr %r0, %r15
-0xb9 0x1a 0x00 0x0f
+# CHECK: alg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x0a
-# CHECK: algfr %r15, %r0
-0xb9 0x1a 0x00 0xf0
+# CHECK: alg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x0a
-# CHECK: algfr %r7, %r8
-0xb9 0x1a 0x00 0x78
+# CHECK: alg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x0a
+
+# CHECK: alg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x0a
+
+# CHECK: alg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x0a
+
+# CHECK: alg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x0a
# CHECK: algf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x1a
@@ -511,6 +562,42 @@
# CHECK: algf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x1a
+# CHECK: algfi %r0, 0
+0xc2 0x0a 0x00 0x00 0x00 0x00
+
+# CHECK: algfi %r0, 4294967295
+0xc2 0x0a 0xff 0xff 0xff 0xff
+
+# CHECK: algfi %r15, 0
+0xc2 0xfa 0x00 0x00 0x00 0x00
+
+# CHECK: algfr %r0, %r0
+0xb9 0x1a 0x00 0x00
+
+# CHECK: algfr %r0, %r15
+0xb9 0x1a 0x00 0x0f
+
+# CHECK: algfr %r15, %r0
+0xb9 0x1a 0x00 0xf0
+
+# CHECK: algfr %r7, %r8
+0xb9 0x1a 0x00 0x78
+
+# CHECK: alghsik %r0, %r1, -32768
+0xec 0x01 0x80 0x00 0x00 0xdb
+
+# CHECK: alghsik %r2, %r3, -1
+0xec 0x23 0xff 0xff 0x00 0xdb
+
+# CHECK: alghsik %r4, %r5, 0
+0xec 0x45 0x00 0x00 0x00 0xdb
+
+# CHECK: alghsik %r6, %r7, 1
+0xec 0x67 0x00 0x01 0x00 0xdb
+
+# CHECK: alghsik %r8, %r15, 32767
+0xec 0x8f 0x7f 0xff 0x00 0xdb
+
# CHECK: algr %r0, %r0
0xb9 0x0a 0x00 0x00
@@ -529,50 +616,44 @@
# CHECK: algrk %r2, %r3, %r4
0xb9 0xea 0x40 0x23
-# CHECK: alg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x0a
-
-# CHECK: alg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x0a
-
-# CHECK: alg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x0a
+# CHECK: algsi -524288, 0
+0xeb 0x00 0x00 0x00 0x80 0x7e
-# CHECK: alg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x0a
+# CHECK: algsi -1, 0
+0xeb 0x00 0x0f 0xff 0xff 0x7e
-# CHECK: alg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x0a
+# CHECK: algsi 0, 0
+0xeb 0x00 0x00 0x00 0x00 0x7e
-# CHECK: alg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x0a
+# CHECK: algsi 1, 0
+0xeb 0x00 0x00 0x01 0x00 0x7e
-# CHECK: alg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x0a
+# CHECK: algsi 524287, 0
+0xeb 0x00 0x0f 0xff 0x7f 0x7e
-# CHECK: alg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x0a
+# CHECK: algsi 0, -128
+0xeb 0x80 0x00 0x00 0x00 0x7e
-# CHECK: alg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x0a
+# CHECK: algsi 0, -1
+0xeb 0xff 0x00 0x00 0x00 0x7e
-# CHECK: alg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x0a
+# CHECK: algsi 0, 1
+0xeb 0x01 0x00 0x00 0x00 0x7e
-# CHECK: alghsik %r0, %r1, -32768
-0xec 0x01 0x80 0x00 0x00 0xdb
+# CHECK: algsi 0, 127
+0xeb 0x7f 0x00 0x00 0x00 0x7e
-# CHECK: alghsik %r2, %r3, -1
-0xec 0x23 0xff 0xff 0x00 0xdb
+# CHECK: algsi 0(%r1), 42
+0xeb 0x2a 0x10 0x00 0x00 0x7e
-# CHECK: alghsik %r4, %r5, 0
-0xec 0x45 0x00 0x00 0x00 0xdb
+# CHECK: algsi 0(%r15), 42
+0xeb 0x2a 0xf0 0x00 0x00 0x7e
-# CHECK: alghsik %r6, %r7, 1
-0xec 0x67 0x00 0x01 0x00 0xdb
+# CHECK: algsi 524287(%r1), 42
+0xeb 0x2a 0x1f 0xff 0x7f 0x7e
-# CHECK: alghsik %r8, %r15, 32767
-0xec 0x8f 0x7f 0xff 0x00 0xdb
+# CHECK: algsi 524287(%r15), 42
+0xeb 0x2a 0xff 0xff 0x7f 0x7e
# CHECK: alhsik %r0, %r1, -32768
0xec 0x01 0x80 0x00 0x00 0xda
@@ -607,26 +688,44 @@
# CHECK: alrk %r2, %r3, %r4
0xb9 0xfa 0x40 0x23
-# CHECK: al %r0, 0
-0x5e 0x00 0x00 0x00
+# CHECK: alsi -524288, 0
+0xeb 0x00 0x00 0x00 0x80 0x6e
-# CHECK: al %r0, 4095
-0x5e 0x00 0x0f 0xff
+# CHECK: alsi -1, 0
+0xeb 0x00 0x0f 0xff 0xff 0x6e
-# CHECK: al %r0, 0(%r1)
-0x5e 0x00 0x10 0x00
+# CHECK: alsi 0, 0
+0xeb 0x00 0x00 0x00 0x00 0x6e
-# CHECK: al %r0, 0(%r15)
-0x5e 0x00 0xf0 0x00
+# CHECK: alsi 1, 0
+0xeb 0x00 0x00 0x01 0x00 0x6e
-# CHECK: al %r0, 4095(%r1,%r15)
-0x5e 0x01 0xff 0xff
+# CHECK: alsi 524287, 0
+0xeb 0x00 0x0f 0xff 0x7f 0x6e
-# CHECK: al %r0, 4095(%r15,%r1)
-0x5e 0x0f 0x1f 0xff
+# CHECK: alsi 0, -128
+0xeb 0x80 0x00 0x00 0x00 0x6e
-# CHECK: al %r15, 0
-0x5e 0xf0 0x00 0x00
+# CHECK: alsi 0, -1
+0xeb 0xff 0x00 0x00 0x00 0x6e
+
+# CHECK: alsi 0, 1
+0xeb 0x01 0x00 0x00 0x00 0x6e
+
+# CHECK: alsi 0, 127
+0xeb 0x7f 0x00 0x00 0x00 0x6e
+
+# CHECK: alsi 0(%r1), 42
+0xeb 0x2a 0x10 0x00 0x00 0x6e
+
+# CHECK: alsi 0(%r15), 42
+0xeb 0x2a 0xf0 0x00 0x00 0x6e
+
+# CHECK: alsi 524287(%r1), 42
+0xeb 0x2a 0x1f 0xff 0x7f 0x6e
+
+# CHECK: alsi 524287(%r15), 42
+0xeb 0x2a 0xff 0xff 0x7f 0x6e
# CHECK: aly %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x5e
@@ -658,6 +757,48 @@
# CHECK: aly %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x5e
+# CHECK: ap 0(1), 0(1)
+0xfa 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: ap 0(1), 0(1,%r1)
+0xfa 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: ap 0(1), 0(1,%r15)
+0xfa 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: ap 0(1), 4095(1)
+0xfa 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: ap 0(1), 4095(1,%r1)
+0xfa 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: ap 0(1), 4095(1,%r15)
+0xfa 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: ap 0(1,%r1), 0(1)
+0xfa 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: ap 0(1,%r15), 0(1)
+0xfa 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: ap 4095(1,%r1), 0(1)
+0xfa 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: ap 4095(1,%r15), 0(1)
+0xfa 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: ap 0(16,%r1), 0(1)
+0xfa 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: ap 0(16,%r15), 0(1)
+0xfa 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: ap 0(1), 0(16,%r1)
+0xfa 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: ap 0(1), 0(16,%r15)
+0xfa 0x0f 0x00 0x00 0xf0 0x00
+
# CHECK: ar %r0, %r0
0x1a 0x00
@@ -715,27 +856,6 @@
# CHECK: asi 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x6a
-# CHECK: a %r0, 0
-0x5a 0x00 0x00 0x00
-
-# CHECK: a %r0, 4095
-0x5a 0x00 0x0f 0xff
-
-# CHECK: a %r0, 0(%r1)
-0x5a 0x00 0x10 0x00
-
-# CHECK: a %r0, 0(%r15)
-0x5a 0x00 0xf0 0x00
-
-# CHECK: a %r0, 4095(%r1,%r15)
-0x5a 0x01 0xff 0xff
-
-# CHECK: a %r0, 4095(%r15,%r1)
-0x5a 0x0f 0x1f 0xff
-
-# CHECK: a %r15, 0
-0x5a 0xf0 0x00 0x00
-
# CHECK: axbr %f0, %f0
0xb3 0x4a 0x00 0x00
@@ -778,6 +898,24 @@
# CHECK: ay %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x5a
+# CHECK: b 0
+0x47 0xf0 0x00 0x00
+
+# CHECK: b 4095
+0x47 0xf0 0x0f 0xff
+
+# CHECK: b 0(%r1)
+0x47 0xf0 0x10 0x00
+
+# CHECK: b 0(%r15)
+0x47 0xf0 0xf0 0x00
+
+# CHECK: b 4095(%r1,%r15)
+0x47 0xf1 0xff 0xff
+
+# CHECK: b 4095(%r15,%r1)
+0x47 0xff 0x1f 0xff
+
# CHECK: bal %r0, 0
0x45 0x00 0x00 0x00
@@ -850,36 +988,6 @@
# CHECK: bassm %r15, %r1
0x0c 0xf1
-# CHECK: bsm %r0, %r1
-0x0b 0x01
-
-# CHECK: bsm %r0, %r15
-0x0b 0x0f
-
-# CHECK: bsm %r14, %r9
-0x0b 0xe9
-
-# CHECK: bsm %r15, %r1
-0x0b 0xf1
-
-# CHECK: b 0
-0x47 0xf0 0x00 0x00
-
-# CHECK: b 4095
-0x47 0xf0 0x0f 0xff
-
-# CHECK: b 0(%r1)
-0x47 0xf0 0x10 0x00
-
-# CHECK: b 0(%r15)
-0x47 0xf0 0xf0 0x00
-
-# CHECK: b 4095(%r1,%r15)
-0x47 0xf1 0xff 0xff
-
-# CHECK: b 4095(%r15,%r1)
-0x47 0xff 0x1f 0xff
-
# CHECK: bc 0, 0
0x47 0x00 0x00 0x00
@@ -1015,18 +1123,6 @@
# CHECK: bct %r15, 0
0x46 0xf0 0x00 0x00
-# CHECK: bctr %r0, %r9
-0x06 0x09
-
-# CHECK: bctr %r0, %r15
-0x06 0x0f
-
-# CHECK: bctr %r15, %r0
-0x06 0xf0
-
-# CHECK: bctr %r15, %r9
-0x06 0xf9
-
# CHECK: bctg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x46
@@ -1069,6 +1165,30 @@
# CHECK: bctgr %r15, %r9
0xb9 0x46 0x00 0xf9
+# CHECK: bctr %r0, %r9
+0x06 0x09
+
+# CHECK: bctr %r0, %r15
+0x06 0x0f
+
+# CHECK: bctr %r15, %r0
+0x06 0xf0
+
+# CHECK: bctr %r15, %r9
+0x06 0xf9
+
+# CHECK: bsm %r0, %r1
+0x0b 0x01
+
+# CHECK: bsm %r0, %r15
+0x0b 0x0f
+
+# CHECK: bsm %r14, %r9
+0x0b 0xe9
+
+# CHECK: bsm %r15, %r1
+0x0b 0xf1
+
# CHECK: bxh %r0, %r0, 0
0x86 0x00 0x00 0x00
@@ -1177,7 +1297,7 @@
# CHECK: bxleg %r14, %r15, 0
0xeb 0xef 0x00 0x00 0x00 0x45
-# CHECK: bxleg %r15, %r15, 0
+# CHECK: bxleg %r15, %r15, 0
0xeb 0xff 0x00 0x00 0x00 0x45
# CHECK: bxleg %r0, %r0, -524288
@@ -1207,17 +1327,26 @@
# CHECK: bxleg %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0x45
-# CHECK: cdbr %f0, %f0
-0xb3 0x19 0x00 0x00
+# CHECK: c %r0, 0
+0x59 0x00 0x00 0x00
-# CHECK: cdbr %f0, %f15
-0xb3 0x19 0x00 0x0f
+# CHECK: c %r0, 4095
+0x59 0x00 0x0f 0xff
-# CHECK: cdbr %f7, %f8
-0xb3 0x19 0x00 0x78
+# CHECK: c %r0, 0(%r1)
+0x59 0x00 0x10 0x00
-# CHECK: cdbr %f15, %f0
-0xb3 0x19 0x00 0xf0
+# CHECK: c %r0, 0(%r15)
+0x59 0x00 0xf0 0x00
+
+# CHECK: c %r0, 4095(%r1,%r15)
+0x59 0x01 0xff 0xff
+
+# CHECK: c %r0, 4095(%r15,%r1)
+0x59 0x0f 0x1f 0xff
+
+# CHECK: c %r15, 0
+0x59 0xf0 0x00 0x00
# CHECK: cdb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x19
@@ -1240,6 +1369,18 @@
# CHECK: cdb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x19
+# CHECK: cdbr %f0, %f0
+0xb3 0x19 0x00 0x00
+
+# CHECK: cdbr %f0, %f15
+0xb3 0x19 0x00 0x0f
+
+# CHECK: cdbr %f7, %f8
+0xb3 0x19 0x00 0x78
+
+# CHECK: cdbr %f15, %f0
+0xb3 0x19 0x00 0xf0
+
# CHECK: cdfbr %f0, %r0
0xb3 0x95 0x00 0x00
@@ -1255,22 +1396,22 @@
# CHECK: cdfbr %f15, %r15
0xb3 0x95 0x00 0xff
-# CHECK: cdfbra %f0, 0, %r0, 1
+# CHECK: cdfbra %f0, 0, %r0, 1
0xb3 0x95 0x01 0x00
-# CHECK: cdfbra %f0, 0, %r0, 15
+# CHECK: cdfbra %f0, 0, %r0, 15
0xb3 0x95 0x0f 0x00
-# CHECK: cdfbra %f0, 0, %r15, 1
+# CHECK: cdfbra %f0, 0, %r15, 1
0xb3 0x95 0x01 0x0f
-# CHECK: cdfbra %f0, 15, %r0, 1
+# CHECK: cdfbra %f0, 15, %r0, 1
0xb3 0x95 0xf1 0x00
-# CHECK: cdfbra %f4, 5, %r6, 7
+# CHECK: cdfbra %f4, 5, %r6, 7
0xb3 0x95 0x57 0x46
-# CHECK: cdfbra %f15, 0, %r0, 1
+# CHECK: cdfbra %f15, 0, %r0, 1
0xb3 0x95 0x01 0xf0
# CHECK: cdgbr %f0, %r0
@@ -1288,58 +1429,58 @@
# CHECK: cdgbr %f15, %r15
0xb3 0xa5 0x00 0xff
-# CHECK: cdgbra %f0, 0, %r0, 1
+# CHECK: cdgbra %f0, 0, %r0, 1
0xb3 0xa5 0x01 0x00
-# CHECK: cdgbra %f0, 0, %r0, 15
+# CHECK: cdgbra %f0, 0, %r0, 15
0xb3 0xa5 0x0f 0x00
-# CHECK: cdgbra %f0, 0, %r15, 1
+# CHECK: cdgbra %f0, 0, %r15, 1
0xb3 0xa5 0x01 0x0f
-# CHECK: cdgbra %f0, 15, %r0, 1
+# CHECK: cdgbra %f0, 15, %r0, 1
0xb3 0xa5 0xf1 0x00
-# CHECK: cdgbra %f4, 5, %r6, 7
+# CHECK: cdgbra %f4, 5, %r6, 7
0xb3 0xa5 0x57 0x46
-# CHECK: cdgbra %f15, 0, %r0, 1
+# CHECK: cdgbra %f15, 0, %r0, 1
0xb3 0xa5 0x01 0xf0
-# CHECK: cdlfbr %f0, 0, %r0, 1
+# CHECK: cdlfbr %f0, 0, %r0, 1
0xb3 0x91 0x01 0x00
-# CHECK: cdlfbr %f0, 0, %r0, 15
+# CHECK: cdlfbr %f0, 0, %r0, 15
0xb3 0x91 0x0f 0x00
-# CHECK: cdlfbr %f0, 0, %r15, 1
+# CHECK: cdlfbr %f0, 0, %r15, 1
0xb3 0x91 0x01 0x0f
-# CHECK: cdlfbr %f0, 15, %r0, 1
+# CHECK: cdlfbr %f0, 15, %r0, 1
0xb3 0x91 0xf1 0x00
-# CHECK: cdlfbr %f4, 5, %r6, 7
+# CHECK: cdlfbr %f4, 5, %r6, 7
0xb3 0x91 0x57 0x46
-# CHECK: cdlfbr %f15, 0, %r0, 1
+# CHECK: cdlfbr %f15, 0, %r0, 1
0xb3 0x91 0x01 0xf0
-# CHECK: cdlgbr %f0, 0, %r0, 1
+# CHECK: cdlgbr %f0, 0, %r0, 1
0xb3 0xa1 0x01 0x00
-# CHECK: cdlgbr %f0, 0, %r0, 15
+# CHECK: cdlgbr %f0, 0, %r0, 15
0xb3 0xa1 0x0f 0x00
-# CHECK: cdlgbr %f0, 0, %r15, 1
+# CHECK: cdlgbr %f0, 0, %r15, 1
0xb3 0xa1 0x01 0x0f
-# CHECK: cdlgbr %f0, 15, %r0, 1
+# CHECK: cdlgbr %f0, 15, %r0, 1
0xb3 0xa1 0xf1 0x00
-# CHECK: cdlgbr %f4, 5, %r6, 7
+# CHECK: cdlgbr %f4, 5, %r6, 7
0xb3 0xa1 0x57 0x46
-# CHECK: cdlgbr %f15, 0, %r0, 1
+# CHECK: cdlgbr %f15, 0, %r0, 1
0xb3 0xa1 0x01 0xf0
# CHECK: cds %r0, %r0, 0
@@ -1432,18 +1573,6 @@
# CHECK: cdsy %r14, %r0, 0
0xeb 0xe0 0x00 0x00 0x00 0x31
-# CHECK: cebr %f0, %f0
-0xb3 0x09 0x00 0x00
-
-# CHECK: cebr %f0, %f15
-0xb3 0x09 0x00 0x0f
-
-# CHECK: cebr %f7, %f8
-0xb3 0x09 0x00 0x78
-
-# CHECK: cebr %f15, %f0
-0xb3 0x09 0x00 0xf0
-
# CHECK: ceb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x09
@@ -1465,6 +1594,18 @@
# CHECK: ceb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x09
+# CHECK: cebr %f0, %f0
+0xb3 0x09 0x00 0x00
+
+# CHECK: cebr %f0, %f15
+0xb3 0x09 0x00 0x0f
+
+# CHECK: cebr %f7, %f8
+0xb3 0x09 0x00 0x78
+
+# CHECK: cebr %f15, %f0
+0xb3 0x09 0x00 0xf0
+
# CHECK: cefbr %f0, %r0
0xb3 0x94 0x00 0x00
@@ -1480,22 +1621,22 @@
# CHECK: cefbr %f15, %r15
0xb3 0x94 0x00 0xff
-# CHECK: cefbra %f0, 0, %r0, 1
+# CHECK: cefbra %f0, 0, %r0, 1
0xb3 0x94 0x01 0x00
-# CHECK: cefbra %f0, 0, %r0, 15
+# CHECK: cefbra %f0, 0, %r0, 15
0xb3 0x94 0x0f 0x00
-# CHECK: cefbra %f0, 0, %r15, 1
+# CHECK: cefbra %f0, 0, %r15, 1
0xb3 0x94 0x01 0x0f
-# CHECK: cefbra %f0, 15, %r0, 1
+# CHECK: cefbra %f0, 15, %r0, 1
0xb3 0x94 0xf1 0x00
-# CHECK: cefbra %f4, 5, %r6, 7
+# CHECK: cefbra %f4, 5, %r6, 7
0xb3 0x94 0x57 0x46
-# CHECK: cefbra %f15, 0, %r0, 1
+# CHECK: cefbra %f15, 0, %r0, 1
0xb3 0x94 0x01 0xf0
# CHECK: cegbr %f0, %r0
@@ -1513,60 +1654,78 @@
# CHECK: cegbr %f15, %r15
0xb3 0xa4 0x00 0xff
-# CHECK: cegbra %f0, 0, %r0, 1
+# CHECK: cegbra %f0, 0, %r0, 1
0xb3 0xa4 0x01 0x00
-# CHECK: cegbra %f0, 0, %r0, 15
+# CHECK: cegbra %f0, 0, %r0, 15
0xb3 0xa4 0x0f 0x00
-# CHECK: cegbra %f0, 0, %r15, 1
+# CHECK: cegbra %f0, 0, %r15, 1
0xb3 0xa4 0x01 0x0f
-# CHECK: cegbra %f0, 15, %r0, 1
+# CHECK: cegbra %f0, 15, %r0, 1
0xb3 0xa4 0xf1 0x00
-# CHECK: cegbra %f4, 5, %r6, 7
+# CHECK: cegbra %f4, 5, %r6, 7
0xb3 0xa4 0x57 0x46
-# CHECK: cegbra %f15, 0, %r0, 1
+# CHECK: cegbra %f15, 0, %r0, 1
0xb3 0xa4 0x01 0xf0
-# CHECK: celfbr %f0, 0, %r0, 1
+# CHECK: celfbr %f0, 0, %r0, 1
0xb3 0x90 0x01 0x00
-# CHECK: celfbr %f0, 0, %r0, 15
+# CHECK: celfbr %f0, 0, %r0, 15
0xb3 0x90 0x0f 0x00
-# CHECK: celfbr %f0, 0, %r15, 1
+# CHECK: celfbr %f0, 0, %r15, 1
0xb3 0x90 0x01 0x0f
-# CHECK: celfbr %f0, 15, %r0, 1
+# CHECK: celfbr %f0, 15, %r0, 1
0xb3 0x90 0xf1 0x00
-# CHECK: celfbr %f4, 5, %r6, 7
+# CHECK: celfbr %f4, 5, %r6, 7
0xb3 0x90 0x57 0x46
-# CHECK: celfbr %f15, 0, %r0, 1
+# CHECK: celfbr %f15, 0, %r0, 1
0xb3 0x90 0x01 0xf0
-# CHECK: celgbr %f0, 0, %r0, 1
+# CHECK: celgbr %f0, 0, %r0, 1
0xb3 0xa0 0x01 0x00
-# CHECK: celgbr %f0, 0, %r0, 15
+# CHECK: celgbr %f0, 0, %r0, 15
0xb3 0xa0 0x0f 0x00
-# CHECK: celgbr %f0, 0, %r15, 1
+# CHECK: celgbr %f0, 0, %r15, 1
0xb3 0xa0 0x01 0x0f
-# CHECK: celgbr %f0, 15, %r0, 1
+# CHECK: celgbr %f0, 15, %r0, 1
0xb3 0xa0 0xf1 0x00
-# CHECK: celgbr %f4, 5, %r6, 7
+# CHECK: celgbr %f4, 5, %r6, 7
0xb3 0xa0 0x57 0x46
-# CHECK: celgbr %f15, 0, %r0, 1
+# CHECK: celgbr %f15, 0, %r0, 1
0xb3 0xa0 0x01 0xf0
+# CHECK: cfc 0
+0xb2 0x1a 0x00 0x00
+
+# CHECK: cfc 0(%r1)
+0xb2 0x1a 0x10 0x00
+
+# CHECK: cfc 0(%r15)
+0xb2 0x1a 0xf0 0x00
+
+# CHECK: cfc 4095
+0xb2 0x1a 0x0f 0xff
+
+# CHECK: cfc 4095(%r1)
+0xb2 0x1a 0x1f 0xff
+
+# CHECK: cfc 4095(%r15)
+0xb2 0x1a 0xff 0xff
+
# CHECK: cfdbr %r0, 0, %f0
0xb3 0x99 0x00 0x00
@@ -1582,22 +1741,22 @@
# CHECK: cfdbr %r15, 0, %f0
0xb3 0x99 0x00 0xf0
-# CHECK: cfdbra %r0, 0, %f0, 1
+# CHECK: cfdbra %r0, 0, %f0, 1
0xb3 0x99 0x01 0x00
-# CHECK: cfdbra %r0, 0, %f0, 15
+# CHECK: cfdbra %r0, 0, %f0, 15
0xb3 0x99 0x0f 0x00
-# CHECK: cfdbra %r0, 0, %f15, 1
+# CHECK: cfdbra %r0, 0, %f15, 1
0xb3 0x99 0x01 0x0f
-# CHECK: cfdbra %r0, 15, %f0, 1
+# CHECK: cfdbra %r0, 15, %f0, 1
0xb3 0x99 0xf1 0x00
-# CHECK: cfdbra %r4, 5, %f6, 7
+# CHECK: cfdbra %r4, 5, %f6, 7
0xb3 0x99 0x57 0x46
-# CHECK: cfdbra %r15, 0, %f0, 1
+# CHECK: cfdbra %r15, 0, %f0, 1
0xb3 0x99 0x01 0xf0
# CHECK: cfebr %r0, 0, %f0
@@ -1615,22 +1774,22 @@
# CHECK: cfebr %r15, 0, %f0
0xb3 0x98 0x00 0xf0
-# CHECK: cfebra %r0, 0, %f0, 1
+# CHECK: cfebra %r0, 0, %f0, 1
0xb3 0x98 0x01 0x00
-# CHECK: cfebra %r0, 0, %f0, 15
+# CHECK: cfebra %r0, 0, %f0, 15
0xb3 0x98 0x0f 0x00
-# CHECK: cfebra %r0, 0, %f15, 1
+# CHECK: cfebra %r0, 0, %f15, 1
0xb3 0x98 0x01 0x0f
-# CHECK: cfebra %r0, 15, %f0, 1
+# CHECK: cfebra %r0, 15, %f0, 1
0xb3 0x98 0xf1 0x00
-# CHECK: cfebra %r4, 5, %f6, 7
+# CHECK: cfebra %r4, 5, %f6, 7
0xb3 0x98 0x57 0x46
-# CHECK: cfebra %r15, 0, %f0, 1
+# CHECK: cfebra %r15, 0, %f0, 1
0xb3 0x98 0x01 0xf0
# CHECK: cfi %r0, -2147483648
@@ -1666,24 +1825,54 @@
# CHECK: cfxbr %r15, 0, %f0
0xb3 0x9a 0x00 0xf0
-# CHECK: cfxbra %r0, 0, %f0, 1
+# CHECK: cfxbra %r0, 0, %f0, 1
0xb3 0x9a 0x01 0x00
-# CHECK: cfxbra %r0, 0, %f0, 15
+# CHECK: cfxbra %r0, 0, %f0, 15
0xb3 0x9a 0x0f 0x00
-# CHECK: cfxbra %r0, 0, %f13, 1
+# CHECK: cfxbra %r0, 0, %f13, 1
0xb3 0x9a 0x01 0x0d
-# CHECK: cfxbra %r0, 15, %f0, 1
+# CHECK: cfxbra %r0, 15, %f0, 1
0xb3 0x9a 0xf1 0x00
-# CHECK: cfxbra %r4, 5, %f8, 9
+# CHECK: cfxbra %r4, 5, %f8, 9
0xb3 0x9a 0x59 0x48
-# CHECK: cfxbra %r15, 0, %f0, 1
+# CHECK: cfxbra %r15, 0, %f0, 1
0xb3 0x9a 0x01 0xf0
+# CHECK: cg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x20
+
+# CHECK: cg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x20
+
+# CHECK: cg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x20
+
+# CHECK: cg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x20
+
+# CHECK: cg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x20
+
+# CHECK: cg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x20
+
+# CHECK: cg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x20
+
+# CHECK: cg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x20
+
+# CHECK: cg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x20
+
+# CHECK: cg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x20
+
# CHECK: cgdbr %r0, 0, %f0
0xb3 0xa9 0x00 0x00
@@ -1699,22 +1888,22 @@
# CHECK: cgdbr %r15, 0, %f0
0xb3 0xa9 0x00 0xf0
-# CHECK: cgdbra %r0, 0, %f0, 1
+# CHECK: cgdbra %r0, 0, %f0, 1
0xb3 0xa9 0x01 0x00
-# CHECK: cgdbra %r0, 0, %f0, 15
+# CHECK: cgdbra %r0, 0, %f0, 15
0xb3 0xa9 0x0f 0x00
-# CHECK: cgdbra %r0, 0, %f15, 1
+# CHECK: cgdbra %r0, 0, %f15, 1
0xb3 0xa9 0x01 0x0f
-# CHECK: cgdbra %r0, 15, %f0, 1
+# CHECK: cgdbra %r0, 15, %f0, 1
0xb3 0xa9 0xf1 0x00
-# CHECK: cgdbra %r4, 5, %f6, 7
+# CHECK: cgdbra %r4, 5, %f6, 7
0xb3 0xa9 0x57 0x46
-# CHECK: cgdbra %r15, 0, %f0, 1
+# CHECK: cgdbra %r15, 0, %f0, 1
0xb3 0xa9 0x01 0xf0
# CHECK: cgebr %r0, 0, %f0
@@ -1732,24 +1921,54 @@
# CHECK: cgebr %r15, 0, %f0
0xb3 0xa8 0x00 0xf0
-# CHECK: cgebra %r0, 0, %f0, 1
+# CHECK: cgebra %r0, 0, %f0, 1
0xb3 0xa8 0x01 0x00
-# CHECK: cgebra %r0, 0, %f0, 15
+# CHECK: cgebra %r0, 0, %f0, 15
0xb3 0xa8 0x0f 0x00
-# CHECK: cgebra %r0, 0, %f15, 1
+# CHECK: cgebra %r0, 0, %f15, 1
0xb3 0xa8 0x01 0x0f
-# CHECK: cgebra %r0, 15, %f0, 1
+# CHECK: cgebra %r0, 15, %f0, 1
0xb3 0xa8 0xf1 0x00
-# CHECK: cgebra %r4, 5, %f6, 7
+# CHECK: cgebra %r4, 5, %f6, 7
0xb3 0xa8 0x57 0x46
-# CHECK: cgebra %r15, 0, %f0, 1
+# CHECK: cgebra %r15, 0, %f0, 1
0xb3 0xa8 0x01 0xf0
+# CHECK: cgf %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x30
+
+# CHECK: cgf %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x30
+
+# CHECK: cgf %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x30
+
+# CHECK: cgf %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x30
+
+# CHECK: cgf %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x30
+
+# CHECK: cgf %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x30
+
+# CHECK: cgf %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x30
+
+# CHECK: cgf %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x30
+
+# CHECK: cgf %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x30
+
+# CHECK: cgf %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x30
+
# CHECK: cgfi %r0, -2147483648
0xc2 0x0c 0x80 0x00 0x00 0x00
@@ -1780,35 +1999,35 @@
# CHECK: cgfr %r7, %r8
0xb9 0x30 0x00 0x78
-# CHECK: cgf %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x30
+# CHECK: cgh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x34
-# CHECK: cgf %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x30
+# CHECK: cgh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x34
-# CHECK: cgf %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x30
+# CHECK: cgh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x34
-# CHECK: cgf %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x30
+# CHECK: cgh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x34
-# CHECK: cgf %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x30
+# CHECK: cgh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x34
-# CHECK: cgf %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x30
+# CHECK: cgh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x34
-# CHECK: cgf %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x30
+# CHECK: cgh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x34
-# CHECK: cgf %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x30
+# CHECK: cgh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x34
-# CHECK: cgf %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x30
+# CHECK: cgh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x34
-# CHECK: cgf %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x30
+# CHECK: cgh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x34
# CHECK: cghi %r0, -32768
0xa7 0x0f 0x80 0x00
@@ -1861,35 +2080,95 @@
# CHECK: cghsi 4095(%r15), 42
0xe5 0x58 0xff 0xff 0x00 0x2a
-# CHECK: cgh %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x34
+# CHECK: cgib %r0, 0, 0, 0
+0xec 0x00 0x00 0x00 0x00 0xfc
-# CHECK: cgh %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x34
+# CHECK: cgib %r0, -128, 0, 0
+0xec 0x00 0x00 0x00 0x80 0xfc
-# CHECK: cgh %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x34
+# CHECK: cgib %r0, -1, 0, 0
+0xec 0x00 0x00 0x00 0xff 0xfc
-# CHECK: cgh %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x34
+# CHECK: cgib %r0, 127, 0, 0
+0xec 0x00 0x00 0x00 0x7f 0xfc
-# CHECK: cgh %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x34
+# CHECK: cgib %r15, 0, 0, 0
+0xec 0xf0 0x00 0x00 0x00 0xfc
-# CHECK: cgh %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x34
+# CHECK: cgib %r7, 100, 0, 0
+0xec 0x70 0x00 0x00 0x64 0xfc
-# CHECK: cgh %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x34
+# CHECK: cgib %r0, 0, 0, 4095(%r15)
+0xec 0x00 0xff 0xff 0x00 0xfc
-# CHECK: cgh %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x34
+# CHECK: cgib %r0, 0, 0, 0(%r8)
+0xec 0x00 0x80 0x00 0x00 0xfc
-# CHECK: cgh %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x34
+# CHECK: cgib %r0, 0, 0, 4095(%r7)
+0xec 0x00 0x7f 0xff 0x00 0xfc
-# CHECK: cgh %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x34
+# CHECK: cgib %r0, 0, 1, 0
+0xec 0x01 0x00 0x00 0x00 0xfc
+
+# CHECK: cgibh %r0, 0, 0
+0xec 0x02 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 3, 0
+0xec 0x03 0x00 0x00 0x00 0xfc
+
+# CHECK: cgibl %r0, 0, 0
+0xec 0x04 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 5, 0
+0xec 0x05 0x00 0x00 0x00 0xfc
+
+# CHECK: cgiblh %r0, 0, 0
+0xec 0x06 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 7, 0
+0xec 0x07 0x00 0x00 0x00 0xfc
+
+# CHECK: cgibe %r0, 0, 0
+0xec 0x08 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 9, 0
+0xec 0x09 0x00 0x00 0x00 0xfc
+
+# CHECK: cgibhe %r0, 0, 0
+0xec 0x0a 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 11, 0
+0xec 0x0b 0x00 0x00 0x00 0xfc
+
+# CHECK: cgible %r0, 0, 0
+0xec 0x0c 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 13, 0
+0xec 0x0d 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 14, 0
+0xec 0x0e 0x00 0x00 0x00 0xfc
+
+# CHECK: cgib %r0, 0, 15, 0
+0xec 0x0f 0x00 0x00 0x00 0xfc
+
+# CHECK: cgith %r0, 0
+0xec 0x00 0x00 0x00 0x20 0x70
+
+# CHECK: cgitl %r0, 0
+0xec 0x00 0x00 0x00 0x40 0x70
+
+# CHECK: cgite %r0, 0
+0xec 0x00 0x00 0x00 0x80 0x70
+
+# CHECK: cgitlh %r0, 0
+0xec 0x00 0x00 0x00 0x60 0x70
+
+# CHECK: cgithe %r0, 0
+0xec 0x00 0x00 0x00 0xa0 0x70
+
+# CHECK: cgitle %r0, 0
+0xec 0x00 0x00 0x00 0xc0 0x70
# CHECK: cgr %r0, %r0
0xb9 0x20 0x00 0x00
@@ -1987,126 +2266,6 @@
# CHECK: cgrtle %r0, %r1
0xb9 0x60 0xc0 0x01
-# CHECK: cg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x20
-
-# CHECK: cg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x20
-
-# CHECK: cg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x20
-
-# CHECK: cg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x20
-
-# CHECK: cg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x20
-
-# CHECK: cg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x20
-
-# CHECK: cg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x20
-
-# CHECK: cg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x20
-
-# CHECK: cg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x20
-
-# CHECK: cg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x20
-
-# CHECK: cgib %r0, 0, 0, 0
-0xec 0x00 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, -128, 0, 0
-0xec 0x00 0x00 0x00 0x80 0xfc
-
-# CHECK: cgib %r0, -1, 0, 0
-0xec 0x00 0x00 0x00 0xff 0xfc
-
-# CHECK: cgib %r0, 127, 0, 0
-0xec 0x00 0x00 0x00 0x7f 0xfc
-
-# CHECK: cgib %r15, 0, 0, 0
-0xec 0xf0 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r7, 100, 0, 0
-0xec 0x70 0x00 0x00 0x64 0xfc
-
-# CHECK: cgib %r0, 0, 0, 4095(%r15)
-0xec 0x00 0xff 0xff 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 0, 0(%r8)
-0xec 0x00 0x80 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 0, 4095(%r7)
-0xec 0x00 0x7f 0xff 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 1, 0
-0xec 0x01 0x00 0x00 0x00 0xfc
-
-# CHECK: cgibh %r0, 0, 0
-0xec 0x02 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 3, 0
-0xec 0x03 0x00 0x00 0x00 0xfc
-
-# CHECK: cgibl %r0, 0, 0
-0xec 0x04 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 5, 0
-0xec 0x05 0x00 0x00 0x00 0xfc
-
-# CHECK: cgiblh %r0, 0, 0
-0xec 0x06 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 7, 0
-0xec 0x07 0x00 0x00 0x00 0xfc
-
-# CHECK: cgibe %r0, 0, 0
-0xec 0x08 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 9, 0
-0xec 0x09 0x00 0x00 0x00 0xfc
-
-# CHECK: cgibhe %r0, 0, 0
-0xec 0x0a 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 11, 0
-0xec 0x0b 0x00 0x00 0x00 0xfc
-
-# CHECK: cgible %r0, 0, 0
-0xec 0x0c 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 13, 0
-0xec 0x0d 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 14, 0
-0xec 0x0e 0x00 0x00 0x00 0xfc
-
-# CHECK: cgib %r0, 0, 15, 0
-0xec 0x0f 0x00 0x00 0x00 0xfc
-
-# CHECK: cgith %r0, 0
-0xec 0x00 0x00 0x00 0x20 0x70
-
-# CHECK: cgitl %r0, 0
-0xec 0x00 0x00 0x00 0x40 0x70
-
-# CHECK: cgite %r0, 0
-0xec 0x00 0x00 0x00 0x80 0x70
-
-# CHECK: cgitlh %r0, 0
-0xec 0x00 0x00 0x00 0x60 0x70
-
-# CHECK: cgithe %r0, 0
-0xec 0x00 0x00 0x00 0xa0 0x70
-
-# CHECK: cgitle %r0, 0
-0xec 0x00 0x00 0x00 0xc0 0x70
-
# CHECK: cgxbr %r0, 0, %f0
0xb3 0xaa 0x00 0x00
@@ -2122,24 +2281,45 @@
# CHECK: cgxbr %r15, 0, %f0
0xb3 0xaa 0x00 0xf0
-# CHECK: cgxbra %r0, 0, %f0, 1
+# CHECK: cgxbra %r0, 0, %f0, 1
0xb3 0xaa 0x01 0x00
-# CHECK: cgxbra %r0, 0, %f0, 15
+# CHECK: cgxbra %r0, 0, %f0, 15
0xb3 0xaa 0x0f 0x00
-# CHECK: cgxbra %r0, 0, %f13, 1
+# CHECK: cgxbra %r0, 0, %f13, 1
0xb3 0xaa 0x01 0x0d
-# CHECK: cgxbra %r0, 15, %f0, 1
+# CHECK: cgxbra %r0, 15, %f0, 1
0xb3 0xaa 0xf1 0x00
-# CHECK: cgxbra %r4, 5, %f8, 9
+# CHECK: cgxbra %r4, 5, %f8, 9
0xb3 0xaa 0x59 0x48
-# CHECK: cgxbra %r15, 0, %f0, 1
+# CHECK: cgxbra %r15, 0, %f0, 1
0xb3 0xaa 0x01 0xf0
+# CHECK: ch %r0, 0
+0x49 0x00 0x00 0x00
+
+# CHECK: ch %r0, 4095
+0x49 0x00 0x0f 0xff
+
+# CHECK: ch %r0, 0(%r1)
+0x49 0x00 0x10 0x00
+
+# CHECK: ch %r0, 0(%r15)
+0x49 0x00 0xf0 0x00
+
+# CHECK: ch %r0, 4095(%r1,%r15)
+0x49 0x01 0xff 0xff
+
+# CHECK: ch %r0, 4095(%r15,%r1)
+0x49 0x0f 0x1f 0xff
+
+# CHECK: ch %r15, 0
+0x49 0xf0 0x00 0x00
+
# CHECK: chf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0xcd
@@ -2254,27 +2434,6 @@
# CHECK: chsi 4095(%r15), 42
0xe5 0x5c 0xff 0xff 0x00 0x2a
-# CHECK: ch %r0, 0
-0x49 0x00 0x00 0x00
-
-# CHECK: ch %r0, 4095
-0x49 0x00 0x0f 0xff
-
-# CHECK: ch %r0, 0(%r1)
-0x49 0x00 0x10 0x00
-
-# CHECK: ch %r0, 0(%r15)
-0x49 0x00 0xf0 0x00
-
-# CHECK: ch %r0, 4095(%r1,%r15)
-0x49 0x01 0xff 0xff
-
-# CHECK: ch %r0, 4095(%r15,%r1)
-0x49 0x0f 0x1f 0xff
-
-# CHECK: ch %r15, 0
-0x49 0xf0 0x00 0x00
-
# CHECK: chy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x79
@@ -2413,6 +2572,39 @@
# CHECK: citle %r0, 0
0xec 0x00 0x00 0x00 0xc0 0x72
+# CHECK: cksm %r0, %r0
+0xb2 0x41 0x00 0x00
+
+# CHECK: cksm %r0, %r14
+0xb2 0x41 0x00 0x0e
+
+# CHECK: cksm %r15, %r0
+0xb2 0x41 0x00 0xf0
+
+# CHECK: cksm %r6, %r8
+0xb2 0x41 0x00 0x68
+
+# CHECK: cl %r0, 0
+0x55 0x00 0x00 0x00
+
+# CHECK: cl %r0, 4095
+0x55 0x00 0x0f 0xff
+
+# CHECK: cl %r0, 0(%r1)
+0x55 0x00 0x10 0x00
+
+# CHECK: cl %r0, 0(%r15)
+0x55 0x00 0xf0 0x00
+
+# CHECK: cl %r0, 4095(%r1,%r15)
+0x55 0x01 0xff 0xff
+
+# CHECK: cl %r0, 4095(%r15,%r1)
+0x55 0x0f 0x1f 0xff
+
+# CHECK: cl %r15, 0
+0x55 0xf0 0x00 0x00
+
# CHECK: clc 0(1), 0
0xd5 0x00 0x00 0x00 0x00 0x00
@@ -2449,94 +2641,157 @@
# CHECK: clc 0(256,%r15), 0
0xd5 0xff 0xf0 0x00 0x00 0x00
-# CHECK: clfdbr %r0, 0, %f0, 1
+# CHECK: clcl %r0, %r8
+0x0f 0x08
+
+# CHECK: clcl %r0, %r14
+0x0f 0x0e
+
+# CHECK: clcl %r14, %r0
+0x0f 0xe0
+
+# CHECK: clcl %r14, %r8
+0x0f 0xe8
+
+# CHECK: clcle %r0, %r0, 0
+0xa9 0x00 0x00 0x00
+
+# CHECK: clcle %r0, %r14, 4095
+0xa9 0x0e 0x0f 0xff
+
+# CHECK: clcle %r0, %r0, 0(%r1)
+0xa9 0x00 0x10 0x00
+
+# CHECK: clcle %r0, %r0, 0(%r15)
+0xa9 0x00 0xf0 0x00
+
+# CHECK: clcle %r0, %r14, 4095(%r15)
+0xa9 0x0e 0xff 0xff
+
+# CHECK: clcle %r0, %r0, 4095(%r1)
+0xa9 0x00 0x1f 0xff
+
+# CHECK: clcle %r14, %r0, 0
+0xa9 0xe0 0x00 0x00
+
+# CHECK: clclu %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x8f
+
+# CHECK: clclu %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x8f
+
+# CHECK: clclu %r0, %r14, 0
+0xeb 0x0e 0x00 0x00 0x00 0x8f
+
+# CHECK: clclu %r0, %r14, 1
+0xeb 0x0e 0x00 0x01 0x00 0x8f
+
+# CHECK: clclu %r0, %r8, 524287
+0xeb 0x08 0x0f 0xff 0x7f 0x8f
+
+# CHECK: clclu %r0, %r8, 0(%r1)
+0xeb 0x08 0x10 0x00 0x00 0x8f
+
+# CHECK: clclu %r0, %r4, 0(%r15)
+0xeb 0x04 0xf0 0x00 0x00 0x8f
+
+# CHECK: clclu %r0, %r4, 524287(%r15)
+0xeb 0x04 0xff 0xff 0x7f 0x8f
+
+# CHECK: clclu %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x8f
+
+# CHECK: clclu %r14, %r0, 0
+0xeb 0xe0 0x00 0x00 0x00 0x8f
+
+# CHECK: clfdbr %r0, 0, %f0, 1
0xb3 0x9d 0x01 0x00
-# CHECK: clfdbr %r0, 0, %f0, 15
+# CHECK: clfdbr %r0, 0, %f0, 15
0xb3 0x9d 0x0f 0x00
-# CHECK: clfdbr %r0, 0, %f15, 1
+# CHECK: clfdbr %r0, 0, %f15, 1
0xb3 0x9d 0x01 0x0f
-# CHECK: clfdbr %r0, 15, %f0, 1
+# CHECK: clfdbr %r0, 15, %f0, 1
0xb3 0x9d 0xf1 0x00
-# CHECK: clfdbr %r4, 5, %f6, 7
+# CHECK: clfdbr %r4, 5, %f6, 7
0xb3 0x9d 0x57 0x46
-# CHECK: clfdbr %r15, 0, %f0, 1
+# CHECK: clfdbr %r15, 0, %f0, 1
0xb3 0x9d 0x01 0xf0
-# CHECK: clfebr %r0, 0, %f0, 1
+# CHECK: clfebr %r0, 0, %f0, 1
0xb3 0x9c 0x01 0x00
-# CHECK: clfebr %r0, 0, %f0, 15
+# CHECK: clfebr %r0, 0, %f0, 15
0xb3 0x9c 0x0f 0x00
-# CHECK: clfebr %r0, 0, %f15, 1
+# CHECK: clfebr %r0, 0, %f15, 1
0xb3 0x9c 0x01 0x0f
-# CHECK: clfebr %r0, 15, %f0, 1
+# CHECK: clfebr %r0, 15, %f0, 1
0xb3 0x9c 0xf1 0x00
-# CHECK: clfebr %r4, 5, %f6, 7
+# CHECK: clfebr %r4, 5, %f6, 7
0xb3 0x9c 0x57 0x46
-# CHECK: clfebr %r15, 0, %f0, 1
+# CHECK: clfebr %r15, 0, %f0, 1
0xb3 0x9c 0x01 0xf0
-# CHECK: clfxbr %r0, 0, %f0, 1
+# CHECK: clfxbr %r0, 0, %f0, 1
0xb3 0x9e 0x01 0x00
-# CHECK: clfxbr %r0, 0, %f0, 15
+# CHECK: clfxbr %r0, 0, %f0, 15
0xb3 0x9e 0x0f 0x00
-# CHECK: clfxbr %r0, 0, %f13, 1
+# CHECK: clfxbr %r0, 0, %f13, 1
0xb3 0x9e 0x01 0x0d
-# CHECK: clfxbr %r0, 15, %f0, 1
+# CHECK: clfxbr %r0, 15, %f0, 1
0xb3 0x9e 0xf1 0x00
-# CHECK: clfxbr %r4, 5, %f8, 9
+# CHECK: clfxbr %r4, 5, %f8, 9
0xb3 0x9e 0x59 0x48
-# CHECK: clfxbr %r15, 0, %f0, 1
+# CHECK: clfxbr %r15, 0, %f0, 1
0xb3 0x9e 0x01 0xf0
-# CHECK: clgdbr %r0, 0, %f0, 1
+# CHECK: clgdbr %r0, 0, %f0, 1
0xb3 0xad 0x01 0x00
-# CHECK: clgdbr %r0, 0, %f0, 15
+# CHECK: clgdbr %r0, 0, %f0, 15
0xb3 0xad 0x0f 0x00
-# CHECK: clgdbr %r0, 0, %f15, 1
+# CHECK: clgdbr %r0, 0, %f15, 1
0xb3 0xad 0x01 0x0f
-# CHECK: clgdbr %r0, 15, %f0, 1
+# CHECK: clgdbr %r0, 15, %f0, 1
0xb3 0xad 0xf1 0x00
-# CHECK: clgdbr %r4, 5, %f6, 7
+# CHECK: clgdbr %r4, 5, %f6, 7
0xb3 0xad 0x57 0x46
-# CHECK: clgdbr %r15, 0, %f0, 1
+# CHECK: clgdbr %r15, 0, %f0, 1
0xb3 0xad 0x01 0xf0
-# CHECK: clgebr %r0, 0, %f0, 1
+# CHECK: clgebr %r0, 0, %f0, 1
0xb3 0xac 0x01 0x00
-# CHECK: clgebr %r0, 0, %f0, 15
+# CHECK: clgebr %r0, 0, %f0, 15
0xb3 0xac 0x0f 0x00
-# CHECK: clgebr %r0, 0, %f15, 1
+# CHECK: clgebr %r0, 0, %f15, 1
0xb3 0xac 0x01 0x0f
-# CHECK: clgebr %r0, 15, %f0, 1
+# CHECK: clgebr %r0, 15, %f0, 1
0xb3 0xac 0xf1 0x00
-# CHECK: clgebr %r4, 5, %f6, 7
+# CHECK: clgebr %r4, 5, %f6, 7
0xb3 0xac 0x57 0x46
-# CHECK: clgebr %r15, 0, %f0, 1
+# CHECK: clgebr %r15, 0, %f0, 1
0xb3 0xac 0x01 0xf0
# CHECK: clgib %r0, 0, 0, 0
@@ -2611,22 +2866,22 @@
# CHECK: clgib %r0, 0, 15, 0
0xec 0x0f 0x00 0x00 0x00 0xfd
-# CHECK: clgxbr %r0, 0, %f0, 1
+# CHECK: clgxbr %r0, 0, %f0, 1
0xb3 0xae 0x01 0x00
-# CHECK: clgxbr %r0, 0, %f0, 15
+# CHECK: clgxbr %r0, 0, %f0, 15
0xb3 0xae 0x0f 0x00
-# CHECK: clgxbr %r0, 0, %f13, 1
+# CHECK: clgxbr %r0, 0, %f13, 1
0xb3 0xae 0x01 0x0d
-# CHECK: clgxbr %r0, 15, %f0, 1
+# CHECK: clgxbr %r0, 15, %f0, 1
0xb3 0xae 0xf1 0x00
-# CHECK: clgxbr %r4, 5, %f8, 9
+# CHECK: clgxbr %r4, 5, %f8, 9
0xb3 0xae 0x59 0x48
-# CHECK: clgxbr %r15, 0, %f0, 1
+# CHECK: clgxbr %r15, 0, %f0, 1
0xb3 0xae 0x01 0xf0
# CHECK: clfhsi 0, 0
@@ -2677,6 +2932,36 @@
# CHECK: clfitle %r0, 0
0xec 0x00 0x00 0x00 0xc0 0x73
+# CHECK: clg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x21
+
+# CHECK: clg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x21
+
+# CHECK: clg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x21
+
+# CHECK: clg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x21
+
+# CHECK: clg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x21
+
+# CHECK: clg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x21
+
+# CHECK: clg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x21
+
+# CHECK: clg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x21
+
+# CHECK: clg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x21
+
+# CHECK: clg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x21
+
# CHECK: clgith %r0, 0
0xec 0x00 0x00 0x00 0x20 0x71
@@ -2845,36 +3130,6 @@
# CHECK: clgrb %r0, %r0, 15, 0
0xec 0x00 0x00 0x00 0xf0 0xe5
-# CHECK: clg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x21
-
-# CHECK: clg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x21
-
-# CHECK: clg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x21
-
-# CHECK: clg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x21
-
-# CHECK: clg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x21
-
-# CHECK: clg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x21
-
-# CHECK: clg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x21
-
-# CHECK: clg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x21
-
-# CHECK: clg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x21
-
-# CHECK: clg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x21
-
# CHECK: clhf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0xcf
@@ -3061,6 +3316,87 @@
# CHECK: cliy 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x55
+# CHECK: clm %r0, 0, 0
+0xbd 0x00 0x00 0x00
+
+# CHECK: clm %r0, 15, 4095
+0xbd 0x0f 0x0f 0xff
+
+# CHECK: clm %r0, 0, 0(%r1)
+0xbd 0x00 0x10 0x00
+
+# CHECK: clm %r0, 0, 0(%r15)
+0xbd 0x00 0xf0 0x00
+
+# CHECK: clm %r0, 15, 4095(%r15)
+0xbd 0x0f 0xff 0xff
+
+# CHECK: clm %r0, 0, 4095(%r1)
+0xbd 0x00 0x1f 0xff
+
+# CHECK: clm %r15, 0, 0
+0xbd 0xf0 0x00 0x00
+
+# CHECK: clmh %r0, 0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x20
+
+# CHECK: clmh %r0, 0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x20
+
+# CHECK: clmh %r0, 15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x20
+
+# CHECK: clmh %r0, 15, 1
+0xeb 0x0f 0x00 0x01 0x00 0x20
+
+# CHECK: clmh %r0, 8, 524287
+0xeb 0x08 0x0f 0xff 0x7f 0x20
+
+# CHECK: clmh %r0, 8, 0(%r1)
+0xeb 0x08 0x10 0x00 0x00 0x20
+
+# CHECK: clmh %r0, 4, 0(%r15)
+0xeb 0x04 0xf0 0x00 0x00 0x20
+
+# CHECK: clmh %r0, 4, 524287(%r15)
+0xeb 0x04 0xff 0xff 0x7f 0x20
+
+# CHECK: clmh %r0, 0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x20
+
+# CHECK: clmh %r15, 0, 0
+0xeb 0xf0 0x00 0x00 0x00 0x20
+
+# CHECK: clmy %r0, 0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x21
+
+# CHECK: clmy %r0, 0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x21
+
+# CHECK: clmy %r0, 15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x21
+
+# CHECK: clmy %r0, 15, 1
+0xeb 0x0f 0x00 0x01 0x00 0x21
+
+# CHECK: clmy %r0, 8, 524287
+0xeb 0x08 0x0f 0xff 0x7f 0x21
+
+# CHECK: clmy %r0, 8, 0(%r1)
+0xeb 0x08 0x10 0x00 0x00 0x21
+
+# CHECK: clmy %r0, 4, 0(%r15)
+0xeb 0x04 0xf0 0x00 0x00 0x21
+
+# CHECK: clmy %r0, 4, 524287(%r15)
+0xeb 0x04 0xff 0xff 0x7f 0x21
+
+# CHECK: clmy %r0, 0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x21
+
+# CHECK: clmy %r15, 0, 0
+0xeb 0xf0 0x00 0x00 0x00 0x21
+
# CHECK: clr %r0, %r0
0x15 0x00
@@ -3271,27 +3607,6 @@
# CHECK: clst %r7, %r8
0xb2 0x5d 0x00 0x78
-# CHECK: cl %r0, 0
-0x55 0x00 0x00 0x00
-
-# CHECK: cl %r0, 4095
-0x55 0x00 0x0f 0xff
-
-# CHECK: cl %r0, 0(%r1)
-0x55 0x00 0x10 0x00
-
-# CHECK: cl %r0, 0(%r15)
-0x55 0x00 0xf0 0x00
-
-# CHECK: cl %r0, 4095(%r1,%r15)
-0x55 0x01 0xff 0xff
-
-# CHECK: cl %r0, 4095(%r15,%r1)
-0x55 0x0f 0x1f 0xff
-
-# CHECK: cl %r15, 0
-0x55 0xf0 0x00 0x00
-
# CHECK: cly %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x55
@@ -3322,6 +3637,60 @@
# CHECK: cly %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x55
+# CHECK: cmpsc %r0, %r0
+0xb2 0x63 0x00 0x00
+
+# CHECK: cmpsc %r0, %r14
+0xb2 0x63 0x00 0x0e
+
+# CHECK: cmpsc %r14, %r0
+0xb2 0x63 0x00 0xe0
+
+# CHECK: cmpsc %r6, %r8
+0xb2 0x63 0x00 0x68
+
+# CHECK: cp 0(1), 0(1)
+0xf9 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: cp 0(1), 0(1,%r1)
+0xf9 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: cp 0(1), 0(1,%r15)
+0xf9 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: cp 0(1), 4095(1)
+0xf9 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: cp 0(1), 4095(1,%r1)
+0xf9 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: cp 0(1), 4095(1,%r15)
+0xf9 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: cp 0(1,%r1), 0(1)
+0xf9 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: cp 0(1,%r15), 0(1)
+0xf9 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: cp 4095(1,%r1), 0(1)
+0xf9 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: cp 4095(1,%r15), 0(1)
+0xf9 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: cp 0(16,%r1), 0(1)
+0xf9 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: cp 0(16,%r15), 0(1)
+0xf9 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: cp 0(1), 0(16,%r1)
+0xf9 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: cp 0(1), 0(16,%r15)
+0xf9 0x0f 0x00 0x00 0xf0 0x00
+
# CHECK: cpsdr %f0, %f0, %f0
0xb3 0x72 0x00 0x00
@@ -3451,6 +3820,30 @@
# CHECK: crtle %r0, %r1
0xb9 0x72 0xc0 0x01
+# CHECK: cs %r0, %r0, 0
+0xba 0x00 0x00 0x00
+
+# CHECK: cs %r0, %r0, 4095
+0xba 0x00 0x0f 0xff
+
+# CHECK: cs %r0, %r0, 0(%r1)
+0xba 0x00 0x10 0x00
+
+# CHECK: cs %r0, %r0, 0(%r15)
+0xba 0x00 0xf0 0x00
+
+# CHECK: cs %r0, %r0, 4095(%r1)
+0xba 0x00 0x1f 0xff
+
+# CHECK: cs %r0, %r0, 4095(%r15)
+0xba 0x00 0xff 0xff
+
+# CHECK: cs %r0, %r15, 0
+0xba 0x0f 0x00 0x00
+
+# CHECK: cs %r15, %r0, 0
+0xba 0xf0 0x00 0x00
+
# CHECK: csg %r0, %r0, -524288
0xeb 0x00 0x00 0x00 0x80 0x30
@@ -3484,29 +3877,26 @@
# CHECK: csg %r15, %r0, 0
0xeb 0xf0 0x00 0x00 0x00 0x30
-# CHECK: cs %r0, %r0, 0
-0xba 0x00 0x00 0x00
-
-# CHECK: cs %r0, %r0, 4095
-0xba 0x00 0x0f 0xff
+# CHECK: csst 0, 0, %r0
+0xc8 0x02 0x00 0x00 0x00 0x00
-# CHECK: cs %r0, %r0, 0(%r1)
-0xba 0x00 0x10 0x00
+# CHECK: csst 0, 4095, %r2
+0xc8 0x22 0x00 0x00 0x0f 0xff
-# CHECK: cs %r0, %r0, 0(%r15)
-0xba 0x00 0xf0 0x00
+# CHECK: csst 0, 0(%r1), %r2
+0xc8 0x22 0x00 0x00 0x10 0x00
-# CHECK: cs %r0, %r0, 4095(%r1)
-0xba 0x00 0x1f 0xff
+# CHECK: csst 0, 0(%r15), %r2
+0xc8 0x22 0x00 0x00 0xf0 0x00
-# CHECK: cs %r0, %r0, 4095(%r15)
-0xba 0x00 0xff 0xff
+# CHECK: csst 0(%r1), 4095(%r15), %r2
+0xc8 0x22 0x10 0x00 0xff 0xff
-# CHECK: cs %r0, %r15, 0
-0xba 0x0f 0x00 0x00
+# CHECK: csst 0(%r1), 0(%r15), %r2
+0xc8 0x22 0x10 0x00 0xf0 0x00
-# CHECK: cs %r15, %r0, 0
-0xba 0xf0 0x00 0x00
+# CHECK: csst 4095(%r1), 0(%r15), %r2
+0xc8 0x22 0x1f 0xff 0xf0 0x00
# CHECK: csy %r0, %r0, -524288
0xeb 0x00 0x00 0x00 0x80 0x14
@@ -3541,47 +3931,275 @@
# CHECK: csy %r15, %r0, 0
0xeb 0xf0 0x00 0x00 0x00 0x14
-# CHECK: csst 0, 0, %r0
-0xc8 0x02 0x00 0x00 0x00 0x00
+# CHECK: cu12 %r0, %r0
+0xb2 0xa7 0x00 0x00
-# CHECK: csst 0, 4095, %r2
-0xc8 0x22 0x00 0x00 0x0f 0xff
+# CHECK: cu12 %r0, %r14
+0xb2 0xa7 0x00 0x0e
-# CHECK: csst 0, 0(%r1), %r2
-0xc8 0x22 0x00 0x00 0x10 0x00
+# CHECK: cu12 %r14, %r0
+0xb2 0xa7 0x00 0xe0
-# CHECK: csst 0, 0(%r15), %r2
-0xc8 0x22 0x00 0x00 0xf0 0x00
+# CHECK: cu12 %r6, %r8
+0xb2 0xa7 0x00 0x68
-# CHECK: csst 0(%r1), 4095(%r15), %r2
-0xc8 0x22 0x10 0x00 0xff 0xff
+# CHECK: cu12 %r4, %r12, 1
+0xb2 0xa7 0x10 0x4c
-# CHECK: csst 0(%r1), 0(%r15), %r2
-0xc8 0x22 0x10 0x00 0xf0 0x00
+# CHECK: cu12 %r4, %r12, 15
+0xb2 0xa7 0xf0 0x4c
-# CHECK: csst 4095(%r1), 0(%r15), %r2
-0xc8 0x22 0x1f 0xff 0xf0 0x00
+# CHECK: cu14 %r0, %r0
+0xb9 0xb0 0x00 0x00
-# CHECK: c %r0, 0
-0x59 0x00 0x00 0x00
+# CHECK: cu14 %r0, %r14
+0xb9 0xb0 0x00 0x0e
-# CHECK: c %r0, 4095
-0x59 0x00 0x0f 0xff
+# CHECK: cu14 %r14, %r0
+0xb9 0xb0 0x00 0xe0
-# CHECK: c %r0, 0(%r1)
-0x59 0x00 0x10 0x00
+# CHECK: cu14 %r6, %r8
+0xb9 0xb0 0x00 0x68
-# CHECK: c %r0, 0(%r15)
-0x59 0x00 0xf0 0x00
+# CHECK: cu14 %r4, %r12, 1
+0xb9 0xb0 0x10 0x4c
-# CHECK: c %r0, 4095(%r1,%r15)
-0x59 0x01 0xff 0xff
+# CHECK: cu14 %r4, %r12, 15
+0xb9 0xb0 0xf0 0x4c
-# CHECK: c %r0, 4095(%r15,%r1)
-0x59 0x0f 0x1f 0xff
+# CHECK: cu21 %r0, %r0
+0xb2 0xa6 0x00 0x00
-# CHECK: c %r15, 0
-0x59 0xf0 0x00 0x00
+# CHECK: cu21 %r0, %r14
+0xb2 0xa6 0x00 0x0e
+
+# CHECK: cu21 %r14, %r0
+0xb2 0xa6 0x00 0xe0
+
+# CHECK: cu21 %r6, %r8
+0xb2 0xa6 0x00 0x68
+
+# CHECK: cu21 %r4, %r12, 1
+0xb2 0xa6 0x10 0x4c
+
+# CHECK: cu21 %r4, %r12, 15
+0xb2 0xa6 0xf0 0x4c
+
+# CHECK: cu24 %r0, %r0
+0xb9 0xb1 0x00 0x00
+
+# CHECK: cu24 %r0, %r14
+0xb9 0xb1 0x00 0x0e
+
+# CHECK: cu24 %r14, %r0
+0xb9 0xb1 0x00 0xe0
+
+# CHECK: cu24 %r6, %r8
+0xb9 0xb1 0x00 0x68
+
+# CHECK: cu24 %r4, %r12, 1
+0xb9 0xb1 0x10 0x4c
+
+# CHECK: cu24 %r4, %r12, 15
+0xb9 0xb1 0xf0 0x4c
+
+# CHECK: cu41 %r0, %r0
+0xb9 0xb2 0x00 0x00
+
+# CHECK: cu41 %r0, %r14
+0xb9 0xb2 0x00 0x0e
+
+# CHECK: cu41 %r14, %r0
+0xb9 0xb2 0x00 0xe0
+
+# CHECK: cu41 %r6, %r8
+0xb9 0xb2 0x00 0x68
+
+# CHECK: cu42 %r0, %r0
+0xb9 0xb3 0x00 0x00
+
+# CHECK: cu42 %r0, %r14
+0xb9 0xb3 0x00 0x0e
+
+# CHECK: cu42 %r14, %r0
+0xb9 0xb3 0x00 0xe0
+
+# CHECK: cu42 %r6, %r8
+0xb9 0xb3 0x00 0x68
+
+# CHECK: cuse %r0, %r0
+0xb2 0x57 0x00 0x00
+
+# CHECK: cuse %r0, %r14
+0xb2 0x57 0x00 0x0e
+
+# CHECK: cuse %r14, %r0
+0xb2 0x57 0x00 0xe0
+
+# CHECK: cuse %r6, %r8
+0xb2 0x57 0x00 0x68
+
+# CHECK: cvb %r0, 0
+0x4f 0x00 0x00 0x00
+
+# CHECK: cvb %r0, 4095
+0x4f 0x00 0x0f 0xff
+
+# CHECK: cvb %r0, 0(%r1)
+0x4f 0x00 0x10 0x00
+
+# CHECK: cvb %r0, 0(%r15)
+0x4f 0x00 0xf0 0x00
+
+# CHECK: cvb %r0, 4095(%r1,%r15)
+0x4f 0x01 0xff 0xff
+
+# CHECK: cvb %r0, 4095(%r15,%r1)
+0x4f 0x0f 0x1f 0xff
+
+# CHECK: cvb %r15, 0
+0x4f 0xf0 0x00 0x00
+
+# CHECK: cvbg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x0e
+
+# CHECK: cvbg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x0e
+
+# CHECK: cvbg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x0e
+
+# CHECK: cvbg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x0e
+
+# CHECK: cvbg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x0e
+
+# CHECK: cvbg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x0e
+
+# CHECK: cvbg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x0e
+
+# CHECK: cvbg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x0e
+
+# CHECK: cvbg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x0e
+
+# CHECK: cvbg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x0e
+
+# CHECK: cvby %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x06
+
+# CHECK: cvby %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x06
+
+# CHECK: cvby %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x06
+
+# CHECK: cvby %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x06
+
+# CHECK: cvby %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x06
+
+# CHECK: cvby %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x06
+
+# CHECK: cvby %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x06
+
+# CHECK: cvby %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x06
+
+# CHECK: cvby %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x06
+
+# CHECK: cvby %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x06
+
+# CHECK: cvd %r0, 0
+0x4e 0x00 0x00 0x00
+
+# CHECK: cvd %r0, 4095
+0x4e 0x00 0x0f 0xff
+
+# CHECK: cvd %r0, 0(%r1)
+0x4e 0x00 0x10 0x00
+
+# CHECK: cvd %r0, 0(%r15)
+0x4e 0x00 0xf0 0x00
+
+# CHECK: cvd %r0, 4095(%r1,%r15)
+0x4e 0x01 0xff 0xff
+
+# CHECK: cvd %r0, 4095(%r15,%r1)
+0x4e 0x0f 0x1f 0xff
+
+# CHECK: cvd %r15, 0
+0x4e 0xf0 0x00 0x00
+
+# CHECK: cvdg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x2e
+
+# CHECK: cvdg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x2e
+
+# CHECK: cvdg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x2e
+
+# CHECK: cvdg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x2e
+
+# CHECK: cvdg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x2e
+
+# CHECK: cvdg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x2e
+
+# CHECK: cvdg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x2e
+
+# CHECK: cvdg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x2e
+
+# CHECK: cvdg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x2e
+
+# CHECK: cvdg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x2e
+
+# CHECK: cvdy %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x26
+
+# CHECK: cvdy %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x26
+
+# CHECK: cvdy %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x26
+
+# CHECK: cvdy %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x26
+
+# CHECK: cvdy %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x26
+
+# CHECK: cvdy %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x26
+
+# CHECK: cvdy %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x26
+
+# CHECK: cvdy %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x26
+
+# CHECK: cvdy %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x26
+
+# CHECK: cvdy %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x26
# CHECK: cxbr %f0, %f0
0xb3 0x49 0x00 0x00
@@ -3610,22 +4228,22 @@
# CHECK: cxfbr %f13, %r15
0xb3 0x96 0x00 0xdf
-# CHECK: cxfbra %f0, 0, %r0, 1
+# CHECK: cxfbra %f0, 0, %r0, 1
0xb3 0x96 0x01 0x00
-# CHECK: cxfbra %f0, 0, %r0, 15
+# CHECK: cxfbra %f0, 0, %r0, 15
0xb3 0x96 0x0f 0x00
-# CHECK: cxfbra %f0, 0, %r15, 1
+# CHECK: cxfbra %f0, 0, %r15, 1
0xb3 0x96 0x01 0x0f
-# CHECK: cxfbra %f0, 15, %r0, 1
+# CHECK: cxfbra %f0, 15, %r0, 1
0xb3 0x96 0xf1 0x00
-# CHECK: cxfbra %f4, 5, %r6, 7
+# CHECK: cxfbra %f4, 5, %r6, 7
0xb3 0x96 0x57 0x46
-# CHECK: cxfbra %f13, 0, %r0, 1
+# CHECK: cxfbra %f13, 0, %r0, 1
0xb3 0x96 0x01 0xd0
# CHECK: cxgbr %f0, %r0
@@ -3643,58 +4261,58 @@
# CHECK: cxgbr %f13, %r15
0xb3 0xa6 0x00 0xdf
-# CHECK: cxgbra %f0, 0, %r0, 1
+# CHECK: cxgbra %f0, 0, %r0, 1
0xb3 0xa6 0x01 0x00
-# CHECK: cxgbra %f0, 0, %r0, 15
+# CHECK: cxgbra %f0, 0, %r0, 15
0xb3 0xa6 0x0f 0x00
-# CHECK: cxgbra %f0, 0, %r15, 1
+# CHECK: cxgbra %f0, 0, %r15, 1
0xb3 0xa6 0x01 0x0f
-# CHECK: cxgbra %f0, 15, %r0, 1
+# CHECK: cxgbra %f0, 15, %r0, 1
0xb3 0xa6 0xf1 0x00
-# CHECK: cxgbra %f4, 5, %r6, 7
+# CHECK: cxgbra %f4, 5, %r6, 7
0xb3 0xa6 0x57 0x46
-# CHECK: cxgbra %f13, 0, %r0, 1
+# CHECK: cxgbra %f13, 0, %r0, 1
0xb3 0xa6 0x01 0xd0
-# CHECK: cxlfbr %f0, 0, %r0, 1
+# CHECK: cxlfbr %f0, 0, %r0, 1
0xb3 0x92 0x01 0x00
-# CHECK: cxlfbr %f0, 0, %r0, 15
+# CHECK: cxlfbr %f0, 0, %r0, 15
0xb3 0x92 0x0f 0x00
-# CHECK: cxlfbr %f0, 0, %r15, 1
+# CHECK: cxlfbr %f0, 0, %r15, 1
0xb3 0x92 0x01 0x0f
-# CHECK: cxlfbr %f0, 15, %r0, 1
+# CHECK: cxlfbr %f0, 15, %r0, 1
0xb3 0x92 0xf1 0x00
-# CHECK: cxlfbr %f4, 5, %r6, 7
+# CHECK: cxlfbr %f4, 5, %r6, 7
0xb3 0x92 0x57 0x46
-# CHECK: cxlfbr %f13, 0, %r0, 1
+# CHECK: cxlfbr %f13, 0, %r0, 1
0xb3 0x92 0x01 0xd0
-# CHECK: cxlgbr %f0, 0, %r0, 1
+# CHECK: cxlgbr %f0, 0, %r0, 1
0xb3 0xa2 0x01 0x00
-# CHECK: cxlgbr %f0, 0, %r0, 15
+# CHECK: cxlgbr %f0, 0, %r0, 15
0xb3 0xa2 0x0f 0x00
-# CHECK: cxlgbr %f0, 0, %r15, 1
+# CHECK: cxlgbr %f0, 0, %r15, 1
0xb3 0xa2 0x01 0x0f
-# CHECK: cxlgbr %f0, 15, %r0, 1
+# CHECK: cxlgbr %f0, 15, %r0, 1
0xb3 0xa2 0xf1 0x00
-# CHECK: cxlgbr %f4, 5, %r6, 7
+# CHECK: cxlgbr %f4, 5, %r6, 7
0xb3 0xa2 0x57 0x46
-# CHECK: cxlgbr %f13, 0, %r0, 1
+# CHECK: cxlgbr %f13, 0, %r0, 1
0xb3 0xa2 0x01 0xd0
# CHECK: cy %r0, -524288
@@ -3727,17 +4345,26 @@
# CHECK: cy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x59
-# CHECK: ddbr %f0, %f0
-0xb3 0x1d 0x00 0x00
+# CHECK: d %r0, 0
+0x5d 0x00 0x00 0x00
-# CHECK: ddbr %f0, %f15
-0xb3 0x1d 0x00 0x0f
+# CHECK: d %r0, 4095
+0x5d 0x00 0x0f 0xff
-# CHECK: ddbr %f7, %f8
-0xb3 0x1d 0x00 0x78
+# CHECK: d %r0, 0(%r1)
+0x5d 0x00 0x10 0x00
-# CHECK: ddbr %f15, %f0
-0xb3 0x1d 0x00 0xf0
+# CHECK: d %r0, 0(%r15)
+0x5d 0x00 0xf0 0x00
+
+# CHECK: d %r0, 4095(%r1,%r15)
+0x5d 0x01 0xff 0xff
+
+# CHECK: d %r0, 4095(%r15,%r1)
+0x5d 0x0f 0x1f 0xff
+
+# CHECK: d %r14, 0
+0x5d 0xe0 0x00 0x00
# CHECK: ddb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x1d
@@ -3760,17 +4387,17 @@
# CHECK: ddb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x1d
-# CHECK: debr %f0, %f0
-0xb3 0x0d 0x00 0x00
+# CHECK: ddbr %f0, %f0
+0xb3 0x1d 0x00 0x00
-# CHECK: debr %f0, %f15
-0xb3 0x0d 0x00 0x0f
+# CHECK: ddbr %f0, %f15
+0xb3 0x1d 0x00 0x0f
-# CHECK: debr %f7, %f8
-0xb3 0x0d 0x00 0x78
+# CHECK: ddbr %f7, %f8
+0xb3 0x1d 0x00 0x78
-# CHECK: debr %f15, %f0
-0xb3 0x0d 0x00 0xf0
+# CHECK: ddbr %f15, %f0
+0xb3 0x1d 0x00 0xf0
# CHECK: deb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x0d
@@ -3793,17 +4420,83 @@
# CHECK: deb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x0d
-# CHECK: dlgr %r0, %r0
-0xb9 0x87 0x00 0x00
+# CHECK: debr %f0, %f0
+0xb3 0x0d 0x00 0x00
-# CHECK: dlgr %r0, %r15
-0xb9 0x87 0x00 0x0f
+# CHECK: debr %f0, %f15
+0xb3 0x0d 0x00 0x0f
-# CHECK: dlgr %r14, %r0
-0xb9 0x87 0x00 0xe0
+# CHECK: debr %f7, %f8
+0xb3 0x0d 0x00 0x78
-# CHECK: dlgr %r6, %r9
-0xb9 0x87 0x00 0x69
+# CHECK: debr %f15, %f0
+0xb3 0x0d 0x00 0xf0
+
+# CHECK: didbr %f0, %f0, %f0, 1
+0xb3 0x5b 0x01 0x00
+
+# CHECK: didbr %f0, %f0, %f0, 15
+0xb3 0x5b 0x0f 0x00
+
+# CHECK: didbr %f0, %f0, %f15, 1
+0xb3 0x5b 0x01 0x0f
+
+# CHECK: didbr %f0, %f15, %f0, 1
+0xb3 0x5b 0xf1 0x00
+
+# CHECK: didbr %f4, %f5, %f6, 7
+0xb3 0x5b 0x57 0x46
+
+# CHECK: didbr %f15, %f0, %f0, 1
+0xb3 0x5b 0x01 0xf0
+
+# CHECK: diebr %f0, %f0, %f0, 1
+0xb3 0x53 0x01 0x00
+
+# CHECK: diebr %f0, %f0, %f0, 15
+0xb3 0x53 0x0f 0x00
+
+# CHECK: diebr %f0, %f0, %f15, 1
+0xb3 0x53 0x01 0x0f
+
+# CHECK: diebr %f0, %f15, %f0, 1
+0xb3 0x53 0xf1 0x00
+
+# CHECK: diebr %f4, %f5, %f6, 7
+0xb3 0x53 0x57 0x46
+
+# CHECK: diebr %f15, %f0, %f0, 1
+0xb3 0x53 0x01 0xf0
+
+# CHECK: dl %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x97
+
+# CHECK: dl %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x97
+
+# CHECK: dl %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x97
+
+# CHECK: dl %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x97
+
+# CHECK: dl %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x97
+
+# CHECK: dl %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x97
+
+# CHECK: dl %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x97
+
+# CHECK: dl %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x97
+
+# CHECK: dl %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x97
+
+# CHECK: dl %r14, 0
+0xe3 0xe0 0x00 0x00 0x00 0x97
# CHECK: dlg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x87
@@ -3835,6 +4528,18 @@
# CHECK: dlg %r14, 0
0xe3 0xe0 0x00 0x00 0x00 0x87
+# CHECK: dlgr %r0, %r0
+0xb9 0x87 0x00 0x00
+
+# CHECK: dlgr %r0, %r15
+0xb9 0x87 0x00 0x0f
+
+# CHECK: dlgr %r14, %r0
+0xb9 0x87 0x00 0xe0
+
+# CHECK: dlgr %r6, %r9
+0xb9 0x87 0x00 0x69
+
# CHECK: dlr %r0, %r0
0xb9 0x97 0x00 0x00
@@ -3847,47 +4552,89 @@
# CHECK: dlr %r6, %r9
0xb9 0x97 0x00 0x69
-# CHECK: dl %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x97
+# CHECK: dp 0(1), 0(1)
+0xfd 0x00 0x00 0x00 0x00 0x00
-# CHECK: dl %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x97
+# CHECK: dp 0(1), 0(1,%r1)
+0xfd 0x00 0x00 0x00 0x10 0x00
-# CHECK: dl %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x97
+# CHECK: dp 0(1), 0(1,%r15)
+0xfd 0x00 0x00 0x00 0xf0 0x00
-# CHECK: dl %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x97
+# CHECK: dp 0(1), 4095(1)
+0xfd 0x00 0x00 0x00 0x0f 0xff
-# CHECK: dl %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x97
+# CHECK: dp 0(1), 4095(1,%r1)
+0xfd 0x00 0x00 0x00 0x1f 0xff
-# CHECK: dl %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x97
+# CHECK: dp 0(1), 4095(1,%r15)
+0xfd 0x00 0x00 0x00 0xff 0xff
-# CHECK: dl %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x97
+# CHECK: dp 0(1,%r1), 0(1)
+0xfd 0x00 0x10 0x00 0x00 0x00
-# CHECK: dl %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x97
+# CHECK: dp 0(1,%r15), 0(1)
+0xfd 0x00 0xf0 0x00 0x00 0x00
-# CHECK: dl %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x97
+# CHECK: dp 4095(1,%r1), 0(1)
+0xfd 0x00 0x1f 0xff 0x00 0x00
-# CHECK: dl %r14, 0
-0xe3 0xe0 0x00 0x00 0x00 0x97
+# CHECK: dp 4095(1,%r15), 0(1)
+0xfd 0x00 0xff 0xff 0x00 0x00
-# CHECK: dsgfr %r0, %r0
-0xb9 0x1d 0x00 0x00
+# CHECK: dp 0(16,%r1), 0(1)
+0xfd 0xf0 0x10 0x00 0x00 0x00
-# CHECK: dsgfr %r0, %r15
-0xb9 0x1d 0x00 0x0f
+# CHECK: dp 0(16,%r15), 0(1)
+0xfd 0xf0 0xf0 0x00 0x00 0x00
-# CHECK: dsgfr %r14, %r0
-0xb9 0x1d 0x00 0xe0
+# CHECK: dp 0(1), 0(16,%r1)
+0xfd 0x0f 0x00 0x00 0x10 0x00
-# CHECK: dsgfr %r6, %r9
-0xb9 0x1d 0x00 0x69
+# CHECK: dp 0(1), 0(16,%r15)
+0xfd 0x0f 0x00 0x00 0xf0 0x00
+
+# CHECK: dr %r0, %r0
+0x1d 0x00
+
+# CHECK: dr %r0, %r15
+0x1d 0x0f
+
+# CHECK: dr %r14, %r0
+0x1d 0xe0
+
+# CHECK: dr %r6, %r9
+0x1d 0x69
+
+# CHECK: dsg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x0d
+
+# CHECK: dsg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x0d
+
+# CHECK: dsg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x0d
+
+# CHECK: dsg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x0d
+
+# CHECK: dsg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x0d
+
+# CHECK: dsg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x0d
+
+# CHECK: dsg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x0d
+
+# CHECK: dsg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x0d
+
+# CHECK: dsg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x0d
+
+# CHECK: dsg %r14, 0
+0xe3 0xe0 0x00 0x00 0x00 0x0d
# CHECK: dsgf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x1d
@@ -3919,6 +4666,18 @@
# CHECK: dsgf %r14, 0
0xe3 0xe0 0x00 0x00 0x00 0x1d
+# CHECK: dsgfr %r0, %r0
+0xb9 0x1d 0x00 0x00
+
+# CHECK: dsgfr %r0, %r15
+0xb9 0x1d 0x00 0x0f
+
+# CHECK: dsgfr %r14, %r0
+0xb9 0x1d 0x00 0xe0
+
+# CHECK: dsgfr %r6, %r9
+0xb9 0x1d 0x00 0x69
+
# CHECK: dsgr %r0, %r0
0xb9 0x0d 0x00 0x00
@@ -3931,36 +4690,6 @@
# CHECK: dsgr %r6, %r9
0xb9 0x0d 0x00 0x69
-# CHECK: dsg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x0d
-
-# CHECK: dsg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x0d
-
-# CHECK: dsg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x0d
-
-# CHECK: dsg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x0d
-
-# CHECK: dsg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x0d
-
-# CHECK: dsg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x0d
-
-# CHECK: dsg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x0d
-
-# CHECK: dsg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x0d
-
-# CHECK: dsg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x0d
-
-# CHECK: dsg %r14, 0
-0xe3 0xe0 0x00 0x00 0x00 0x0d
-
# CHECK: dxbr %f0, %f0
0xb3 0x4d 0x00 0x00
@@ -3988,6 +4717,45 @@
# CHECK: ear %r15, %a15
0xb2 0x4f 0x00 0xff
+# CHECK: ecag %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x4c
+
+# CHECK: ecag %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x4c
+
+# CHECK: ecag %r14, %r15, 0
+0xeb 0xef 0x00 0x00 0x00 0x4c
+
+# CHECK: ecag %r15, %r15, 0
+0xeb 0xff 0x00 0x00 0x00 0x4c
+
+# CHECK: ecag %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x4c
+
+# CHECK: ecag %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x4c
+
+# CHECK: ecag %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x4c
+
+# CHECK: ecag %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0x4c
+
+# CHECK: ecag %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x4c
+
+# CHECK: ecag %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x4c
+
+# CHECK: ecag %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x4c
+
+# CHECK: ecag %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x4c
+
+# CHECK: ecag %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x4c
+
# CHECK: ectg 0, 0, %r0
0xc8 0x01 0x00 0x00 0x00 0x00
@@ -4009,6 +4777,78 @@
# CHECK: ectg 4095(%r1), 0(%r15), %r2
0xc8 0x21 0x1f 0xff 0xf0 0x00
+# CHECK: ed 0(1), 0
+0xde 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: ed 0(1), 0(%r1)
+0xde 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: ed 0(1), 0(%r15)
+0xde 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: ed 0(1), 4095
+0xde 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: ed 0(1), 4095(%r1)
+0xde 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: ed 0(1), 4095(%r15)
+0xde 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: ed 0(1,%r1), 0
+0xde 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: ed 0(1,%r15), 0
+0xde 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: ed 4095(1,%r1), 0
+0xde 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: ed 4095(1,%r15), 0
+0xde 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: ed 0(256,%r1), 0
+0xde 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: ed 0(256,%r15), 0
+0xde 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: edmk 0(1), 0
+0xdf 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: edmk 0(1), 0(%r1)
+0xdf 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: edmk 0(1), 0(%r15)
+0xdf 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: edmk 0(1), 4095
+0xdf 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: edmk 0(1), 4095(%r1)
+0xdf 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: edmk 0(1), 4095(%r15)
+0xdf 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: edmk 0(1,%r1), 0
+0xdf 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: edmk 0(1,%r15), 0
+0xdf 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: edmk 4095(1,%r1), 0
+0xdf 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: edmk 4095(1,%r15), 0
+0xdf 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: edmk 0(256,%r1), 0
+0xdf 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: edmk 0(256,%r15), 0
+0xdf 0xff 0xf0 0x00 0x00 0x00
+
# CHECK: efpc %r0
0xb3 0x8c 0x00 0x00
@@ -4018,6 +4858,18 @@
# CHECK: efpc %r15
0xb3 0x8c 0x00 0xf0
+# CHECK: epsw %r0, %r0
+0xb9 0x8d 0x00 0x00
+
+# CHECK: epsw %r0, %r15
+0xb9 0x8d 0x00 0x0f
+
+# CHECK: epsw %r15, %r0
+0xb9 0x8d 0x00 0xf0
+
+# CHECK: epsw %r6, %r8
+0xb9 0x8d 0x00 0x68
+
# CHECK: etnd %r0
0xb2 0xec 0x00 0x00
@@ -4063,22 +4915,22 @@
# CHECK: fidbr %f15, 0, %f0
0xb3 0x5f 0x00 0xf0
-# CHECK: fidbra %f0, 0, %f0, 1
+# CHECK: fidbra %f0, 0, %f0, 1
0xb3 0x5f 0x01 0x00
-# CHECK: fidbra %f0, 0, %f0, 15
+# CHECK: fidbra %f0, 0, %f0, 15
0xb3 0x5f 0x0f 0x00
-# CHECK: fidbra %f0, 0, %f15, 1
+# CHECK: fidbra %f0, 0, %f15, 1
0xb3 0x5f 0x01 0x0f
-# CHECK: fidbra %f0, 15, %f0, 1
+# CHECK: fidbra %f0, 15, %f0, 1
0xb3 0x5f 0xf1 0x00
-# CHECK: fidbra %f4, 5, %f6, 7
+# CHECK: fidbra %f4, 5, %f6, 7
0xb3 0x5f 0x57 0x46
-# CHECK: fidbra %f15, 0, %f0, 1
+# CHECK: fidbra %f15, 0, %f0, 1
0xb3 0x5f 0x01 0xf0
# CHECK: fiebr %f0, 0, %f0
@@ -4096,22 +4948,22 @@
# CHECK: fiebr %f15, 0, %f0
0xb3 0x57 0x00 0xf0
-# CHECK: fiebra %f0, 0, %f0, 1
+# CHECK: fiebra %f0, 0, %f0, 1
0xb3 0x57 0x01 0x00
-# CHECK: fiebra %f0, 0, %f0, 15
+# CHECK: fiebra %f0, 0, %f0, 15
0xb3 0x57 0x0f 0x00
-# CHECK: fiebra %f0, 0, %f15, 1
+# CHECK: fiebra %f0, 0, %f15, 1
0xb3 0x57 0x01 0x0f
-# CHECK: fiebra %f0, 15, %f0, 1
+# CHECK: fiebra %f0, 15, %f0, 1
0xb3 0x57 0xf1 0x00
-# CHECK: fiebra %f4, 5, %f6, 7
+# CHECK: fiebra %f4, 5, %f6, 7
0xb3 0x57 0x57 0x46
-# CHECK: fiebra %f15, 0, %f0, 1
+# CHECK: fiebra %f15, 0, %f0, 1
0xb3 0x57 0x01 0xf0
# CHECK: fixbr %f0, 0, %f0
@@ -4129,22 +4981,22 @@
# CHECK: fixbr %f13, 0, %f0
0xb3 0x47 0x00 0xd0
-# CHECK: fixbra %f0, 0, %f0, 1
+# CHECK: fixbra %f0, 0, %f0, 1
0xb3 0x47 0x01 0x00
-# CHECK: fixbra %f0, 0, %f0, 15
+# CHECK: fixbra %f0, 0, %f0, 15
0xb3 0x47 0x0f 0x00
-# CHECK: fixbra %f0, 0, %f13, 1
+# CHECK: fixbra %f0, 0, %f13, 1
0xb3 0x47 0x01 0x0d
-# CHECK: fixbra %f0, 15, %f0, 1
+# CHECK: fixbra %f0, 15, %f0, 1
0xb3 0x47 0xf1 0x00
-# CHECK: fixbra %f4, 5, %f8, 9
+# CHECK: fixbra %f4, 5, %f8, 9
0xb3 0x47 0x59 0x48
-# CHECK: fixbra %f13, 0, %f0, 1
+# CHECK: fixbra %f13, 0, %f0, 1
0xb3 0x47 0x01 0xd0
# CHECK: flogr %r0, %r0
@@ -4366,6 +5218,201 @@
# CHECK: ipm %r15
0xb2 0x22 0x00 0xf0
+# CHECK: kdb %f0, 0
+0xed 0x00 0x00 0x00 0x00 0x18
+
+# CHECK: kdb %f0, 4095
+0xed 0x00 0x0f 0xff 0x00 0x18
+
+# CHECK: kdb %f0, 0(%r1)
+0xed 0x00 0x10 0x00 0x00 0x18
+
+# CHECK: kdb %f0, 0(%r15)
+0xed 0x00 0xf0 0x00 0x00 0x18
+
+# CHECK: kdb %f0, 4095(%r1,%r15)
+0xed 0x01 0xff 0xff 0x00 0x18
+
+# CHECK: kdb %f0, 4095(%r15,%r1)
+0xed 0x0f 0x1f 0xff 0x00 0x18
+
+# CHECK: kdb %f15, 0
+0xed 0xf0 0x00 0x00 0x00 0x18
+
+# CHECK: kdbr %f0, %f0
+0xb3 0x18 0x00 0x00
+
+# CHECK: kdbr %f0, %f15
+0xb3 0x18 0x00 0x0f
+
+# CHECK: kdbr %f7, %f8
+0xb3 0x18 0x00 0x78
+
+# CHECK: kdbr %f15, %f0
+0xb3 0x18 0x00 0xf0
+
+# CHECK: keb %f0, 0
+0xed 0x00 0x00 0x00 0x00 0x08
+
+# CHECK: keb %f0, 4095
+0xed 0x00 0x0f 0xff 0x00 0x08
+
+# CHECK: keb %f0, 0(%r1)
+0xed 0x00 0x10 0x00 0x00 0x08
+
+# CHECK: keb %f0, 0(%r15)
+0xed 0x00 0xf0 0x00 0x00 0x08
+
+# CHECK: keb %f0, 4095(%r1,%r15)
+0xed 0x01 0xff 0xff 0x00 0x08
+
+# CHECK: keb %f0, 4095(%r15,%r1)
+0xed 0x0f 0x1f 0xff 0x00 0x08
+
+# CHECK: keb %f15, 0
+0xed 0xf0 0x00 0x00 0x00 0x08
+
+# CHECK: kebr %f0, %f0
+0xb3 0x08 0x00 0x00
+
+# CHECK: kebr %f0, %f15
+0xb3 0x08 0x00 0x0f
+
+# CHECK: kebr %f7, %f8
+0xb3 0x08 0x00 0x78
+
+# CHECK: kebr %f15, %f0
+0xb3 0x08 0x00 0xf0
+
+# CHECK: kimd %r2, %r10
+0xb9 0x3e 0x00 0x2a
+
+# CHECK: kimd %r2, %r14
+0xb9 0x3e 0x00 0x2e
+
+# CHECK: kimd %r14, %r2
+0xb9 0x3e 0x00 0xe2
+
+# CHECK: kimd %r14, %r10
+0xb9 0x3e 0x00 0xea
+
+# CHECK: klmd %r2, %r10
+0xb9 0x3f 0x00 0x2a
+
+# CHECK: klmd %r2, %r14
+0xb9 0x3f 0x00 0x2e
+
+# CHECK: klmd %r14, %r2
+0xb9 0x3f 0x00 0xe2
+
+# CHECK: klmd %r14, %r10
+0xb9 0x3f 0x00 0xea
+
+# CHECK: km %r2, %r10
+0xb9 0x2e 0x00 0x2a
+
+# CHECK: km %r2, %r14
+0xb9 0x2e 0x00 0x2e
+
+# CHECK: km %r14, %r2
+0xb9 0x2e 0x00 0xe2
+
+# CHECK: km %r14, %r10
+0xb9 0x2e 0x00 0xea
+
+# CHECK: kmac %r2, %r10
+0xb9 0x1e 0x00 0x2a
+
+# CHECK: kmac %r2, %r14
+0xb9 0x1e 0x00 0x2e
+
+# CHECK: kmac %r14, %r2
+0xb9 0x1e 0x00 0xe2
+
+# CHECK: kmac %r14, %r10
+0xb9 0x1e 0x00 0xea
+
+# CHECK: kmc %r2, %r10
+0xb9 0x2f 0x00 0x2a
+
+# CHECK: kmc %r2, %r14
+0xb9 0x2f 0x00 0x2e
+
+# CHECK: kmc %r14, %r2
+0xb9 0x2f 0x00 0xe2
+
+# CHECK: kmc %r14, %r10
+0xb9 0x2f 0x00 0xea
+
+# CHECK: kmctr %r2, %r4, %r10
+0xb9 0x2d 0x40 0x2a
+
+# CHECK: kmctr %r2, %r6, %r14
+0xb9 0x2d 0x60 0x2e
+
+# CHECK: kmctr %r14, %r8, %r2
+0xb9 0x2d 0x80 0xe2
+
+# CHECK: kmctr %r14, %r12, %r10
+0xb9 0x2d 0xc0 0xea
+
+# CHECK: kmf %r2, %r10
+0xb9 0x2a 0x00 0x2a
+
+# CHECK: kmf %r2, %r14
+0xb9 0x2a 0x00 0x2e
+
+# CHECK: kmf %r14, %r2
+0xb9 0x2a 0x00 0xe2
+
+# CHECK: kmf %r14, %r10
+0xb9 0x2a 0x00 0xea
+
+# CHECK: kmo %r2, %r10
+0xb9 0x2b 0x00 0x2a
+
+# CHECK: kmo %r2, %r14
+0xb9 0x2b 0x00 0x2e
+
+# CHECK: kmo %r14, %r2
+0xb9 0x2b 0x00 0xe2
+
+# CHECK: kmo %r14, %r10
+0xb9 0x2b 0x00 0xea
+
+# CHECK: kxbr %f0, %f0
+0xb3 0x48 0x00 0x00
+
+# CHECK: kxbr %f0, %f13
+0xb3 0x48 0x00 0x0d
+
+# CHECK: kxbr %f8, %f8
+0xb3 0x48 0x00 0x88
+
+# CHECK: kxbr %f13, %f0
+0xb3 0x48 0x00 0xd0
+
+# CHECK: l %r0, 0
+0x58 0x00 0x00 0x00
+
+# CHECK: l %r0, 4095
+0x58 0x00 0x0f 0xff
+
+# CHECK: l %r0, 0(%r1)
+0x58 0x00 0x10 0x00
+
+# CHECK: l %r0, 0(%r15)
+0x58 0x00 0xf0 0x00
+
+# CHECK: l %r0, 4095(%r1,%r15)
+0x58 0x01 0xff 0xff
+
+# CHECK: l %r0, 4095(%r15,%r1)
+0x58 0x0f 0x1f 0xff
+
+# CHECK: l %r15, 0
+0x58 0xf0 0x00 0x00
+
# CHECK: la %r0, 0
0x41 0x00 0x00 0x00
@@ -4672,8 +5719,8 @@
# CHECK: lan %r15, %r0, 0
0xeb 0xf0 0x00 0x00 0x00 0xf4
-# CHECK: csy %r0, %r0, -524288
-0xeb 0x00 0x00 0x00 0x80 0x14
+# CHECK: lang %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xe4
# CHECK: lang %r0, %r0, -1
0xeb 0x00 0x0f 0xff 0xff 0xe4
@@ -4771,6 +5818,36 @@
# CHECK: laog %r15, %r0, 0
0xeb 0xf0 0x00 0x00 0x00 0xe6
+# CHECK: lat %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x9f
+
+# CHECK: lat %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x9f
+
+# CHECK: lat %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x9f
+
+# CHECK: lat %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x9f
+
+# CHECK: lat %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x9f
+
+# CHECK: lat %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x9f
+
+# CHECK: lat %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x9f
+
+# CHECK: lat %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x9f
+
+# CHECK: lat %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x9f
+
+# CHECK: lat %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x9f
+
# CHECK: lax %r0, %r0, -524288
0xeb 0x00 0x00 0x00 0x80 0xf7
@@ -4867,15 +5944,6 @@
# CHECK: lay %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x71
-# CHECK: lbr %r0, %r15
-0xb9 0x26 0x00 0x0f
-
-# CHECK: lbr %r7, %r8
-0xb9 0x26 0x00 0x78
-
-# CHECK: lbr %r15, %r0
-0xb9 0x26 0x00 0xf0
-
# CHECK: lb %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x76
@@ -4936,6 +6004,15 @@
# CHECK: lbh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc0
+# CHECK: lbr %r0, %r15
+0xb9 0x26 0x00 0x0f
+
+# CHECK: lbr %r7, %r8
+0xb9 0x26 0x00 0x78
+
+# CHECK: lbr %r15, %r0
+0xb9 0x26 0x00 0xf0
+
# CHECK: lcdbr %f0, %f9
0xb3 0x13 0x00 0x09
@@ -5008,14 +6085,26 @@
# CHECK: lcxbr %f13, %f9
0xb3 0x43 0x00 0xd9
-# CHECK: ldebr %f0, %f15
-0xb3 0x04 0x00 0x0f
+# CHECK: ld %f0, 0
+0x68 0x00 0x00 0x00
-# CHECK: ldebr %f7, %f8
-0xb3 0x04 0x00 0x78
+# CHECK: ld %f0, 4095
+0x68 0x00 0x0f 0xff
-# CHECK: ldebr %f15, %f0
-0xb3 0x04 0x00 0xf0
+# CHECK: ld %f0, 0(%r1)
+0x68 0x00 0x10 0x00
+
+# CHECK: ld %f0, 0(%r15)
+0x68 0x00 0xf0 0x00
+
+# CHECK: ld %f0, 4095(%r1,%r15)
+0x68 0x01 0xff 0xff
+
+# CHECK: ld %f0, 4095(%r15,%r1)
+0x68 0x0f 0x1f 0xff
+
+# CHECK: ld %f15, 0
+0x68 0xf0 0x00 0x00
# CHECK: ldeb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x04
@@ -5038,6 +6127,15 @@
# CHECK: ldeb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x04
+# CHECK: ldebr %f0, %f15
+0xb3 0x04 0x00 0x0f
+
+# CHECK: ldebr %f7, %f8
+0xb3 0x04 0x00 0x78
+
+# CHECK: ldebr %f15, %f0
+0xb3 0x04 0x00 0xf0
+
# CHECK: ldgr %f0, %r0
0xb3 0xc1 0x00 0x00
@@ -5065,27 +6163,6 @@
# CHECK: ldr %f15, %f9
0x28 0xf9
-# CHECK: ld %f0, 0
-0x68 0x00 0x00 0x00
-
-# CHECK: ld %f0, 4095
-0x68 0x00 0x0f 0xff
-
-# CHECK: ld %f0, 0(%r1)
-0x68 0x00 0x10 0x00
-
-# CHECK: ld %f0, 0(%r15)
-0x68 0x00 0xf0 0x00
-
-# CHECK: ld %f0, 4095(%r1,%r15)
-0x68 0x01 0xff 0xff
-
-# CHECK: ld %f0, 4095(%r15,%r1)
-0x68 0x0f 0x1f 0xff
-
-# CHECK: ld %f15, 0
-0x68 0xf0 0x00 0x00
-
# CHECK: ldxbr %f0, %f0
0xb3 0x45 0x00 0x00
@@ -5101,22 +6178,22 @@
# CHECK: ldxbr %f13, %f13
0xb3 0x45 0x00 0xdd
-# CHECK: ldxbra %f0, 0, %f0, 1
+# CHECK: ldxbra %f0, 0, %f0, 1
0xb3 0x45 0x01 0x00
-# CHECK: ldxbra %f0, 0, %f0, 15
+# CHECK: ldxbra %f0, 0, %f0, 15
0xb3 0x45 0x0f 0x00
-# CHECK: ldxbra %f0, 0, %f13, 1
+# CHECK: ldxbra %f0, 0, %f13, 1
0xb3 0x45 0x01 0x0d
-# CHECK: ldxbra %f0, 15, %f0, 1
+# CHECK: ldxbra %f0, 15, %f0, 1
0xb3 0x45 0xf1 0x00
-# CHECK: ldxbra %f4, 5, %f8, 9
+# CHECK: ldxbra %f4, 5, %f8, 9
0xb3 0x45 0x59 0x48
-# CHECK: ldxbra %f13, 0, %f0, 1
+# CHECK: ldxbra %f13, 0, %f0, 1
0xb3 0x45 0x01 0xd0
# CHECK: ldy %f0, -524288
@@ -5149,6 +6226,27 @@
# CHECK: ldy %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x65
+# CHECK: le %f0, 0
+0x78 0x00 0x00 0x00
+
+# CHECK: le %f0, 4095
+0x78 0x00 0x0f 0xff
+
+# CHECK: le %f0, 0(%r1)
+0x78 0x00 0x10 0x00
+
+# CHECK: le %f0, 0(%r15)
+0x78 0x00 0xf0 0x00
+
+# CHECK: le %f0, 4095(%r1,%r15)
+0x78 0x01 0xff 0xff
+
+# CHECK: le %f0, 4095(%r15,%r1)
+0x78 0x0f 0x1f 0xff
+
+# CHECK: le %f15, 0
+0x78 0xf0 0x00 0x00
+
# CHECK: ledbr %f0, %f0
0xb3 0x44 0x00 0x00
@@ -5164,22 +6262,22 @@
# CHECK: ledbr %f15, %f15
0xb3 0x44 0x00 0xff
-# CHECK: ledbra %f0, 0, %f0, 1
+# CHECK: ledbra %f0, 0, %f0, 1
0xb3 0x44 0x01 0x00
-# CHECK: ledbra %f0, 0, %f0, 15
+# CHECK: ledbra %f0, 0, %f0, 15
0xb3 0x44 0x0f 0x00
-# CHECK: ledbra %f0, 0, %f15, 1
+# CHECK: ledbra %f0, 0, %f15, 1
0xb3 0x44 0x01 0x0f
-# CHECK: ledbra %f0, 15, %f0, 1
+# CHECK: ledbra %f0, 15, %f0, 1
0xb3 0x44 0xf1 0x00
-# CHECK: ledbra %f4, 5, %f6, 7
+# CHECK: ledbra %f4, 5, %f6, 7
0xb3 0x44 0x57 0x46
-# CHECK: ledbra %f15, 0, %f0, 1
+# CHECK: ledbra %f15, 0, %f0, 1
0xb3 0x44 0x01 0xf0
# CHECK: ler %f0, %f9
@@ -5194,27 +6292,6 @@
# CHECK: ler %f15, %f9
0x38 0xf9
-# CHECK: le %f0, 0
-0x78 0x00 0x00 0x00
-
-# CHECK: le %f0, 4095
-0x78 0x00 0x0f 0xff
-
-# CHECK: le %f0, 0(%r1)
-0x78 0x00 0x10 0x00
-
-# CHECK: le %f0, 0(%r15)
-0x78 0x00 0xf0 0x00
-
-# CHECK: le %f0, 4095(%r1,%r15)
-0x78 0x01 0xff 0xff
-
-# CHECK: le %f0, 4095(%r15,%r1)
-0x78 0x0f 0x1f 0xff
-
-# CHECK: le %f15, 0
-0x78 0xf0 0x00 0x00
-
# CHECK: lexbr %f0, %f0
0xb3 0x46 0x00 0x00
@@ -5230,22 +6307,22 @@
# CHECK: lexbr %f13, %f13
0xb3 0x46 0x00 0xdd
-# CHECK: lexbra %f0, 0, %f0, 1
+# CHECK: lexbra %f0, 0, %f0, 1
0xb3 0x46 0x01 0x00
-# CHECK: lexbra %f0, 0, %f0, 15
+# CHECK: lexbra %f0, 0, %f0, 15
0xb3 0x46 0x0f 0x00
-# CHECK: lexbra %f0, 0, %f13, 1
+# CHECK: lexbra %f0, 0, %f13, 1
0xb3 0x46 0x01 0x0d
-# CHECK: lexbra %f0, 15, %f0, 1
+# CHECK: lexbra %f0, 15, %f0, 1
0xb3 0x46 0xf1 0x00
-# CHECK: lexbra %f4, 5, %f8, 9
+# CHECK: lexbra %f4, 5, %f8, 9
0xb3 0x46 0x59 0x48
-# CHECK: lexbra %f13, 0, %f0, 1
+# CHECK: lexbra %f13, 0, %f0, 1
0xb3 0x46 0x01 0xd0
# CHECK: ley %f0, -524288
@@ -5278,6 +6355,24 @@
# CHECK: ley %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x64
+# CHECK: lfas 0
+0xb2 0xbd 0x00 0x00
+
+# CHECK: lfas 0(%r1)
+0xb2 0xbd 0x10 0x00
+
+# CHECK: lfas 0(%r15)
+0xb2 0xbd 0xf0 0x00
+
+# CHECK: lfas 4095
+0xb2 0xbd 0x0f 0xff
+
+# CHECK: lfas 4095(%r1)
+0xb2 0xbd 0x1f 0xff
+
+# CHECK: lfas 4095(%r15)
+0xb2 0xbd 0xff 0xff
+
# CHECK: lfh %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0xca
@@ -5338,24 +6433,6 @@
# CHECK: lfhat %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc8
-# CHECK: lfas 0
-0xb2 0xbd 0x00 0x00
-
-# CHECK: lfas 0(%r1)
-0xb2 0xbd 0x10 0x00
-
-# CHECK: lfas 0(%r15)
-0xb2 0xbd 0xf0 0x00
-
-# CHECK: lfas 4095
-0xb2 0xbd 0x0f 0xff
-
-# CHECK: lfas 4095(%r1)
-0xb2 0xbd 0x1f 0xff
-
-# CHECK: lfas 4095(%r15)
-0xb2 0xbd 0xff 0xff
-
# CHECK: lfpc 0
0xb2 0x9d 0x00 0x00
@@ -5374,14 +6451,65 @@
# CHECK: lfpc 4095(%r15)
0xb2 0x9d 0xff 0xff
-# CHECK: lgbr %r0, %r15
-0xb9 0x06 0x00 0x0f
+# CHECK: lg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x04
-# CHECK: lgbr %r7, %r8
-0xb9 0x06 0x00 0x78
+# CHECK: lg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x04
-# CHECK: lgbr %r15, %r0
-0xb9 0x06 0x00 0xf0
+# CHECK: lg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x04
+
+# CHECK: lg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x04
+
+# CHECK: lg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x04
+
+# CHECK: lg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x04
+
+# CHECK: lg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x04
+
+# CHECK: lg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x04
+
+# CHECK: lg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x04
+
+# CHECK: lg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x04
+
+# CHECK: lgat %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x85
+
+# CHECK: lgat %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x85
+
+# CHECK: lgat %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x85
+
+# CHECK: lgat %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x85
+
+# CHECK: lgat %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x85
+
+# CHECK: lgat %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x85
+
+# CHECK: lgat %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x85
+
+# CHECK: lgat %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x85
+
+# CHECK: lgat %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x85
+
+# CHECK: lgat %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x85
# CHECK: lgb %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x77
@@ -5413,6 +6541,15 @@
# CHECK: lgb %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x77
+# CHECK: lgbr %r0, %r15
+0xb9 0x06 0x00 0x0f
+
+# CHECK: lgbr %r7, %r8
+0xb9 0x06 0x00 0x78
+
+# CHECK: lgbr %r15, %r0
+0xb9 0x06 0x00 0xf0
+
# CHECK: lgdr %r0, %f0
0xb3 0xcd 0x00 0x00
@@ -5428,33 +6565,6 @@
# CHECK: lgdr %r15, %f15
0xb3 0xcd 0x00 0xff
-# CHECK: lgfi %r0, -2147483648
-0xc0 0x01 0x80 0x00 0x00 0x00
-
-# CHECK: lgfi %r0, -1
-0xc0 0x01 0xff 0xff 0xff 0xff
-
-# CHECK: lgfi %r0, 0
-0xc0 0x01 0x00 0x00 0x00 0x00
-
-# CHECK: lgfi %r0, 1
-0xc0 0x01 0x00 0x00 0x00 0x01
-
-# CHECK: lgfi %r0, 2147483647
-0xc0 0x01 0x7f 0xff 0xff 0xff
-
-# CHECK: lgfi %r15, 0
-0xc0 0xf1 0x00 0x00 0x00 0x00
-
-# CHECK: lgfr %r0, %r15
-0xb9 0x14 0x00 0x0f
-
-# CHECK: lgfr %r7, %r8
-0xb9 0x14 0x00 0x78
-
-# CHECK: lgfr %r15, %r0
-0xb9 0x14 0x00 0xf0
-
# CHECK: lgf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x14
@@ -5485,32 +6595,32 @@
# CHECK: lgf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x14
-# CHECK: lghi %r0, -32768
-0xa7 0x09 0x80 0x00
+# CHECK: lgfi %r0, -2147483648
+0xc0 0x01 0x80 0x00 0x00 0x00
-# CHECK: lghi %r0, -1
-0xa7 0x09 0xff 0xff
+# CHECK: lgfi %r0, -1
+0xc0 0x01 0xff 0xff 0xff 0xff
-# CHECK: lghi %r0, 0
-0xa7 0x09 0x00 0x00
+# CHECK: lgfi %r0, 0
+0xc0 0x01 0x00 0x00 0x00 0x00
-# CHECK: lghi %r0, 1
-0xa7 0x09 0x00 0x01
+# CHECK: lgfi %r0, 1
+0xc0 0x01 0x00 0x00 0x00 0x01
-# CHECK: lghi %r0, 32767
-0xa7 0x09 0x7f 0xff
+# CHECK: lgfi %r0, 2147483647
+0xc0 0x01 0x7f 0xff 0xff 0xff
-# CHECK: lghi %r15, 0
-0xa7 0xf9 0x00 0x00
+# CHECK: lgfi %r15, 0
+0xc0 0xf1 0x00 0x00 0x00 0x00
-# CHECK: lghr %r0, %r15
-0xb9 0x07 0x00 0x0f
+# CHECK: lgfr %r0, %r15
+0xb9 0x14 0x00 0x0f
-# CHECK: lghr %r7, %r8
-0xb9 0x07 0x00 0x78
+# CHECK: lgfr %r7, %r8
+0xb9 0x14 0x00 0x78
-# CHECK: lghr %r15, %r0
-0xb9 0x07 0x00 0xf0
+# CHECK: lgfr %r15, %r0
+0xb9 0x14 0x00 0xf0
# CHECK: lgh %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x15
@@ -5542,95 +6652,65 @@
# CHECK: lgh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x15
-# CHECK: lgr %r0, %r9
-0xb9 0x04 0x00 0x09
-
-# CHECK: lgr %r0, %r15
-0xb9 0x04 0x00 0x0f
-
-# CHECK: lgr %r15, %r0
-0xb9 0x04 0x00 0xf0
-
-# CHECK: lgr %r15, %r9
-0xb9 0x04 0x00 0xf9
-
-# CHECK: lg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x04
-
-# CHECK: lg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x04
-
-# CHECK: lg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x04
-
-# CHECK: lg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x04
-
-# CHECK: lg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x04
-
-# CHECK: lg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x04
-
-# CHECK: lg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x04
+# CHECK: lghi %r0, -32768
+0xa7 0x09 0x80 0x00
-# CHECK: lg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x04
+# CHECK: lghi %r0, -1
+0xa7 0x09 0xff 0xff
-# CHECK: lg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x04
+# CHECK: lghi %r0, 0
+0xa7 0x09 0x00 0x00
-# CHECK: lg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x04
+# CHECK: lghi %r0, 1
+0xa7 0x09 0x00 0x01
-# CHECK: lgat %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x85
+# CHECK: lghi %r0, 32767
+0xa7 0x09 0x7f 0xff
-# CHECK: lgat %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x85
+# CHECK: lghi %r15, 0
+0xa7 0xf9 0x00 0x00
-# CHECK: lgat %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x85
+# CHECK: lghr %r0, %r15
+0xb9 0x07 0x00 0x0f
-# CHECK: lgat %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x85
+# CHECK: lghr %r7, %r8
+0xb9 0x07 0x00 0x78
-# CHECK: lgat %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x85
+# CHECK: lghr %r15, %r0
+0xb9 0x07 0x00 0xf0
-# CHECK: lgat %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x85
+# CHECK: lgr %r0, %r9
+0xb9 0x04 0x00 0x09
-# CHECK: lgat %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x85
+# CHECK: lgr %r0, %r15
+0xb9 0x04 0x00 0x0f
-# CHECK: lgat %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x85
+# CHECK: lgr %r15, %r0
+0xb9 0x04 0x00 0xf0
-# CHECK: lgat %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x85
+# CHECK: lgr %r15, %r9
+0xb9 0x04 0x00 0xf9
-# CHECK: lgat %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x85
+# CHECK: lh %r0, 0
+0x48 0x00 0x00 0x00
-# CHECK: lhi %r0, -32768
-0xa7 0x08 0x80 0x00
+# CHECK: lh %r0, 4095
+0x48 0x00 0x0f 0xff
-# CHECK: lhi %r0, -1
-0xa7 0x08 0xff 0xff
+# CHECK: lh %r0, 0(%r1)
+0x48 0x00 0x10 0x00
-# CHECK: lhi %r0, 0
-0xa7 0x08 0x00 0x00
+# CHECK: lh %r0, 0(%r15)
+0x48 0x00 0xf0 0x00
-# CHECK: lhi %r0, 1
-0xa7 0x08 0x00 0x01
+# CHECK: lh %r0, 4095(%r1,%r15)
+0x48 0x01 0xff 0xff
-# CHECK: lhi %r0, 32767
-0xa7 0x08 0x7f 0xff
+# CHECK: lh %r0, 4095(%r15,%r1)
+0x48 0x0f 0x1f 0xff
-# CHECK: lhi %r15, 0
-0xa7 0xf8 0x00 0x00
+# CHECK: lh %r15, 0
+0x48 0xf0 0x00 0x00
# CHECK: lhh %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0xc4
@@ -5662,35 +6742,32 @@
# CHECK: lhh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc4
-# CHECK: lhr %r0, %r15
-0xb9 0x27 0x00 0x0f
-
-# CHECK: lhr %r7, %r8
-0xb9 0x27 0x00 0x78
+# CHECK: lhi %r0, -32768
+0xa7 0x08 0x80 0x00
-# CHECK: lhr %r15, %r0
-0xb9 0x27 0x00 0xf0
+# CHECK: lhi %r0, -1
+0xa7 0x08 0xff 0xff
-# CHECK: lh %r0, 0
-0x48 0x00 0x00 0x00
+# CHECK: lhi %r0, 0
+0xa7 0x08 0x00 0x00
-# CHECK: lh %r0, 4095
-0x48 0x00 0x0f 0xff
+# CHECK: lhi %r0, 1
+0xa7 0x08 0x00 0x01
-# CHECK: lh %r0, 0(%r1)
-0x48 0x00 0x10 0x00
+# CHECK: lhi %r0, 32767
+0xa7 0x08 0x7f 0xff
-# CHECK: lh %r0, 0(%r15)
-0x48 0x00 0xf0 0x00
+# CHECK: lhi %r15, 0
+0xa7 0xf8 0x00 0x00
-# CHECK: lh %r0, 4095(%r1,%r15)
-0x48 0x01 0xff 0xff
+# CHECK: lhr %r0, %r15
+0xb9 0x27 0x00 0x0f
-# CHECK: lh %r0, 4095(%r15,%r1)
-0x48 0x0f 0x1f 0xff
+# CHECK: lhr %r7, %r8
+0xb9 0x27 0x00 0x78
-# CHECK: lh %r15, 0
-0x48 0xf0 0x00 0x00
+# CHECK: lhr %r15, %r0
+0xb9 0x27 0x00 0xf0
# CHECK: lhy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x78
@@ -5722,15 +6799,6 @@
# CHECK: lhy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x78
-# CHECK: llcr %r0, %r15
-0xb9 0x94 0x00 0x0f
-
-# CHECK: llcr %r7, %r8
-0xb9 0x94 0x00 0x78
-
-# CHECK: llcr %r15, %r0
-0xb9 0x94 0x00 0xf0
-
# CHECK: llc %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x94
@@ -5791,14 +6859,14 @@
# CHECK: llch %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc2
-# CHECK: llgcr %r0, %r15
-0xb9 0x84 0x00 0x0f
+# CHECK: llcr %r0, %r15
+0xb9 0x94 0x00 0x0f
-# CHECK: llgcr %r7, %r8
-0xb9 0x84 0x00 0x78
+# CHECK: llcr %r7, %r8
+0xb9 0x94 0x00 0x78
-# CHECK: llgcr %r15, %r0
-0xb9 0x84 0x00 0xf0
+# CHECK: llcr %r15, %r0
+0xb9 0x94 0x00 0xf0
# CHECK: llgc %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x90
@@ -5830,14 +6898,14 @@
# CHECK: llgc %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x90
-# CHECK: llgfr %r0, %r15
-0xb9 0x16 0x00 0x0f
+# CHECK: llgcr %r0, %r15
+0xb9 0x84 0x00 0x0f
-# CHECK: llgfr %r7, %r8
-0xb9 0x16 0x00 0x78
+# CHECK: llgcr %r7, %r8
+0xb9 0x84 0x00 0x78
-# CHECK: llgfr %r15, %r0
-0xb9 0x16 0x00 0xf0
+# CHECK: llgcr %r15, %r0
+0xb9 0x84 0x00 0xf0
# CHECK: llgf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x16
@@ -5899,14 +6967,53 @@
# CHECK: llgfat %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x9d
-# CHECK: llgtr %r0, %r15
-0xb9 0x17 0x00 0x0f
+# CHECK: llgfr %r0, %r15
+0xb9 0x16 0x00 0x0f
-# CHECK: llgtr %r7, %r8
-0xb9 0x17 0x00 0x78
+# CHECK: llgfr %r7, %r8
+0xb9 0x16 0x00 0x78
-# CHECK: llgtr %r15, %r0
-0xb9 0x17 0x00 0xf0
+# CHECK: llgfr %r15, %r0
+0xb9 0x16 0x00 0xf0
+
+# CHECK: llgh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x91
+
+# CHECK: llgh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x91
+
+# CHECK: llgh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x91
+
+# CHECK: llgh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x91
+
+# CHECK: llgh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x91
+
+# CHECK: llgh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x91
+
+# CHECK: llgh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x91
+
+# CHECK: llgh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x91
+
+# CHECK: llgh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x91
+
+# CHECK: llgh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x91
+
+# CHECK: llghr %r0, %r15
+0xb9 0x85 0x00 0x0f
+
+# CHECK: llghr %r7, %r8
+0xb9 0x85 0x00 0x78
+
+# CHECK: llghr %r15, %r0
+0xb9 0x85 0x00 0xf0
# CHECK: llgt %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x17
@@ -5968,53 +7075,14 @@
# CHECK: llgtat %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x9c
-# CHECK: llghr %r0, %r15
-0xb9 0x85 0x00 0x0f
-
-# CHECK: llghr %r7, %r8
-0xb9 0x85 0x00 0x78
-
-# CHECK: llghr %r15, %r0
-0xb9 0x85 0x00 0xf0
-
-# CHECK: llgh %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x91
-
-# CHECK: llgh %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x91
-
-# CHECK: llgh %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x91
-
-# CHECK: llgh %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x91
-
-# CHECK: llgh %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x91
-
-# CHECK: llgh %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x91
-
-# CHECK: llgh %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x91
-
-# CHECK: llgh %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x91
-
-# CHECK: llgh %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x91
-
-# CHECK: llgh %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x91
-
-# CHECK: llhr %r0, %r15
-0xb9 0x95 0x00 0x0f
+# CHECK: llgtr %r0, %r15
+0xb9 0x17 0x00 0x0f
-# CHECK: llhr %r7, %r8
-0xb9 0x95 0x00 0x78
+# CHECK: llgtr %r7, %r8
+0xb9 0x17 0x00 0x78
-# CHECK: llhr %r15, %r0
-0xb9 0x95 0x00 0xf0
+# CHECK: llgtr %r15, %r0
+0xb9 0x17 0x00 0xf0
# CHECK: llh %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x95
@@ -6076,6 +7144,15 @@
# CHECK: llhh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc6
+# CHECK: llhr %r0, %r15
+0xb9 0x95 0x00 0x0f
+
+# CHECK: llhr %r7, %r8
+0xb9 0x95 0x00 0x78
+
+# CHECK: llhr %r15, %r0
+0xb9 0x95 0x00 0xf0
+
# CHECK: llihf %r0, 0
0xc0 0x0e 0x00 0x00 0x00 0x00
@@ -6172,6 +7249,27 @@
# CHECK: lm %r0, %r0, 4095(%r15)
0x98 0x00 0xff 0xff
+# CHECK: lmd %r0, %r0, 0, 0
+0xef 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: lmd %r2, %r4, 0, 4095
+0xef 0x24 0x00 0x00 0x0f 0xff
+
+# CHECK: lmd %r2, %r4, 0, 0(%r1)
+0xef 0x24 0x00 0x00 0x10 0x00
+
+# CHECK: lmd %r2, %r4, 0, 0(%r15)
+0xef 0x24 0x00 0x00 0xf0 0x00
+
+# CHECK: lmd %r2, %r4, 0(%r1), 4095(%r15)
+0xef 0x24 0x10 0x00 0xff 0xff
+
+# CHECK: lmd %r2, %r4, 0(%r1), 0(%r15)
+0xef 0x24 0x10 0x00 0xf0 0x00
+
+# CHECK: lmd %r2, %r4, 4095(%r1), 0(%r15)
+0xef 0x24 0x1f 0xff 0xf0 0x00
+
# CHECK: lmg %r0, %r0, 0
0xeb 0x00 0x00 0x00 0x00 0x04
@@ -6574,6 +7672,18 @@
# CHECK: lpd %r2, 4095(%r1), 0(%r15)
0xc8 0x24 0x1f 0xff 0xf0 0x00
+# CHECK: lpdbr %f0, %f9
+0xb3 0x10 0x00 0x09
+
+# CHECK: lpdbr %f0, %f15
+0xb3 0x10 0x00 0x0f
+
+# CHECK: lpdbr %f15, %f0
+0xb3 0x10 0x00 0xf0
+
+# CHECK: lpdbr %f15, %f9
+0xb3 0x10 0x00 0xf9
+
# CHECK: lpdg %r0, 0, 0
0xc8 0x05 0x00 0x00 0x00 0x00
@@ -6595,18 +7705,6 @@
# CHECK: lpdg %r2, 4095(%r1), 0(%r15)
0xc8 0x25 0x1f 0xff 0xf0 0x00
-# CHECK: lpdbr %f0, %f9
-0xb3 0x10 0x00 0x09
-
-# CHECK: lpdbr %f0, %f15
-0xb3 0x10 0x00 0x0f
-
-# CHECK: lpdbr %f15, %f0
-0xb3 0x10 0x00 0xf0
-
-# CHECK: lpdbr %f15, %f9
-0xb3 0x10 0x00 0xf9
-
# CHECK: lpebr %f0, %f9
0xb3 0x00 0x00 0x09
@@ -6643,18 +7741,6 @@
# CHECK: lpgr %r7, %r8
0xb9 0x00 0x00 0x78
-# CHECK: lpr %r0, %r0
-0x10 0x00
-
-# CHECK: lpr %r0, %r15
-0x10 0x0f
-
-# CHECK: lpr %r15, %r0
-0x10 0xf0
-
-# CHECK: lpr %r7, %r8
-0x10 0x78
-
# CHECK: lpq %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x8f
@@ -6685,6 +7771,18 @@
# CHECK: lpq %r14, 0
0xe3 0xe0 0x00 0x00 0x00 0x8f
+# CHECK: lpr %r0, %r0
+0x10 0x00
+
+# CHECK: lpr %r0, %r15
+0x10 0x0f
+
+# CHECK: lpr %r15, %r0
+0x10 0xf0
+
+# CHECK: lpr %r7, %r8
+0x10 0x78
+
# CHECK: lpxbr %f0, %f8
0xb3 0x40 0x00 0x08
@@ -6709,20 +7807,35 @@
# CHECK: lr %r15, %r9
0x18 0xf9
-# CHECK: lrvgr %r0, %r0
-0xb9 0x0f 0x00 0x00
+# CHECK: lrv %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x1e
-# CHECK: lrvgr %r0, %r15
-0xb9 0x0f 0x00 0x0f
+# CHECK: lrv %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x1e
-# CHECK: lrvgr %r15, %r0
-0xb9 0x0f 0x00 0xf0
+# CHECK: lrv %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x1e
-# CHECK: lrvgr %r7, %r8
-0xb9 0x0f 0x00 0x78
+# CHECK: lrv %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x1e
-# CHECK: lrvgr %r15, %r15
-0xb9 0x0f 0x00 0xff
+# CHECK: lrv %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x1e
+
+# CHECK: lrv %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x1e
+
+# CHECK: lrv %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x1e
+
+# CHECK: lrv %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x1e
+
+# CHECK: lrv %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x1e
+
+# CHECK: lrv %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x1e
# CHECK: lrvg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x0f
@@ -6754,20 +7867,20 @@
# CHECK: lrvg %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x0f
-# CHECK: lrvr %r0, %r0
-0xb9 0x1f 0x00 0x00
+# CHECK: lrvgr %r0, %r0
+0xb9 0x0f 0x00 0x00
-# CHECK: lrvr %r0, %r15
-0xb9 0x1f 0x00 0x0f
+# CHECK: lrvgr %r0, %r15
+0xb9 0x0f 0x00 0x0f
-# CHECK: lrvr %r15, %r0
-0xb9 0x1f 0x00 0xf0
+# CHECK: lrvgr %r15, %r0
+0xb9 0x0f 0x00 0xf0
-# CHECK: lrvr %r7, %r8
-0xb9 0x1f 0x00 0x78
+# CHECK: lrvgr %r7, %r8
+0xb9 0x0f 0x00 0x78
-# CHECK: lrvr %r15, %r15
-0xb9 0x1f 0x00 0xff
+# CHECK: lrvgr %r15, %r15
+0xb9 0x0f 0x00 0xff
# CHECK: lrvh %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x1f
@@ -6799,86 +7912,20 @@
# CHECK: lrvh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x1f
-# CHECK: lrv %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x1e
-
-# CHECK: lrv %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x1e
-
-# CHECK: lrv %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x1e
-
-# CHECK: lrv %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x1e
-
-# CHECK: lrv %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x1e
-
-# CHECK: lrv %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x1e
-
-# CHECK: lrv %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x1e
-
-# CHECK: lrv %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x1e
-
-# CHECK: lrv %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x1e
-
-# CHECK: lrv %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x1e
-
-# CHECK: l %r0, 0
-0x58 0x00 0x00 0x00
-
-# CHECK: l %r0, 4095
-0x58 0x00 0x0f 0xff
-
-# CHECK: l %r0, 0(%r1)
-0x58 0x00 0x10 0x00
-
-# CHECK: l %r0, 0(%r15)
-0x58 0x00 0xf0 0x00
-
-# CHECK: l %r0, 4095(%r1,%r15)
-0x58 0x01 0xff 0xff
-
-# CHECK: l %r0, 4095(%r15,%r1)
-0x58 0x0f 0x1f 0xff
-
-# CHECK: l %r15, 0
-0x58 0xf0 0x00 0x00
-
-# CHECK: lat %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x9f
-
-# CHECK: lat %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x9f
-
-# CHECK: lat %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x9f
-
-# CHECK: lat %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x9f
-
-# CHECK: lat %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x9f
-
-# CHECK: lat %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x9f
+# CHECK: lrvr %r0, %r0
+0xb9 0x1f 0x00 0x00
-# CHECK: lat %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x9f
+# CHECK: lrvr %r0, %r15
+0xb9 0x1f 0x00 0x0f
-# CHECK: lat %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x9f
+# CHECK: lrvr %r15, %r0
+0xb9 0x1f 0x00 0xf0
-# CHECK: lat %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x9f
+# CHECK: lrvr %r7, %r8
+0xb9 0x1f 0x00 0x78
-# CHECK: lat %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x9f
+# CHECK: lrvr %r15, %r15
+0xb9 0x1f 0x00 0xff
# CHECK: lt %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x12
@@ -7111,23 +8158,26 @@
# CHECK: lzxr %f13
0xb3 0x76 0x00 0xd0
-# CHECK: madbr %f0, %f0, %f0
-0xb3 0x1e 0x00 0x00
+# CHECK: m %r0, 0
+0x5c 0x00 0x00 0x00
-# CHECK: madbr %f0, %f0, %f15
-0xb3 0x1e 0x00 0x0f
+# CHECK: m %r0, 4095
+0x5c 0x00 0x0f 0xff
-# CHECK: madbr %f0, %f15, %f0
-0xb3 0x1e 0x00 0xf0
+# CHECK: m %r0, 0(%r1)
+0x5c 0x00 0x10 0x00
-# CHECK: madbr %f15, %f0, %f0
-0xb3 0x1e 0xf0 0x00
+# CHECK: m %r0, 0(%r15)
+0x5c 0x00 0xf0 0x00
-# CHECK: madbr %f7, %f8, %f9
-0xb3 0x1e 0x70 0x89
+# CHECK: m %r0, 4095(%r1,%r15)
+0x5c 0x01 0xff 0xff
-# CHECK: madbr %f15, %f15, %f15
-0xb3 0x1e 0xf0 0xff
+# CHECK: m %r0, 4095(%r15,%r1)
+0x5c 0x0f 0x1f 0xff
+
+# CHECK: m %r14, 0
+0x5c 0xe0 0x00 0x00
# CHECK: madb %f0, %f0, 0
0xed 0x00 0x00 0x00 0x00 0x1e
@@ -7156,23 +8206,23 @@
# CHECK: madb %f15, %f15, 0
0xed 0xf0 0x00 0x00 0xf0 0x1e
-# CHECK: maebr %f0, %f0, %f0
-0xb3 0x0e 0x00 0x00
+# CHECK: madbr %f0, %f0, %f0
+0xb3 0x1e 0x00 0x00
-# CHECK: maebr %f0, %f0, %f15
-0xb3 0x0e 0x00 0x0f
+# CHECK: madbr %f0, %f0, %f15
+0xb3 0x1e 0x00 0x0f
-# CHECK: maebr %f0, %f15, %f0
-0xb3 0x0e 0x00 0xf0
+# CHECK: madbr %f0, %f15, %f0
+0xb3 0x1e 0x00 0xf0
-# CHECK: maebr %f15, %f0, %f0
-0xb3 0x0e 0xf0 0x00
+# CHECK: madbr %f15, %f0, %f0
+0xb3 0x1e 0xf0 0x00
-# CHECK: maebr %f7, %f8, %f9
-0xb3 0x0e 0x70 0x89
+# CHECK: madbr %f7, %f8, %f9
+0xb3 0x1e 0x70 0x89
-# CHECK: maebr %f15, %f15, %f15
-0xb3 0x0e 0xf0 0xff
+# CHECK: madbr %f15, %f15, %f15
+0xb3 0x1e 0xf0 0xff
# CHECK: maeb %f0, %f0, 0
0xed 0x00 0x00 0x00 0x00 0x0e
@@ -7201,17 +8251,44 @@
# CHECK: maeb %f15, %f15, 0
0xed 0xf0 0x00 0x00 0xf0 0x0e
-# CHECK: mdbr %f0, %f0
-0xb3 0x1c 0x00 0x00
+# CHECK: maebr %f0, %f0, %f0
+0xb3 0x0e 0x00 0x00
-# CHECK: mdbr %f0, %f15
-0xb3 0x1c 0x00 0x0f
+# CHECK: maebr %f0, %f0, %f15
+0xb3 0x0e 0x00 0x0f
-# CHECK: mdbr %f7, %f8
-0xb3 0x1c 0x00 0x78
+# CHECK: maebr %f0, %f15, %f0
+0xb3 0x0e 0x00 0xf0
-# CHECK: mdbr %f15, %f0
-0xb3 0x1c 0x00 0xf0
+# CHECK: maebr %f15, %f0, %f0
+0xb3 0x0e 0xf0 0x00
+
+# CHECK: maebr %f7, %f8, %f9
+0xb3 0x0e 0x70 0x89
+
+# CHECK: maebr %f15, %f15, %f15
+0xb3 0x0e 0xf0 0xff
+
+# CHECK: mc 0, 0
+0xaf 0x00 0x00 0x00
+
+# CHECK: mc 4095, 0
+0xaf 0x00 0x0f 0xff
+
+# CHECK: mc 0, 255
+0xaf 0xff 0x00 0x00
+
+# CHECK: mc 0(%r1), 42
+0xaf 0x2a 0x10 0x00
+
+# CHECK: mc 0(%r15), 42
+0xaf 0x2a 0xf0 0x00
+
+# CHECK: mc 4095(%r1), 42
+0xaf 0x2a 0x1f 0xff
+
+# CHECK: mc 4095(%r15), 42
+0xaf 0x2a 0xff 0xff
# CHECK: mdb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x1c
@@ -7234,17 +8311,17 @@
# CHECK: mdb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x1c
-# CHECK: mdebr %f0, %f0
-0xb3 0x0c 0x00 0x00
+# CHECK: mdbr %f0, %f0
+0xb3 0x1c 0x00 0x00
-# CHECK: mdebr %f0, %f15
-0xb3 0x0c 0x00 0x0f
+# CHECK: mdbr %f0, %f15
+0xb3 0x1c 0x00 0x0f
-# CHECK: mdebr %f7, %f8
-0xb3 0x0c 0x00 0x78
+# CHECK: mdbr %f7, %f8
+0xb3 0x1c 0x00 0x78
-# CHECK: mdebr %f15, %f0
-0xb3 0x0c 0x00 0xf0
+# CHECK: mdbr %f15, %f0
+0xb3 0x1c 0x00 0xf0
# CHECK: mdeb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x0c
@@ -7267,17 +8344,17 @@
# CHECK: mdeb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x0c
-# CHECK: meebr %f0, %f0
-0xb3 0x17 0x00 0x00
+# CHECK: mdebr %f0, %f0
+0xb3 0x0c 0x00 0x00
-# CHECK: meebr %f0, %f15
-0xb3 0x17 0x00 0x0f
+# CHECK: mdebr %f0, %f15
+0xb3 0x0c 0x00 0x0f
-# CHECK: meebr %f7, %f8
-0xb3 0x17 0x00 0x78
+# CHECK: mdebr %f7, %f8
+0xb3 0x0c 0x00 0x78
-# CHECK: meebr %f15, %f0
-0xb3 0x17 0x00 0xf0
+# CHECK: mdebr %f15, %f0
+0xb3 0x0c 0x00 0xf0
# CHECK: meeb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x17
@@ -7300,6 +8377,48 @@
# CHECK: meeb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x17
+# CHECK: meebr %f0, %f0
+0xb3 0x17 0x00 0x00
+
+# CHECK: meebr %f0, %f15
+0xb3 0x17 0x00 0x0f
+
+# CHECK: meebr %f7, %f8
+0xb3 0x17 0x00 0x78
+
+# CHECK: meebr %f15, %f0
+0xb3 0x17 0x00 0xf0
+
+# CHECK: mfy %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x5c
+
+# CHECK: mfy %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x5c
+
+# CHECK: mfy %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x5c
+
+# CHECK: mfy %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x5c
+
+# CHECK: mfy %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x5c
+
+# CHECK: mfy %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x5c
+
+# CHECK: mfy %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x5c
+
+# CHECK: mfy %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x5c
+
+# CHECK: mfy %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x5c
+
+# CHECK: mfy %r14, 0
+0xe3 0xe0 0x00 0x00 0x00 0x5c
+
# CHECK: mghi %r0, -32768
0xa7 0x0d 0x80 0x00
@@ -7318,24 +8437,6 @@
# CHECK: mghi %r15, 0
0xa7 0xfd 0x00 0x00
-# CHECK: mhi %r0, -32768
-0xa7 0x0c 0x80 0x00
-
-# CHECK: mhi %r0, -1
-0xa7 0x0c 0xff 0xff
-
-# CHECK: mhi %r0, 0
-0xa7 0x0c 0x00 0x00
-
-# CHECK: mhi %r0, 1
-0xa7 0x0c 0x00 0x01
-
-# CHECK: mhi %r0, 32767
-0xa7 0x0c 0x7f 0xff
-
-# CHECK: mhi %r15, 0
-0xa7 0xfc 0x00 0x00
-
# CHECK: mh %r0, 0
0x4c 0x00 0x00 0x00
@@ -7357,6 +8458,24 @@
# CHECK: mh %r15, 0
0x4c 0xf0 0x00 0x00
+# CHECK: mhi %r0, -32768
+0xa7 0x0c 0x80 0x00
+
+# CHECK: mhi %r0, -1
+0xa7 0x0c 0xff 0xff
+
+# CHECK: mhi %r0, 0
+0xa7 0x0c 0x00 0x00
+
+# CHECK: mhi %r0, 1
+0xa7 0x0c 0x00 0x01
+
+# CHECK: mhi %r0, 32767
+0xa7 0x0c 0x7f 0xff
+
+# CHECK: mhi %r15, 0
+0xa7 0xfc 0x00 0x00
+
# CHECK: mhy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x7c
@@ -7387,17 +8506,35 @@
# CHECK: mhy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x7c
-# CHECK: mlgr %r0, %r0
-0xb9 0x86 0x00 0x00
+# CHECK: ml %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x96
-# CHECK: mlgr %r0, %r15
-0xb9 0x86 0x00 0x0f
+# CHECK: ml %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x96
-# CHECK: mlgr %r14, %r0
-0xb9 0x86 0x00 0xe0
+# CHECK: ml %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x96
-# CHECK: mlgr %r6, %r9
-0xb9 0x86 0x00 0x69
+# CHECK: ml %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x96
+
+# CHECK: ml %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x96
+
+# CHECK: ml %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x96
+
+# CHECK: ml %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x96
+
+# CHECK: ml %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x96
+
+# CHECK: ml %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x96
+
+# CHECK: ml %r14, 0
+0xe3 0xe0 0x00 0x00 0x00 0x96
# CHECK: mlg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x86
@@ -7429,23 +8566,104 @@
# CHECK: mlg %r14, 0
0xe3 0xe0 0x00 0x00 0x00 0x86
-# CHECK: msdbr %f0, %f0, %f0
-0xb3 0x1f 0x00 0x00
+# CHECK: mlgr %r0, %r0
+0xb9 0x86 0x00 0x00
-# CHECK: msdbr %f0, %f0, %f15
-0xb3 0x1f 0x00 0x0f
+# CHECK: mlgr %r0, %r15
+0xb9 0x86 0x00 0x0f
-# CHECK: msdbr %f0, %f15, %f0
-0xb3 0x1f 0x00 0xf0
+# CHECK: mlgr %r14, %r0
+0xb9 0x86 0x00 0xe0
-# CHECK: msdbr %f15, %f0, %f0
-0xb3 0x1f 0xf0 0x00
+# CHECK: mlgr %r6, %r9
+0xb9 0x86 0x00 0x69
-# CHECK: msdbr %f7, %f8, %f9
-0xb3 0x1f 0x70 0x89
+# CHECK: mlr %r0, %r0
+0xb9 0x96 0x00 0x00
-# CHECK: msdbr %f15, %f15, %f15
-0xb3 0x1f 0xf0 0xff
+# CHECK: mlr %r0, %r15
+0xb9 0x96 0x00 0x0f
+
+# CHECK: mlr %r14, %r0
+0xb9 0x96 0x00 0xe0
+
+# CHECK: mlr %r6, %r9
+0xb9 0x96 0x00 0x69
+
+# CHECK: mp 0(1), 0(1)
+0xfc 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: mp 0(1), 0(1,%r1)
+0xfc 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: mp 0(1), 0(1,%r15)
+0xfc 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: mp 0(1), 4095(1)
+0xfc 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: mp 0(1), 4095(1,%r1)
+0xfc 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: mp 0(1), 4095(1,%r15)
+0xfc 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: mp 0(1,%r1), 0(1)
+0xfc 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: mp 0(1,%r15), 0(1)
+0xfc 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: mp 4095(1,%r1), 0(1)
+0xfc 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: mp 4095(1,%r15), 0(1)
+0xfc 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: mp 0(16,%r1), 0(1)
+0xfc 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: mp 0(16,%r15), 0(1)
+0xfc 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: mp 0(1), 0(16,%r1)
+0xfc 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: mp 0(1), 0(16,%r15)
+0xfc 0x0f 0x00 0x00 0xf0 0x00
+
+# CHECK: mr %r0, %r0
+0x1c 0x00
+
+# CHECK: mr %r0, %r15
+0x1c 0x0f
+
+# CHECK: mr %r14, %r0
+0x1c 0xe0
+
+# CHECK: mr %r6, %r9
+0x1c 0x69
+
+# CHECK: ms %r0, 0
+0x71 0x00 0x00 0x00
+
+# CHECK: ms %r0, 4095
+0x71 0x00 0x0f 0xff
+
+# CHECK: ms %r0, 0(%r1)
+0x71 0x00 0x10 0x00
+
+# CHECK: ms %r0, 0(%r15)
+0x71 0x00 0xf0 0x00
+
+# CHECK: ms %r0, 4095(%r1,%r15)
+0x71 0x01 0xff 0xff
+
+# CHECK: ms %r0, 4095(%r15,%r1)
+0x71 0x0f 0x1f 0xff
+
+# CHECK: ms %r15, 0
+0x71 0xf0 0x00 0x00
# CHECK: msdb %f0, %f0, 0
0xed 0x00 0x00 0x00 0x00 0x1f
@@ -7474,23 +8692,23 @@
# CHECK: msdb %f15, %f15, 0
0xed 0xf0 0x00 0x00 0xf0 0x1f
-# CHECK: msebr %f0, %f0, %f0
-0xb3 0x0f 0x00 0x00
+# CHECK: msdbr %f0, %f0, %f0
+0xb3 0x1f 0x00 0x00
-# CHECK: msebr %f0, %f0, %f15
-0xb3 0x0f 0x00 0x0f
+# CHECK: msdbr %f0, %f0, %f15
+0xb3 0x1f 0x00 0x0f
-# CHECK: msebr %f0, %f15, %f0
-0xb3 0x0f 0x00 0xf0
+# CHECK: msdbr %f0, %f15, %f0
+0xb3 0x1f 0x00 0xf0
-# CHECK: msebr %f15, %f0, %f0
-0xb3 0x0f 0xf0 0x00
+# CHECK: msdbr %f15, %f0, %f0
+0xb3 0x1f 0xf0 0x00
-# CHECK: msebr %f7, %f8, %f9
-0xb3 0x0f 0x70 0x89
+# CHECK: msdbr %f7, %f8, %f9
+0xb3 0x1f 0x70 0x89
-# CHECK: msebr %f15, %f15, %f15
-0xb3 0x0f 0xf0 0xff
+# CHECK: msdbr %f15, %f15, %f15
+0xb3 0x1f 0xf0 0xff
# CHECK: mseb %f0, %f0, 0
0xed 0x00 0x00 0x00 0x00 0x0f
@@ -7519,6 +8737,24 @@
# CHECK: mseb %f15, %f15, 0
0xed 0xf0 0x00 0x00 0xf0 0x0f
+# CHECK: msebr %f0, %f0, %f0
+0xb3 0x0f 0x00 0x00
+
+# CHECK: msebr %f0, %f0, %f15
+0xb3 0x0f 0x00 0x0f
+
+# CHECK: msebr %f0, %f15, %f0
+0xb3 0x0f 0x00 0xf0
+
+# CHECK: msebr %f15, %f0, %f0
+0xb3 0x0f 0xf0 0x00
+
+# CHECK: msebr %f7, %f8, %f9
+0xb3 0x0f 0x70 0x89
+
+# CHECK: msebr %f15, %f15, %f15
+0xb3 0x0f 0xf0 0xff
+
# CHECK: msfi %r0, -2147483648
0xc2 0x01 0x80 0x00 0x00 0x00
@@ -7537,35 +8773,35 @@
# CHECK: msfi %r15, 0
0xc2 0xf1 0x00 0x00 0x00 0x00
-# CHECK: msgfi %r0, -2147483648
-0xc2 0x00 0x80 0x00 0x00 0x00
+# CHECK: msg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x0c
-# CHECK: msgfi %r0, -1
-0xc2 0x00 0xff 0xff 0xff 0xff
+# CHECK: msg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x0c
-# CHECK: msgfi %r0, 0
-0xc2 0x00 0x00 0x00 0x00 0x00
+# CHECK: msg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x0c
-# CHECK: msgfi %r0, 1
-0xc2 0x00 0x00 0x00 0x00 0x01
+# CHECK: msg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x0c
-# CHECK: msgfi %r0, 2147483647
-0xc2 0x00 0x7f 0xff 0xff 0xff
+# CHECK: msg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x0c
-# CHECK: msgfi %r15, 0
-0xc2 0xf0 0x00 0x00 0x00 0x00
+# CHECK: msg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x0c
-# CHECK: msgfr %r0, %r0
-0xb9 0x1c 0x00 0x00
+# CHECK: msg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x0c
-# CHECK: msgfr %r0, %r15
-0xb9 0x1c 0x00 0x0f
+# CHECK: msg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x0c
-# CHECK: msgfr %r15, %r0
-0xb9 0x1c 0x00 0xf0
+# CHECK: msg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x0c
-# CHECK: msgfr %r7, %r8
-0xb9 0x1c 0x00 0x78
+# CHECK: msg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x0c
# CHECK: msgf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x1c
@@ -7597,47 +8833,47 @@
# CHECK: msgf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x1c
-# CHECK: msgr %r0, %r0
-0xb9 0x0c 0x00 0x00
+# CHECK: msgfi %r0, -2147483648
+0xc2 0x00 0x80 0x00 0x00 0x00
-# CHECK: msgr %r0, %r15
-0xb9 0x0c 0x00 0x0f
+# CHECK: msgfi %r0, -1
+0xc2 0x00 0xff 0xff 0xff 0xff
-# CHECK: msgr %r15, %r0
-0xb9 0x0c 0x00 0xf0
+# CHECK: msgfi %r0, 0
+0xc2 0x00 0x00 0x00 0x00 0x00
-# CHECK: msgr %r7, %r8
-0xb9 0x0c 0x00 0x78
+# CHECK: msgfi %r0, 1
+0xc2 0x00 0x00 0x00 0x00 0x01
-# CHECK: msg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x0c
+# CHECK: msgfi %r0, 2147483647
+0xc2 0x00 0x7f 0xff 0xff 0xff
-# CHECK: msg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x0c
+# CHECK: msgfi %r15, 0
+0xc2 0xf0 0x00 0x00 0x00 0x00
-# CHECK: msg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x0c
+# CHECK: msgfr %r0, %r0
+0xb9 0x1c 0x00 0x00
-# CHECK: msg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x0c
+# CHECK: msgfr %r0, %r15
+0xb9 0x1c 0x00 0x0f
-# CHECK: msg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x0c
+# CHECK: msgfr %r15, %r0
+0xb9 0x1c 0x00 0xf0
-# CHECK: msg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x0c
+# CHECK: msgfr %r7, %r8
+0xb9 0x1c 0x00 0x78
-# CHECK: msg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x0c
+# CHECK: msgr %r0, %r0
+0xb9 0x0c 0x00 0x00
-# CHECK: msg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x0c
+# CHECK: msgr %r0, %r15
+0xb9 0x0c 0x00 0x0f
-# CHECK: msg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x0c
+# CHECK: msgr %r15, %r0
+0xb9 0x0c 0x00 0xf0
-# CHECK: msg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x0c
+# CHECK: msgr %r7, %r8
+0xb9 0x0c 0x00 0x78
# CHECK: msr %r0, %r0
0xb2 0x52 0x00 0x00
@@ -7651,27 +8887,6 @@
# CHECK: msr %r7, %r8
0xb2 0x52 0x00 0x78
-# CHECK: ms %r0, 0
-0x71 0x00 0x00 0x00
-
-# CHECK: ms %r0, 4095
-0x71 0x00 0x0f 0xff
-
-# CHECK: ms %r0, 0(%r1)
-0x71 0x00 0x10 0x00
-
-# CHECK: ms %r0, 0(%r15)
-0x71 0x00 0xf0 0x00
-
-# CHECK: ms %r0, 4095(%r1,%r15)
-0x71 0x01 0xff 0xff
-
-# CHECK: ms %r0, 4095(%r15,%r1)
-0x71 0x0f 0x1f 0xff
-
-# CHECK: ms %r15, 0
-0x71 0xf0 0x00 0x00
-
# CHECK: msy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x51
@@ -7738,6 +8953,42 @@
# CHECK: mvc 0(256,%r15), 0
0xd2 0xff 0xf0 0x00 0x00 0x00
+# CHECK: mvcin 0(1), 0
+0xe8 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: mvcin 0(1), 0(%r1)
+0xe8 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: mvcin 0(1), 0(%r15)
+0xe8 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: mvcin 0(1), 4095
+0xe8 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: mvcin 0(1), 4095(%r1)
+0xe8 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: mvcin 0(1), 4095(%r15)
+0xe8 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: mvcin 0(1,%r1), 0
+0xe8 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: mvcin 0(1,%r15), 0
+0xe8 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcin 4095(1,%r1), 0
+0xe8 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: mvcin 4095(1,%r15), 0
+0xe8 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: mvcin 0(256,%r1), 0
+0xe8 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: mvcin 0(256,%r15), 0
+0xe8 0xff 0xf0 0x00 0x00 0x00
+
# CHECK: mvck 0(%r0), 0, %r0
0xd9 0x00 0x00 0x00 0x00 0x00
@@ -7759,6 +9010,69 @@
# CHECK: mvck 4095(%r15,%r1), 0(%r15), %r2
0xd9 0xf2 0x1f 0xff 0xf0 0x00
+# CHECK: mvcl %r0, %r8
+0x0e 0x08
+
+# CHECK: mvcl %r0, %r14
+0x0e 0x0e
+
+# CHECK: mvcl %r14, %r0
+0x0e 0xe0
+
+# CHECK: mvcl %r14, %r8
+0x0e 0xe8
+
+# CHECK: mvcle %r0, %r0, 0
+0xa8 0x00 0x00 0x00
+
+# CHECK: mvcle %r0, %r14, 4095
+0xa8 0x0e 0x0f 0xff
+
+# CHECK: mvcle %r0, %r0, 0(%r1)
+0xa8 0x00 0x10 0x00
+
+# CHECK: mvcle %r0, %r0, 0(%r15)
+0xa8 0x00 0xf0 0x00
+
+# CHECK: mvcle %r0, %r14, 4095(%r15)
+0xa8 0x0e 0xff 0xff
+
+# CHECK: mvcle %r0, %r0, 4095(%r1)
+0xa8 0x00 0x1f 0xff
+
+# CHECK: mvcle %r14, %r0, 0
+0xa8 0xe0 0x00 0x00
+
+# CHECK: mvclu %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x8e
+
+# CHECK: mvclu %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x8e
+
+# CHECK: mvclu %r0, %r14, 0
+0xeb 0x0e 0x00 0x00 0x00 0x8e
+
+# CHECK: mvclu %r0, %r14, 1
+0xeb 0x0e 0x00 0x01 0x00 0x8e
+
+# CHECK: mvclu %r0, %r8, 524287
+0xeb 0x08 0x0f 0xff 0x7f 0x8e
+
+# CHECK: mvclu %r0, %r8, 0(%r1)
+0xeb 0x08 0x10 0x00 0x00 0x8e
+
+# CHECK: mvclu %r0, %r4, 0(%r15)
+0xeb 0x04 0xf0 0x00 0x00 0x8e
+
+# CHECK: mvclu %r0, %r4, 524287(%r15)
+0xeb 0x04 0xff 0xff 0x7f 0x8e
+
+# CHECK: mvclu %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x8e
+
+# CHECK: mvclu %r14, %r0, 0
+0xeb 0xe0 0x00 0x00 0x00 0x8e
+
# CHECK: mvghi 0, 0
0xe5 0x48 0x00 0x00 0x00 0x00
@@ -7909,6 +9223,84 @@
# CHECK: mviy 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x52
+# CHECK: mvn 0(1), 0
+0xd1 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: mvn 0(1), 0(%r1)
+0xd1 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: mvn 0(1), 0(%r15)
+0xd1 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: mvn 0(1), 4095
+0xd1 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: mvn 0(1), 4095(%r1)
+0xd1 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: mvn 0(1), 4095(%r15)
+0xd1 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: mvn 0(1,%r1), 0
+0xd1 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: mvn 0(1,%r15), 0
+0xd1 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: mvn 4095(1,%r1), 0
+0xd1 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: mvn 4095(1,%r15), 0
+0xd1 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: mvn 0(256,%r1), 0
+0xd1 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: mvn 0(256,%r15), 0
+0xd1 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: mvo 0(1), 0(1)
+0xf1 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: mvo 0(1), 0(1,%r1)
+0xf1 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: mvo 0(1), 0(1,%r15)
+0xf1 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: mvo 0(1), 4095(1)
+0xf1 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: mvo 0(1), 4095(1,%r1)
+0xf1 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: mvo 0(1), 4095(1,%r15)
+0xf1 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: mvo 0(1,%r1), 0(1)
+0xf1 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: mvo 0(1,%r15), 0(1)
+0xf1 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: mvo 4095(1,%r1), 0(1)
+0xf1 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: mvo 4095(1,%r15), 0(1)
+0xf1 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: mvo 0(16,%r1), 0(1)
+0xf1 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: mvo 0(16,%r15), 0(1)
+0xf1 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: mvo 0(1), 0(16,%r1)
+0xf1 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: mvo 0(1), 0(16,%r15)
+0xf1 0x0f 0x00 0x00 0xf0 0x00
+
# CHECK: mvst %r0, %r0
0xb2 0x55 0x00 0x00
@@ -7921,6 +9313,42 @@
# CHECK: mvst %r7, %r8
0xb2 0x55 0x00 0x78
+# CHECK: mvz 0(1), 0
+0xd3 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: mvz 0(1), 0(%r1)
+0xd3 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: mvz 0(1), 0(%r15)
+0xd3 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: mvz 0(1), 4095
+0xd3 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: mvz 0(1), 4095(%r1)
+0xd3 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: mvz 0(1), 4095(%r15)
+0xd3 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: mvz 0(1,%r1), 0
+0xd3 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: mvz 0(1,%r15), 0
+0xd3 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: mvz 4095(1,%r1), 0
+0xd3 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: mvz 4095(1,%r15), 0
+0xd3 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: mvz 0(256,%r1), 0
+0xd3 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: mvz 0(256,%r15), 0
+0xd3 0xff 0xf0 0x00 0x00 0x00
+
# CHECK: mxbr %f0, %f0
0xb3 0x4c 0x00 0x00
@@ -7933,18 +9361,6 @@
# CHECK: mxbr %f13, %f13
0xb3 0x4c 0x00 0xdd
-# CHECK: mxdbr %f0, %f0
-0xb3 0x07 0x00 0x00
-
-# CHECK: mxdbr %f0, %f15
-0xb3 0x07 0x00 0x0f
-
-# CHECK: mxdbr %f8, %f8
-0xb3 0x07 0x00 0x88
-
-# CHECK: mxdbr %f13, %f0
-0xb3 0x07 0x00 0xd0
-
# CHECK: mxdb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x07
@@ -7966,6 +9382,39 @@
# CHECK: mxdb %f13, 0
0xed 0xd0 0x00 0x00 0x00 0x07
+# CHECK: mxdbr %f0, %f0
+0xb3 0x07 0x00 0x00
+
+# CHECK: mxdbr %f0, %f15
+0xb3 0x07 0x00 0x0f
+
+# CHECK: mxdbr %f8, %f8
+0xb3 0x07 0x00 0x88
+
+# CHECK: mxdbr %f13, %f0
+0xb3 0x07 0x00 0xd0
+
+# CHECK: n %r0, 0
+0x54 0x00 0x00 0x00
+
+# CHECK: n %r0, 4095
+0x54 0x00 0x0f 0xff
+
+# CHECK: n %r0, 0(%r1)
+0x54 0x00 0x10 0x00
+
+# CHECK: n %r0, 0(%r15)
+0x54 0x00 0xf0 0x00
+
+# CHECK: n %r0, 4095(%r1,%r15)
+0x54 0x01 0xff 0xff
+
+# CHECK: n %r0, 4095(%r15,%r1)
+0x54 0x0f 0x1f 0xff
+
+# CHECK: n %r15, 0
+0x54 0xf0 0x00 0x00
+
# CHECK: nc 0(1), 0
0xd4 0x00 0x00 0x00 0x00 0x00
@@ -8002,27 +9451,9 @@
# CHECK: nc 0(256,%r15), 0
0xd4 0xff 0xf0 0x00 0x00 0x00
-# CHECK: ngr %r0, %r0
-0xb9 0x80 0x00 0x00
-
-# CHECK: ngr %r0, %r15
-0xb9 0x80 0x00 0x0f
-
-# CHECK: ngr %r15, %r0
-0xb9 0x80 0x00 0xf0
-
-# CHECK: ngr %r7, %r8
-0xb9 0x80 0x00 0x78
-
# CHECK: ng %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x80
-# CHECK: ngrk %r0, %r0, %r0
-0xb9 0xe4 0x00 0x00
-
-# CHECK: ngrk %r2, %r3, %r4
-0xb9 0xe4 0x40 0x23
-
# CHECK: ng %r0, -1
0xe3 0x00 0x0f 0xff 0xff 0x80
@@ -8050,6 +9481,57 @@
# CHECK: ng %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x80
+# CHECK: ngr %r0, %r0
+0xb9 0x80 0x00 0x00
+
+# CHECK: ngr %r0, %r15
+0xb9 0x80 0x00 0x0f
+
+# CHECK: ngr %r15, %r0
+0xb9 0x80 0x00 0xf0
+
+# CHECK: ngr %r7, %r8
+0xb9 0x80 0x00 0x78
+
+# CHECK: ngrk %r0, %r0, %r0
+0xb9 0xe4 0x00 0x00
+
+# CHECK: ngrk %r2, %r3, %r4
+0xb9 0xe4 0x40 0x23
+
+# CHECK: ni 0, 0
+0x94 0x00 0x00 0x00
+
+# CHECK: ni 4095, 0
+0x94 0x00 0x0f 0xff
+
+# CHECK: ni 0, 255
+0x94 0xff 0x00 0x00
+
+# CHECK: ni 0(%r1), 42
+0x94 0x2a 0x10 0x00
+
+# CHECK: ni 0(%r15), 42
+0x94 0x2a 0xf0 0x00
+
+# CHECK: ni 4095(%r1), 42
+0x94 0x2a 0x1f 0xff
+
+# CHECK: ni 4095(%r15), 42
+0x94 0x2a 0xff 0xff
+
+# CHECK: niai 0, 0
+0xb2 0xfa 0x00 0x00
+
+# CHECK: niai 15, 0
+0xb2 0xfa 0x00 0xf0
+
+# CHECK: niai 0, 15
+0xb2 0xfa 0x00 0x0f
+
+# CHECK: niai 15, 15
+0xb2 0xfa 0x00 0xff
+
# CHECK: nihf %r0, 0
0xc0 0x0a 0x00 0x00 0x00 0x00
@@ -8116,27 +9598,6 @@
# CHECK: nill %r15, 0
0xa5 0xf7 0x00 0x00
-# CHECK: ni 0, 0
-0x94 0x00 0x00 0x00
-
-# CHECK: ni 4095, 0
-0x94 0x00 0x0f 0xff
-
-# CHECK: ni 0, 255
-0x94 0xff 0x00 0x00
-
-# CHECK: ni 0(%r1), 42
-0x94 0x2a 0x10 0x00
-
-# CHECK: ni 0(%r15), 42
-0x94 0x2a 0xf0 0x00
-
-# CHECK: ni 4095(%r1), 42
-0x94 0x2a 0x1f 0xff
-
-# CHECK: ni 4095(%r15), 42
-0x94 0x2a 0xff 0xff
-
# CHECK: niy -524288, 0
0xeb 0x00 0x00 0x00 0x80 0x54
@@ -8185,26 +9646,35 @@
# CHECK: nrk %r2, %r3, %r4
0xb9 0xf4 0x40 0x23
-# CHECK: n %r0, 0
-0x54 0x00 0x00 0x00
+# CHECK: ntstg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x25
-# CHECK: n %r0, 4095
-0x54 0x00 0x0f 0xff
+# CHECK: ntstg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x25
-# CHECK: n %r0, 0(%r1)
-0x54 0x00 0x10 0x00
+# CHECK: ntstg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x25
-# CHECK: n %r0, 0(%r15)
-0x54 0x00 0xf0 0x00
+# CHECK: ntstg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x25
-# CHECK: n %r0, 4095(%r1,%r15)
-0x54 0x01 0xff 0xff
+# CHECK: ntstg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x25
-# CHECK: n %r0, 4095(%r15,%r1)
-0x54 0x0f 0x1f 0xff
+# CHECK: ntstg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x25
-# CHECK: n %r15, 0
-0x54 0xf0 0x00 0x00
+# CHECK: ntstg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x25
+
+# CHECK: ntstg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x25
+
+# CHECK: ntstg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x25
+
+# CHECK: ntstg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x25
# CHECK: ny %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x54
@@ -8236,47 +9706,26 @@
# CHECK: ny %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x54
-# CHECK: niai 0, 0
-0xb2 0xfa 0x00 0x00
-
-# CHECK: niai 15, 0
-0xb2 0xfa 0x00 0xf0
-
-# CHECK: niai 0, 15
-0xb2 0xfa 0x00 0x0f
-
-# CHECK: niai 15, 15
-0xb2 0xfa 0x00 0xff
-
-# CHECK: ntstg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x25
-
-# CHECK: ntstg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x25
-
-# CHECK: ntstg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x25
-
-# CHECK: ntstg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x25
+# CHECK: o %r0, 0
+0x56 0x00 0x00 0x00
-# CHECK: ntstg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x25
+# CHECK: o %r0, 4095
+0x56 0x00 0x0f 0xff
-# CHECK: ntstg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x25
+# CHECK: o %r0, 0(%r1)
+0x56 0x00 0x10 0x00
-# CHECK: ntstg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x25
+# CHECK: o %r0, 0(%r15)
+0x56 0x00 0xf0 0x00
-# CHECK: ntstg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x25
+# CHECK: o %r0, 4095(%r1,%r15)
+0x56 0x01 0xff 0xff
-# CHECK: ntstg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x25
+# CHECK: o %r0, 4095(%r15,%r1)
+0x56 0x0f 0x1f 0xff
-# CHECK: ntstg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x25
+# CHECK: o %r15, 0
+0x56 0xf0 0x00 0x00
# CHECK: oc 0(1), 0
0xd6 0x00 0x00 0x00 0x00 0x00
@@ -8314,24 +9763,6 @@
# CHECK: oc 0(256,%r15), 0
0xd6 0xff 0xf0 0x00 0x00 0x00
-# CHECK: ogr %r0, %r0
-0xb9 0x81 0x00 0x00
-
-# CHECK: ogr %r0, %r15
-0xb9 0x81 0x00 0x0f
-
-# CHECK: ogr %r15, %r0
-0xb9 0x81 0x00 0xf0
-
-# CHECK: ogr %r7, %r8
-0xb9 0x81 0x00 0x78
-
-# CHECK: ogrk %r0, %r0, %r0
-0xb9 0xe6 0x00 0x00
-
-# CHECK: ogrk %r2, %r3, %r4
-0xb9 0xe6 0x40 0x23
-
# CHECK: og %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x81
@@ -8362,6 +9793,45 @@
# CHECK: og %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x81
+# CHECK: ogr %r0, %r0
+0xb9 0x81 0x00 0x00
+
+# CHECK: ogr %r0, %r15
+0xb9 0x81 0x00 0x0f
+
+# CHECK: ogr %r15, %r0
+0xb9 0x81 0x00 0xf0
+
+# CHECK: ogr %r7, %r8
+0xb9 0x81 0x00 0x78
+
+# CHECK: ogrk %r0, %r0, %r0
+0xb9 0xe6 0x00 0x00
+
+# CHECK: ogrk %r2, %r3, %r4
+0xb9 0xe6 0x40 0x23
+
+# CHECK: oi 0, 0
+0x96 0x00 0x00 0x00
+
+# CHECK: oi 4095, 0
+0x96 0x00 0x0f 0xff
+
+# CHECK: oi 0, 255
+0x96 0xff 0x00 0x00
+
+# CHECK: oi 0(%r1), 42
+0x96 0x2a 0x10 0x00
+
+# CHECK: oi 0(%r15), 42
+0x96 0x2a 0xf0 0x00
+
+# CHECK: oi 4095(%r1), 42
+0x96 0x2a 0x1f 0xff
+
+# CHECK: oi 4095(%r15), 42
+0x96 0x2a 0xff 0xff
+
# CHECK: oihf %r0, 0
0xc0 0x0c 0x00 0x00 0x00 0x00
@@ -8428,27 +9898,6 @@
# CHECK: oill %r15, 0
0xa5 0xfb 0x00 0x00
-# CHECK: oi 0, 0
-0x96 0x00 0x00 0x00
-
-# CHECK: oi 4095, 0
-0x96 0x00 0x0f 0xff
-
-# CHECK: oi 0, 255
-0x96 0xff 0x00 0x00
-
-# CHECK: oi 0(%r1), 42
-0x96 0x2a 0x10 0x00
-
-# CHECK: oi 0(%r15), 42
-0x96 0x2a 0xf0 0x00
-
-# CHECK: oi 4095(%r1), 42
-0x96 0x2a 0x1f 0xff
-
-# CHECK: oi 4095(%r15), 42
-0x96 0x2a 0xff 0xff
-
# CHECK: oiy -524288, 0
0xeb 0x00 0x00 0x00 0x80 0x56
@@ -8497,27 +9946,6 @@
# CHECK: ork %r2, %r3, %r4
0xb9 0xf6 0x40 0x23
-# CHECK: o %r0, 0
-0x56 0x00 0x00 0x00
-
-# CHECK: o %r0, 4095
-0x56 0x00 0x0f 0xff
-
-# CHECK: o %r0, 0(%r1)
-0x56 0x00 0x10 0x00
-
-# CHECK: o %r0, 0(%r15)
-0x56 0x00 0xf0 0x00
-
-# CHECK: o %r0, 4095(%r1,%r15)
-0x56 0x01 0xff 0xff
-
-# CHECK: o %r0, 4095(%r15,%r1)
-0x56 0x0f 0x1f 0xff
-
-# CHECK: o %r15, 0
-0x56 0xf0 0x00 0x00
-
# CHECK: oy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x56
@@ -8548,6 +9976,51 @@
# CHECK: oy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x56
+# CHECK: pack 0(1), 0(1)
+0xf2 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: pack 0(1), 0(1,%r1)
+0xf2 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: pack 0(1), 0(1,%r15)
+0xf2 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: pack 0(1), 4095(1)
+0xf2 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: pack 0(1), 4095(1,%r1)
+0xf2 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: pack 0(1), 4095(1,%r15)
+0xf2 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: pack 0(1,%r1), 0(1)
+0xf2 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: pack 0(1,%r15), 0(1)
+0xf2 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: pack 4095(1,%r1), 0(1)
+0xf2 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: pack 4095(1,%r15), 0(1)
+0xf2 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: pack 0(16,%r1), 0(1)
+0xf2 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: pack 0(16,%r15), 0(1)
+0xf2 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: pack 0(1), 0(16,%r1)
+0xf2 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: pack 0(1), 0(16,%r15)
+0xf2 0x0f 0x00 0x00 0xf0 0x00
+
+# CHECK: pcc
+0xb9 0x2c 0x00 0x00
+
# CHECK: pfd 0, -524288
0xe3 0x00 0x00 0x00 0x80 0x36
@@ -8578,6 +10051,78 @@
# CHECK: pfd 15, 0
0xe3 0xf0 0x00 0x00 0x00 0x36
+# CHECK: pka 0, 0(1)
+0xe9 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: pka 0, 0(1,%r1)
+0xe9 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: pka 0, 0(1,%r15)
+0xe9 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: pka 0, 4095(1)
+0xe9 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: pka 0, 4095(1,%r1)
+0xe9 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: pka 0, 4095(1,%r15)
+0xe9 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: pka 0(%r1), 0(1)
+0xe9 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: pka 0(%r15), 0(1)
+0xe9 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: pka 4095(%r1), 0(1)
+0xe9 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: pka 4095(%r15), 0(1)
+0xe9 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: pka 0, 0(256,%r1)
+0xe9 0xff 0x00 0x00 0x10 0x00
+
+# CHECK: pka 0, 0(256,%r15)
+0xe9 0xff 0x00 0x00 0xf0 0x00
+
+# CHECK: pku 0, 0(1)
+0xe1 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: pku 0, 0(1,%r1)
+0xe1 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: pku 0, 0(1,%r15)
+0xe1 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: pku 0, 4095(1)
+0xe1 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: pku 0, 4095(1,%r1)
+0xe1 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: pku 0, 4095(1,%r15)
+0xe1 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: pku 0(%r1), 0(1)
+0xe1 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: pku 0(%r15), 0(1)
+0xe1 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: pku 4095(%r1), 0(1)
+0xe1 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: pku 4095(%r15), 0(1)
+0xe1 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: pku 0, 0(256,%r1)
+0xe1 0xff 0x00 0x00 0x10 0x00
+
+# CHECK: pku 0, 0(256,%r15)
+0xe1 0xff 0x00 0x00 0xf0 0x00
+
# CHECK: plo %r0, 0, %r0, 0
0xee 0x00 0x00 0x00 0x00 0x00
@@ -8599,9 +10144,6 @@
# CHECK: plo %r2, 4095(%r1), %r4, 0(%r15)
0xee 0x24 0x1f 0xff 0xf0 0x00
-# CHECK: pr
-0x01 0x01
-
# CHECK: popcnt %r0, %r0
0xb9 0xe1 0x00 0x00
@@ -8629,6 +10171,9 @@
# CHECK: ppa %r15, %r0, 0
0xb2 0xe8 0x00 0xf0
+# CHECK: pr
+0x01 0x01
+
# CHECK: risbg %r0, %r0, 0, 0, 0
0xec 0x00 0x00 0x00 0x00 0x55
@@ -8713,6 +10258,78 @@
# CHECK: risblg %r4, %r5, 6, 7, 8
0xec 0x45 0x06 0x07 0x08 0x51
+# CHECK: rll %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x1d
+
+# CHECK: rll %r15, %r1, 0
+0xeb 0xf1 0x00 0x00 0x00 0x1d
+
+# CHECK: rll %r1, %r15, 0
+0xeb 0x1f 0x00 0x00 0x00 0x1d
+
+# CHECK: rll %r15, %r15, 0
+0xeb 0xff 0x00 0x00 0x00 0x1d
+
+# CHECK: rll %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x1d
+
+# CHECK: rll %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x1d
+
+# CHECK: rll %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0x1d
+
+# CHECK: rll %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x1d
+
+# CHECK: rll %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x1d
+
+# CHECK: rll %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x1d
+
+# CHECK: rll %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x1d
+
+# CHECK: rll %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x1d
+
+# CHECK: rllg %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x1c
+
+# CHECK: rllg %r15, %r1, 0
+0xeb 0xf1 0x00 0x00 0x00 0x1c
+
+# CHECK: rllg %r1, %r15, 0
+0xeb 0x1f 0x00 0x00 0x00 0x1c
+
+# CHECK: rllg %r15, %r15, 0
+0xeb 0xff 0x00 0x00 0x00 0x1c
+
+# CHECK: rllg %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x1c
+
+# CHECK: rllg %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x1c
+
+# CHECK: rllg %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0x1c
+
+# CHECK: rllg %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x1c
+
+# CHECK: rllg %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x1c
+
+# CHECK: rllg %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x1c
+
+# CHECK: rllg %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x1c
+
+# CHECK: rllg %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x1c
+
# CHECK: rnsbg %r0, %r0, 0, 0, 0
0xec 0x00 0x00 0x00 0x00 0x54
@@ -8776,77 +10393,35 @@
# CHECK: rxsbg %r4, %r5, 6, 7, 8
0xec 0x45 0x06 0x07 0x08 0x57
-# CHECK: rllg %r0, %r0, 0
-0xeb 0x00 0x00 0x00 0x00 0x1c
-
-# CHECK: rllg %r15, %r1, 0
-0xeb 0xf1 0x00 0x00 0x00 0x1c
-
-# CHECK: rllg %r1, %r15, 0
-0xeb 0x1f 0x00 0x00 0x00 0x1c
-
-# CHECK: rllg %r15, %r15, 0
-0xeb 0xff 0x00 0x00 0x00 0x1c
-
-# CHECK: rllg %r0, %r0, -524288
-0xeb 0x00 0x00 0x00 0x80 0x1c
-
-# CHECK: rllg %r0, %r0, -1
-0xeb 0x00 0x0f 0xff 0xff 0x1c
-
-# CHECK: rllg %r0, %r0, 1
-0xeb 0x00 0x00 0x01 0x00 0x1c
-
-# CHECK: rllg %r0, %r0, 524287
-0xeb 0x00 0x0f 0xff 0x7f 0x1c
-
-# CHECK: rllg %r0, %r0, 0(%r1)
-0xeb 0x00 0x10 0x00 0x00 0x1c
-
-# CHECK: rllg %r0, %r0, 0(%r15)
-0xeb 0x00 0xf0 0x00 0x00 0x1c
-
-# CHECK: rllg %r0, %r0, 524287(%r1)
-0xeb 0x00 0x1f 0xff 0x7f 0x1c
-
-# CHECK: rllg %r0, %r0, 524287(%r15)
-0xeb 0x00 0xff 0xff 0x7f 0x1c
-
-# CHECK: rll %r0, %r0, 0
-0xeb 0x00 0x00 0x00 0x00 0x1d
-
-# CHECK: rll %r15, %r1, 0
-0xeb 0xf1 0x00 0x00 0x00 0x1d
-
-# CHECK: rll %r1, %r15, 0
-0xeb 0x1f 0x00 0x00 0x00 0x1d
+# CHECK: s %r0, 0
+0x5b 0x00 0x00 0x00
-# CHECK: rll %r15, %r15, 0
-0xeb 0xff 0x00 0x00 0x00 0x1d
+# CHECK: s %r0, 4095
+0x5b 0x00 0x0f 0xff
-# CHECK: rll %r0, %r0, -524288
-0xeb 0x00 0x00 0x00 0x80 0x1d
+# CHECK: s %r0, 0(%r1)
+0x5b 0x00 0x10 0x00
-# CHECK: rll %r0, %r0, -1
-0xeb 0x00 0x0f 0xff 0xff 0x1d
+# CHECK: s %r0, 0(%r15)
+0x5b 0x00 0xf0 0x00
-# CHECK: rll %r0, %r0, 1
-0xeb 0x00 0x00 0x01 0x00 0x1d
+# CHECK: s %r0, 4095(%r1,%r15)
+0x5b 0x01 0xff 0xff
-# CHECK: rll %r0, %r0, 524287
-0xeb 0x00 0x0f 0xff 0x7f 0x1d
+# CHECK: s %r0, 4095(%r15,%r1)
+0x5b 0x0f 0x1f 0xff
-# CHECK: rll %r0, %r0, 0(%r1)
-0xeb 0x00 0x10 0x00 0x00 0x1d
+# CHECK: s %r15, 0
+0x5b 0xf0 0x00 0x00
-# CHECK: rll %r0, %r0, 0(%r15)
-0xeb 0x00 0xf0 0x00 0x00 0x1d
+# CHECK: sam24
+0x01 0x0c
-# CHECK: rll %r0, %r0, 524287(%r1)
-0xeb 0x00 0x1f 0xff 0x7f 0x1d
+# CHECK: sam31
+0x01 0x0d
-# CHECK: rll %r0, %r0, 524287(%r15)
-0xeb 0x00 0xff 0xff 0x7f 0x1d
+# CHECK: sam64
+0x01 0x0e
# CHECK: sar %a0, %r0
0xb2 0x4e 0x00 0x00
@@ -8863,27 +10438,6 @@
# CHECK: sar %a15, %r15
0xb2 0x4e 0x00 0xff
-# CHECK: sam24
-0x01 0x0c
-
-# CHECK: sam31
-0x01 0x0d
-
-# CHECK: sam64
-0x01 0x0e
-
-# CHECK: sdbr %f0, %f0
-0xb3 0x1b 0x00 0x00
-
-# CHECK: sdbr %f0, %f15
-0xb3 0x1b 0x00 0x0f
-
-# CHECK: sdbr %f7, %f8
-0xb3 0x1b 0x00 0x78
-
-# CHECK: sdbr %f15, %f0
-0xb3 0x1b 0x00 0xf0
-
# CHECK: sdb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x1b
@@ -8905,17 +10459,17 @@
# CHECK: sdb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x1b
-# CHECK: sebr %f0, %f0
-0xb3 0x0b 0x00 0x00
+# CHECK: sdbr %f0, %f0
+0xb3 0x1b 0x00 0x00
-# CHECK: sebr %f0, %f15
-0xb3 0x0b 0x00 0x0f
+# CHECK: sdbr %f0, %f15
+0xb3 0x1b 0x00 0x0f
-# CHECK: sebr %f7, %f8
-0xb3 0x0b 0x00 0x78
+# CHECK: sdbr %f7, %f8
+0xb3 0x1b 0x00 0x78
-# CHECK: sebr %f15, %f0
-0xb3 0x0b 0x00 0xf0
+# CHECK: sdbr %f15, %f0
+0xb3 0x1b 0x00 0xf0
# CHECK: seb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x0b
@@ -8938,6 +10492,18 @@
# CHECK: seb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x0b
+# CHECK: sebr %f0, %f0
+0xb3 0x0b 0x00 0x00
+
+# CHECK: sebr %f0, %f15
+0xb3 0x0b 0x00 0x0f
+
+# CHECK: sebr %f7, %f8
+0xb3 0x0b 0x00 0x78
+
+# CHECK: sebr %f15, %f0
+0xb3 0x0b 0x00 0xf0
+
# CHECK: sfasr %r0
0xb3 0x85 0x00 0x00
@@ -8956,17 +10522,35 @@
# CHECK: sfpc %r15
0xb3 0x84 0x00 0xf0
-# CHECK: sgfr %r0, %r0
-0xb9 0x19 0x00 0x00
+# CHECK: sg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x09
-# CHECK: sgfr %r0, %r15
-0xb9 0x19 0x00 0x0f
+# CHECK: sg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x09
-# CHECK: sgfr %r15, %r0
-0xb9 0x19 0x00 0xf0
+# CHECK: sg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x09
-# CHECK: sgfr %r7, %r8
-0xb9 0x19 0x00 0x78
+# CHECK: sg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x09
+
+# CHECK: sg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x09
+
+# CHECK: sg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x09
+
+# CHECK: sg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x09
+
+# CHECK: sg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x09
+
+# CHECK: sg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x09
+
+# CHECK: sg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x09
# CHECK: sgf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x19
@@ -8998,6 +10582,18 @@
# CHECK: sgf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x19
+# CHECK: sgfr %r0, %r0
+0xb9 0x19 0x00 0x00
+
+# CHECK: sgfr %r0, %r15
+0xb9 0x19 0x00 0x0f
+
+# CHECK: sgfr %r15, %r0
+0xb9 0x19 0x00 0xf0
+
+# CHECK: sgfr %r7, %r8
+0xb9 0x19 0x00 0x78
+
# CHECK: sgr %r0, %r0
0xb9 0x09 0x00 0x00
@@ -9016,36 +10612,6 @@
# CHECK: sgrk %r2, %r3, %r4
0xb9 0xe9 0x40 0x23
-# CHECK: sg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x09
-
-# CHECK: sg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x09
-
-# CHECK: sg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x09
-
-# CHECK: sg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x09
-
-# CHECK: sg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x09
-
-# CHECK: sg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x09
-
-# CHECK: sg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x09
-
-# CHECK: sg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x09
-
-# CHECK: sg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x09
-
-# CHECK: sg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x09
-
# CHECK: sh %r0, 0
0x4b 0x00 0x00 0x00
@@ -9097,6 +10663,27 @@
# CHECK: shy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x7b
+# CHECK: sl %r0, 0
+0x5f 0x00 0x00 0x00
+
+# CHECK: sl %r0, 4095
+0x5f 0x00 0x0f 0xff
+
+# CHECK: sl %r0, 0(%r1)
+0x5f 0x00 0x10 0x00
+
+# CHECK: sl %r0, 0(%r15)
+0x5f 0x00 0xf0 0x00
+
+# CHECK: sl %r0, 4095(%r1,%r15)
+0x5f 0x01 0xff 0xff
+
+# CHECK: sl %r0, 4095(%r15,%r1)
+0x5f 0x0f 0x1f 0xff
+
+# CHECK: sl %r15, 0
+0x5f 0xf0 0x00 0x00
+
# CHECK: sla %r0, 0
0x8b 0x00 0x00 0x00
@@ -9121,6 +10708,42 @@
# CHECK: sla %r0, 4095(%r15)
0x8b 0x00 0xff 0xff
+# CHECK: slag %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x0b
+
+# CHECK: slag %r15, %r1, 0
+0xeb 0xf1 0x00 0x00 0x00 0x0b
+
+# CHECK: slag %r1, %r15, 0
+0xeb 0x1f 0x00 0x00 0x00 0x0b
+
+# CHECK: slag %r15, %r15, 0
+0xeb 0xff 0x00 0x00 0x00 0x0b
+
+# CHECK: slag %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x0b
+
+# CHECK: slag %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x0b
+
+# CHECK: slag %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0x0b
+
+# CHECK: slag %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x0b
+
+# CHECK: slag %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x0b
+
+# CHECK: slag %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x0b
+
+# CHECK: slag %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x0b
+
+# CHECK: slag %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x0b
+
# CHECK: slak %r0, %r0, 0
0xeb 0x00 0x00 0x00 0x00 0xdd
@@ -9157,17 +10780,35 @@
# CHECK: slak %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0xdd
-# CHECK: slbgr %r0, %r0
-0xb9 0x89 0x00 0x00
+# CHECK: slb %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x99
-# CHECK: slbgr %r0, %r15
-0xb9 0x89 0x00 0x0f
+# CHECK: slb %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x99
-# CHECK: slbgr %r15, %r0
-0xb9 0x89 0x00 0xf0
+# CHECK: slb %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x99
-# CHECK: slbgr %r7, %r8
-0xb9 0x89 0x00 0x78
+# CHECK: slb %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x99
+
+# CHECK: slb %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x99
+
+# CHECK: slb %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x99
+
+# CHECK: slb %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x99
+
+# CHECK: slb %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x99
+
+# CHECK: slb %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x99
+
+# CHECK: slb %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x99
# CHECK: slbg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x89
@@ -9199,6 +10840,18 @@
# CHECK: slbg %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x89
+# CHECK: slbgr %r0, %r0
+0xb9 0x89 0x00 0x00
+
+# CHECK: slbgr %r0, %r15
+0xb9 0x89 0x00 0x0f
+
+# CHECK: slbgr %r15, %r0
+0xb9 0x89 0x00 0xf0
+
+# CHECK: slbgr %r7, %r8
+0xb9 0x89 0x00 0x78
+
# CHECK: slbr %r0, %r0
0xb9 0x99 0x00 0x00
@@ -9211,35 +10864,53 @@
# CHECK: slbr %r7, %r8
0xb9 0x99 0x00 0x78
-# CHECK: slb %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x99
+# CHECK: slda %r0, 0
+0x8f 0x00 0x00 0x00
-# CHECK: slb %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x99
+# CHECK: slda %r6, 0
+0x8f 0x60 0x00 0x00
-# CHECK: slb %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x99
+# CHECK: slda %r14, 0
+0x8f 0xe0 0x00 0x00
-# CHECK: slb %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x99
+# CHECK: slda %r0, 4095
+0x8f 0x00 0x0f 0xff
-# CHECK: slb %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x99
+# CHECK: slda %r0, 0(%r1)
+0x8f 0x00 0x10 0x00
-# CHECK: slb %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x99
+# CHECK: slda %r0, 0(%r15)
+0x8f 0x00 0xf0 0x00
-# CHECK: slb %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x99
+# CHECK: slda %r0, 4095(%r1)
+0x8f 0x00 0x1f 0xff
-# CHECK: slb %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x99
+# CHECK: slda %r0, 4095(%r15)
+0x8f 0x00 0xff 0xff
-# CHECK: slb %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x99
+# CHECK: sldl %r0, 0
+0x8d 0x00 0x00 0x00
-# CHECK: slb %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x99
+# CHECK: sldl %r6, 0
+0x8d 0x60 0x00 0x00
+
+# CHECK: sldl %r14, 0
+0x8d 0xe0 0x00 0x00
+
+# CHECK: sldl %r0, 4095
+0x8d 0x00 0x0f 0xff
+
+# CHECK: sldl %r0, 0(%r1)
+0x8d 0x00 0x10 0x00
+
+# CHECK: sldl %r0, 0(%r15)
+0x8d 0x00 0xf0 0x00
+
+# CHECK: sldl %r0, 4095(%r1)
+0x8d 0x00 0x1f 0xff
+
+# CHECK: sldl %r0, 4095(%r15)
+0x8d 0x00 0xff 0xff
# CHECK: slfi %r0, 0
0xc2 0x05 0x00 0x00 0x00 0x00
@@ -9250,26 +10921,35 @@
# CHECK: slfi %r15, 0
0xc2 0xf5 0x00 0x00 0x00 0x00
-# CHECK: slgfi %r0, 0
-0xc2 0x04 0x00 0x00 0x00 0x00
+# CHECK: slg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x0b
-# CHECK: slgfi %r0, 4294967295
-0xc2 0x04 0xff 0xff 0xff 0xff
+# CHECK: slg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x0b
-# CHECK: slgfi %r15, 0
-0xc2 0xf4 0x00 0x00 0x00 0x00
+# CHECK: slg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x0b
-# CHECK: slgfr %r0, %r0
-0xb9 0x1b 0x00 0x00
+# CHECK: slg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x0b
-# CHECK: slgfr %r0, %r15
-0xb9 0x1b 0x00 0x0f
+# CHECK: slg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x0b
-# CHECK: slgfr %r15, %r0
-0xb9 0x1b 0x00 0xf0
+# CHECK: slg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x0b
-# CHECK: slgfr %r7, %r8
-0xb9 0x1b 0x00 0x78
+# CHECK: slg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x0b
+
+# CHECK: slg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x0b
+
+# CHECK: slg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x0b
+
+# CHECK: slg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x0b
# CHECK: slgf %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x1b
@@ -9301,6 +10981,27 @@
# CHECK: slgf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x1b
+# CHECK: slgfi %r0, 0
+0xc2 0x04 0x00 0x00 0x00 0x00
+
+# CHECK: slgfi %r0, 4294967295
+0xc2 0x04 0xff 0xff 0xff 0xff
+
+# CHECK: slgfi %r15, 0
+0xc2 0xf4 0x00 0x00 0x00 0x00
+
+# CHECK: slgfr %r0, %r0
+0xb9 0x1b 0x00 0x00
+
+# CHECK: slgfr %r0, %r15
+0xb9 0x1b 0x00 0x0f
+
+# CHECK: slgfr %r15, %r0
+0xb9 0x1b 0x00 0xf0
+
+# CHECK: slgfr %r7, %r8
+0xb9 0x1b 0x00 0x78
+
# CHECK: slgr %r0, %r0
0xb9 0x0b 0x00 0x00
@@ -9319,35 +11020,29 @@
# CHECK: slgrk %r2, %r3, %r4
0xb9 0xeb 0x40 0x23
-# CHECK: slg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x0b
-
-# CHECK: slg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x0b
-
-# CHECK: slg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x0b
+# CHECK: sll %r0, 0
+0x89 0x00 0x00 0x00
-# CHECK: slg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x0b
+# CHECK: sll %r7, 0
+0x89 0x70 0x00 0x00
-# CHECK: slg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x0b
+# CHECK: sll %r15, 0
+0x89 0xf0 0x00 0x00
-# CHECK: slg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x0b
+# CHECK: sll %r0, 4095
+0x89 0x00 0x0f 0xff
-# CHECK: slg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x0b
+# CHECK: sll %r0, 0(%r1)
+0x89 0x00 0x10 0x00
-# CHECK: slg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x0b
+# CHECK: sll %r0, 0(%r15)
+0x89 0x00 0xf0 0x00
-# CHECK: slg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x0b
+# CHECK: sll %r0, 4095(%r1)
+0x89 0x00 0x1f 0xff
-# CHECK: slg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x0b
+# CHECK: sll %r0, 4095(%r15)
+0x89 0x00 0xff 0xff
# CHECK: sllg %r0, %r0, 0
0xeb 0x00 0x00 0x00 0x00 0x0d
@@ -9421,30 +11116,6 @@
# CHECK: sllk %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0xdf
-# CHECK: sll %r0, 0
-0x89 0x00 0x00 0x00
-
-# CHECK: sll %r7, 0
-0x89 0x70 0x00 0x00
-
-# CHECK: sll %r15, 0
-0x89 0xf0 0x00 0x00
-
-# CHECK: sll %r0, 4095
-0x89 0x00 0x0f 0xff
-
-# CHECK: sll %r0, 0(%r1)
-0x89 0x00 0x10 0x00
-
-# CHECK: sll %r0, 0(%r15)
-0x89 0x00 0xf0 0x00
-
-# CHECK: sll %r0, 4095(%r1)
-0x89 0x00 0x1f 0xff
-
-# CHECK: sll %r0, 4095(%r15)
-0x89 0x00 0xff 0xff
-
# CHECK: slr %r0, %r0
0x1f 0x00
@@ -9463,27 +11134,6 @@
# CHECK: slrk %r2, %r3, %r4
0xb9 0xfb 0x40 0x23
-# CHECK: sl %r0, 0
-0x5f 0x00 0x00 0x00
-
-# CHECK: sl %r0, 4095
-0x5f 0x00 0x0f 0xff
-
-# CHECK: sl %r0, 0(%r1)
-0x5f 0x00 0x10 0x00
-
-# CHECK: sl %r0, 0(%r15)
-0x5f 0x00 0xf0 0x00
-
-# CHECK: sl %r0, 4095(%r1,%r15)
-0x5f 0x01 0xff 0xff
-
-# CHECK: sl %r0, 4095(%r15,%r1)
-0x5f 0x0f 0x1f 0xff
-
-# CHECK: sl %r15, 0
-0x5f 0xf0 0x00 0x00
-
# CHECK: sly %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x5f
@@ -9514,8 +11164,47 @@
# CHECK: sly %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x5f
-# CHECK: sqdbr %f0, %f0
-0xb3 0x15 0x00 0x00
+# CHECK: sp 0(1), 0(1)
+0xfb 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: sp 0(1), 0(1,%r1)
+0xfb 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: sp 0(1), 0(1,%r15)
+0xfb 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: sp 0(1), 4095(1)
+0xfb 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: sp 0(1), 4095(1,%r1)
+0xfb 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: sp 0(1), 4095(1,%r15)
+0xfb 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: sp 0(1,%r1), 0(1)
+0xfb 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: sp 0(1,%r15), 0(1)
+0xfb 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: sp 4095(1,%r1), 0(1)
+0xfb 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: sp 4095(1,%r15), 0(1)
+0xfb 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: sp 0(16,%r1), 0(1)
+0xfb 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: sp 0(16,%r15), 0(1)
+0xfb 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: sp 0(1), 0(16,%r1)
+0xfb 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: sp 0(1), 0(16,%r15)
+0xfb 0x0f 0x00 0x00 0xf0 0x00
# CHECK: spm %r0
0x04 0x00
@@ -9526,15 +11215,6 @@
# CHECK: spm %r15
0x04 0xf0
-# CHECK: sqdbr %f0, %f15
-0xb3 0x15 0x00 0x0f
-
-# CHECK: sqdbr %f7, %f8
-0xb3 0x15 0x00 0x78
-
-# CHECK: sqdbr %f15, %f0
-0xb3 0x15 0x00 0xf0
-
# CHECK: sqdb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x15
@@ -9556,17 +11236,17 @@
# CHECK: sqdb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x15
-# CHECK: sqebr %f0, %f0
-0xb3 0x14 0x00 0x00
+# CHECK: sqdbr %f0, %f0
+0xb3 0x15 0x00 0x00
-# CHECK: sqebr %f0, %f15
-0xb3 0x14 0x00 0x0f
+# CHECK: sqdbr %f0, %f15
+0xb3 0x15 0x00 0x0f
-# CHECK: sqebr %f7, %f8
-0xb3 0x14 0x00 0x78
+# CHECK: sqdbr %f7, %f8
+0xb3 0x15 0x00 0x78
-# CHECK: sqebr %f15, %f0
-0xb3 0x14 0x00 0xf0
+# CHECK: sqdbr %f15, %f0
+0xb3 0x15 0x00 0xf0
# CHECK: sqeb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x14
@@ -9589,6 +11269,18 @@
# CHECK: sqeb %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x14
+# CHECK: sqebr %f0, %f0
+0xb3 0x14 0x00 0x00
+
+# CHECK: sqebr %f0, %f15
+0xb3 0x14 0x00 0x0f
+
+# CHECK: sqebr %f7, %f8
+0xb3 0x14 0x00 0x78
+
+# CHECK: sqebr %f15, %f0
+0xb3 0x14 0x00 0xf0
+
# CHECK: sqxbr %f0, %f0
0xb3 0x16 0x00 0x00
@@ -9601,6 +11293,42 @@
# CHECK: sqxbr %f13, %f0
0xb3 0x16 0x00 0xd0
+# CHECK: sr %r0, %r0
+0x1b 0x00
+
+# CHECK: sr %r0, %r15
+0x1b 0x0f
+
+# CHECK: sr %r15, %r0
+0x1b 0xf0
+
+# CHECK: sr %r7, %r8
+0x1b 0x78
+
+# CHECK: sra %r0, 0
+0x8a 0x00 0x00 0x00
+
+# CHECK: sra %r7, 0
+0x8a 0x70 0x00 0x00
+
+# CHECK: sra %r15, 0
+0x8a 0xf0 0x00 0x00
+
+# CHECK: sra %r0, 4095
+0x8a 0x00 0x0f 0xff
+
+# CHECK: sra %r0, 0(%r1)
+0x8a 0x00 0x10 0x00
+
+# CHECK: sra %r0, 0(%r15)
+0x8a 0x00 0xf0 0x00
+
+# CHECK: sra %r0, 4095(%r1)
+0x8a 0x00 0x1f 0xff
+
+# CHECK: sra %r0, 4095(%r15)
+0x8a 0x00 0xff 0xff
+
# CHECK: srag %r0, %r0, 0
0xeb 0x00 0x00 0x00 0x00 0x0a
@@ -9673,29 +11401,83 @@
# CHECK: srak %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0xdc
-# CHECK: sra %r0, 0
-0x8a 0x00 0x00 0x00
+# CHECK: srda %r0, 0
+0x8e 0x00 0x00 0x00
-# CHECK: sra %r7, 0
-0x8a 0x70 0x00 0x00
+# CHECK: srda %r6, 0
+0x8e 0x60 0x00 0x00
-# CHECK: sra %r15, 0
-0x8a 0xf0 0x00 0x00
+# CHECK: srda %r14, 0
+0x8e 0xe0 0x00 0x00
-# CHECK: sra %r0, 4095
-0x8a 0x00 0x0f 0xff
+# CHECK: srda %r0, 4095
+0x8e 0x00 0x0f 0xff
-# CHECK: sra %r0, 0(%r1)
-0x8a 0x00 0x10 0x00
+# CHECK: srda %r0, 0(%r1)
+0x8e 0x00 0x10 0x00
-# CHECK: sra %r0, 0(%r15)
-0x8a 0x00 0xf0 0x00
+# CHECK: srda %r0, 0(%r15)
+0x8e 0x00 0xf0 0x00
-# CHECK: sra %r0, 4095(%r1)
-0x8a 0x00 0x1f 0xff
+# CHECK: srda %r0, 4095(%r1)
+0x8e 0x00 0x1f 0xff
-# CHECK: sra %r0, 4095(%r15)
-0x8a 0x00 0xff 0xff
+# CHECK: srda %r0, 4095(%r15)
+0x8e 0x00 0xff 0xff
+
+# CHECK: srdl %r0, 0
+0x8c 0x00 0x00 0x00
+
+# CHECK: srdl %r6, 0
+0x8c 0x60 0x00 0x00
+
+# CHECK: srdl %r14, 0
+0x8c 0xe0 0x00 0x00
+
+# CHECK: srdl %r0, 4095
+0x8c 0x00 0x0f 0xff
+
+# CHECK: srdl %r0, 0(%r1)
+0x8c 0x00 0x10 0x00
+
+# CHECK: srdl %r0, 0(%r15)
+0x8c 0x00 0xf0 0x00
+
+# CHECK: srdl %r0, 4095(%r1)
+0x8c 0x00 0x1f 0xff
+
+# CHECK: srdl %r0, 4095(%r15)
+0x8c 0x00 0xff 0xff
+
+# CHECK: srk %r0, %r0, %r0
+0xb9 0xf9 0x00 0x00
+
+# CHECK: srk %r2, %r3, %r4
+0xb9 0xf9 0x40 0x23
+
+# CHECK: srl %r0, 0
+0x88 0x00 0x00 0x00
+
+# CHECK: srl %r7, 0
+0x88 0x70 0x00 0x00
+
+# CHECK: srl %r15, 0
+0x88 0xf0 0x00 0x00
+
+# CHECK: srl %r0, 4095
+0x88 0x00 0x0f 0xff
+
+# CHECK: srl %r0, 0(%r1)
+0x88 0x00 0x10 0x00
+
+# CHECK: srl %r0, 0(%r15)
+0x88 0x00 0xf0 0x00
+
+# CHECK: srl %r0, 4095(%r1)
+0x88 0x00 0x1f 0xff
+
+# CHECK: srl %r0, 4095(%r15)
+0x88 0x00 0xff 0xff
# CHECK: srlg %r0, %r0, 0
0xeb 0x00 0x00 0x00 0x00 0x0c
@@ -9769,48 +11551,6 @@
# CHECK: srlk %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0xde
-# CHECK: srl %r0, 0
-0x88 0x00 0x00 0x00
-
-# CHECK: srl %r7, 0
-0x88 0x70 0x00 0x00
-
-# CHECK: srl %r15, 0
-0x88 0xf0 0x00 0x00
-
-# CHECK: srl %r0, 4095
-0x88 0x00 0x0f 0xff
-
-# CHECK: srl %r0, 0(%r1)
-0x88 0x00 0x10 0x00
-
-# CHECK: srl %r0, 0(%r15)
-0x88 0x00 0xf0 0x00
-
-# CHECK: srl %r0, 4095(%r1)
-0x88 0x00 0x1f 0xff
-
-# CHECK: srl %r0, 4095(%r15)
-0x88 0x00 0xff 0xff
-
-# CHECK: sr %r0, %r0
-0x1b 0x00
-
-# CHECK: sr %r0, %r15
-0x1b 0x0f
-
-# CHECK: sr %r15, %r0
-0x1b 0xf0
-
-# CHECK: sr %r7, %r8
-0x1b 0x78
-
-# CHECK: srk %r0, %r0, %r0
-0xb9 0xf9 0x00 0x00
-
-# CHECK: srk %r2, %r3, %r4
-0xb9 0xf9 0x40 0x23
-
# CHECK: srnm 0
0xb2 0x99 0x00 0x00
@@ -9865,6 +11605,45 @@
# CHECK: srnmt 4095(%r15)
0xb2 0xb9 0xff 0xff
+# CHECK: srp 0(1), 0, 0
+0xf0 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: srp 0(1), 0, 15
+0xf0 0x0f 0x00 0x00 0x00 0x00
+
+# CHECK: srp 0(1), 0(%r1), 0
+0xf0 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: srp 0(1), 0(%r15), 0
+0xf0 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: srp 0(1), 4095, 0
+0xf0 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: srp 0(1), 4095(%r1), 0
+0xf0 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: srp 0(1), 4095(%r15), 0
+0xf0 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: srp 0(1,%r1), 0, 0
+0xf0 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: srp 0(1,%r15), 0, 0
+0xf0 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: srp 4095(1,%r1), 0, 0
+0xf0 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: srp 4095(1,%r15), 0, 0
+0xf0 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: srp 0(16,%r1), 0, 0
+0xf0 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: srp 0(16,%r15), 0, 0
+0xf0 0xf0 0xf0 0x00 0x00 0x00
+
# CHECK: srst %r0, %r0
0xb2 0x5e 0x00 0x00
@@ -9877,6 +11656,39 @@
# CHECK: srst %r7, %r8
0xb2 0x5e 0x00 0x78
+# CHECK: srstu %r0, %r0
+0xb9 0xbe 0x00 0x00
+
+# CHECK: srstu %r0, %r15
+0xb9 0xbe 0x00 0x0f
+
+# CHECK: srstu %r15, %r0
+0xb9 0xbe 0x00 0xf0
+
+# CHECK: srstu %r7, %r8
+0xb9 0xbe 0x00 0x78
+
+# CHECK: st %r0, 0
+0x50 0x00 0x00 0x00
+
+# CHECK: st %r0, 4095
+0x50 0x00 0x0f 0xff
+
+# CHECK: st %r0, 0(%r1)
+0x50 0x00 0x10 0x00
+
+# CHECK: st %r0, 0(%r15)
+0x50 0x00 0xf0 0x00
+
+# CHECK: st %r0, 4095(%r1,%r15)
+0x50 0x01 0xff 0xff
+
+# CHECK: st %r0, 4095(%r15,%r1)
+0x50 0x0f 0x1f 0xff
+
+# CHECK: st %r15, 0
+0x50 0xf0 0x00 0x00
+
# CHECK: stam %a0, %a0, 0
0x9b 0x00 0x00 0x00
@@ -9997,82 +11809,145 @@
# CHECK: stch %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc3
-# CHECK: stcy %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x72
-
# CHECK: stck 0
0xb2 0x05 0x00 0x00
# CHECK: stck 0(%r1)
0xb2 0x05 0x10 0x00
-#CHECK: stck 0(%r15)
+# CHECK: stck 0(%r15)
0xb2 0x05 0xf0 0x00
-#CHECK: stck 4095
-0xb2 0x05 0x0f 0xff
+# CHECK: stck 4095
+0xb2 0x05 0x0f 0xff
-#CHECK: stck 4095(%r1)
+# CHECK: stck 4095(%r1)
0xb2 0x05 0x1f 0xff
-#CHECK: stck 4095(%r15)
+# CHECK: stck 4095(%r15)
0xb2 0x05 0xff 0xff
+# CHECK: stcke 0
+0xb2 0x78 0x00 0x00
+
+# CHECK: stcke 0(%r1)
+0xb2 0x78 0x10 0x00
+
+# CHECK: stcke 0(%r15)
+0xb2 0x78 0xf0 0x00
+
+# CHECK: stcke 4095
+0xb2 0x78 0x0f 0xff
+
+# CHECK: stcke 4095(%r1)
+0xb2 0x78 0x1f 0xff
+
+# CHECK: stcke 4095(%r15)
+0xb2 0x78 0xff 0xff
+
# CHECK: stckf 0
0xb2 0x7c 0x00 0x00
# CHECK: stckf 0(%r1)
0xb2 0x7c 0x10 0x00
-#CHECK: stckf 0(%r15)
+# CHECK: stckf 0(%r15)
0xb2 0x7c 0xf0 0x00
-#CHECK: stckf 4095
-0xb2 0x7c 0x0f 0xff
+# CHECK: stckf 4095
+0xb2 0x7c 0x0f 0xff
-#CHECK: stckf 4095(%r1)
+# CHECK: stckf 4095(%r1)
0xb2 0x7c 0x1f 0xff
-#CHECK: stckf 4095(%r15)
+# CHECK: stckf 4095(%r15)
0xb2 0x7c 0xff 0xff
-# CHECK: stcke 0
-0xb2 0x78 0x00 0x00
+# CHECK: stcm %r0, 0, 0
+0xbe 0x00 0x00 0x00
-# CHECK: stcke 0(%r1)
-0xb2 0x78 0x10 0x00
+# CHECK: stcm %r0, 15, 4095
+0xbe 0x0f 0x0f 0xff
-#CHECK: stcke 0(%r15)
-0xb2 0x78 0xf0 0x00
+# CHECK: stcm %r0, 0, 0(%r1)
+0xbe 0x00 0x10 0x00
-#CHECK: stcke 4095
-0xb2 0x78 0x0f 0xff
+# CHECK: stcm %r0, 0, 0(%r15)
+0xbe 0x00 0xf0 0x00
-#CHECK: stcke 4095(%r1)
-0xb2 0x78 0x1f 0xff
+# CHECK: stcm %r0, 15, 4095(%r15)
+0xbe 0x0f 0xff 0xff
-#CHECK: stcke 4095(%r15)
-0xb2 0x78 0xff 0xff
+# CHECK: stcm %r0, 0, 4095(%r1)
+0xbe 0x00 0x1f 0xff
-# CHECK: stfle 0
-0xb2 0xb0 0x00 0x00
+# CHECK: stcm %r15, 0, 0
+0xbe 0xf0 0x00 0x00
-# CHECK: stfle 0(%r1)
-0xb2 0xb0 0x10 0x00
+# CHECK: stcmh %r0, 0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x2c
-#CHECK: stfle 0(%r15)
-0xb2 0xb0 0xf0 0x00
+# CHECK: stcmh %r0, 0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x2c
-#CHECK: stfle 4095
-0xb2 0xb0 0x0f 0xff
+# CHECK: stcmh %r0, 15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x2c
-#CHECK: stfle 4095(%r1)
-0xb2 0xb0 0x1f 0xff
+# CHECK: stcmh %r0, 15, 1
+0xeb 0x0f 0x00 0x01 0x00 0x2c
-#CHECK: stfle 4095(%r15)
-0xb2 0xb0 0xff 0xff
+# CHECK: stcmh %r0, 8, 524287
+0xeb 0x08 0x0f 0xff 0x7f 0x2c
+
+# CHECK: stcmh %r0, 8, 0(%r1)
+0xeb 0x08 0x10 0x00 0x00 0x2c
+
+# CHECK: stcmh %r0, 4, 0(%r15)
+0xeb 0x04 0xf0 0x00 0x00 0x2c
+
+# CHECK: stcmh %r0, 4, 524287(%r15)
+0xeb 0x04 0xff 0xff 0x7f 0x2c
+
+# CHECK: stcmh %r0, 0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x2c
+
+# CHECK: stcmh %r15, 0, 0
+0xeb 0xf0 0x00 0x00 0x00 0x2c
+
+# CHECK: stcmy %r0, 0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x2d
-# CHECK: stcy %r0, -1
+# CHECK: stcmy %r0, 0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x2d
+
+# CHECK: stcmy %r0, 15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x2d
+
+# CHECK: stcmy %r0, 15, 1
+0xeb 0x0f 0x00 0x01 0x00 0x2d
+
+# CHECK: stcmy %r0, 8, 524287
+0xeb 0x08 0x0f 0xff 0x7f 0x2d
+
+# CHECK: stcmy %r0, 8, 0(%r1)
+0xeb 0x08 0x10 0x00 0x00 0x2d
+
+# CHECK: stcmy %r0, 4, 0(%r15)
+0xeb 0x04 0xf0 0x00 0x00 0x2d
+
+# CHECK: stcmy %r0, 4, 524287(%r15)
+0xeb 0x04 0xff 0xff 0x7f 0x2d
+
+# CHECK: stcmy %r0, 0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x2d
+
+# CHECK: stcmy %r15, 0, 0
+0xeb 0xf0 0x00 0x00 0x00 0x2d
+
+# CHECK: stcy %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x72
+
+# CHECK: stcy %r0, -1
0xe3 0x00 0x0f 0xff 0xff 0x72
# CHECK: stcy %r0, 0
@@ -10201,6 +12076,54 @@
# CHECK: stey %f15, 0
0xed 0xf0 0x00 0x00 0x00 0x66
+# CHECK: stfh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0xcb
+
+# CHECK: stfh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0xcb
+
+# CHECK: stfh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0xcb
+
+# CHECK: stfh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0xcb
+
+# CHECK: stfh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0xcb
+
+# CHECK: stfh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0xcb
+
+# CHECK: stfh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0xcb
+
+# CHECK: stfh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0xcb
+
+# CHECK: stfh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0xcb
+
+# CHECK: stfh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0xcb
+
+# CHECK: stfle 0
+0xb2 0xb0 0x00 0x00
+
+# CHECK: stfle 0(%r1)
+0xb2 0xb0 0x10 0x00
+
+# CHECK: stfle 0(%r15)
+0xb2 0xb0 0xf0 0x00
+
+# CHECK: stfle 4095
+0xb2 0xb0 0x0f 0xff
+
+# CHECK: stfle 4095(%r1)
+0xb2 0xb0 0x1f 0xff
+
+# CHECK: stfle 4095(%r15)
+0xb2 0xb0 0xff 0xff
+
# CHECK: stfpc 0
0xb2 0x9c 0x00 0x00
@@ -10300,36 +12223,6 @@
# CHECK: sthh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xc7
-# CHECK: stfh %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0xcb
-
-# CHECK: stfh %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0xcb
-
-# CHECK: stfh %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0xcb
-
-# CHECK: stfh %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0xcb
-
-# CHECK: stfh %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0xcb
-
-# CHECK: stfh %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0xcb
-
-# CHECK: stfh %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0xcb
-
-# CHECK: stfh %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0xcb
-
-# CHECK: stfh %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0xcb
-
-# CHECK: stfh %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0xcb
-
# CHECK: sthy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x70
@@ -10507,135 +12400,6 @@
# CHECK: stmy %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0x90
-# CHECK: strag 0, 0
-0xe5 0x02 0x00 0x00 0x00 0x00
-
-# CHECK: strag 0, 4095
-0xe5 0x02 0x00 0x00 0x0f 0xff
-
-# CHECK: strag 0, 0(%r1)
-0xe5 0x02 0x00 0x00 0x10 0x00
-
-# CHECK: strag 0, 0(%r15)
-0xe5 0x02 0x00 0x00 0xf0 0x00
-
-# CHECK: strag 0(%r1), 4095(%r15)
-0xe5 0x02 0x10 0x00 0xff 0xff
-
-# CHECK: strag 4095(%r1), 0(%r15)
-0xe5 0x02 0x1f 0xff 0xf0 0x00
-
-# CHECK: strvg %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x2f
-
-# CHECK: strvg %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x2f
-
-# CHECK: strvg %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x2f
-
-# CHECK: strvg %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x2f
-
-# CHECK: strvg %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x2f
-
-# CHECK: strvg %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x2f
-
-# CHECK: strvg %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x2f
-
-# CHECK: strvg %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x2f
-
-# CHECK: strvg %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x2f
-
-# CHECK: strvg %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x2f
-
-# CHECK: strvh %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x3f
-
-# CHECK: strvh %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x3f
-
-# CHECK: strvh %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x3f
-
-# CHECK: strvh %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x3f
-
-# CHECK: strvh %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x3f
-
-# CHECK: strvh %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x3f
-
-# CHECK: strvh %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x3f
-
-# CHECK: strvh %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x3f
-
-# CHECK: strvh %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x3f
-
-# CHECK: strvh %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x3f
-
-# CHECK: strv %r0, -524288
-0xe3 0x00 0x00 0x00 0x80 0x3e
-
-# CHECK: strv %r0, -1
-0xe3 0x00 0x0f 0xff 0xff 0x3e
-
-# CHECK: strv %r0, 0
-0xe3 0x00 0x00 0x00 0x00 0x3e
-
-# CHECK: strv %r0, 1
-0xe3 0x00 0x00 0x01 0x00 0x3e
-
-# CHECK: strv %r0, 524287
-0xe3 0x00 0x0f 0xff 0x7f 0x3e
-
-# CHECK: strv %r0, 0(%r1)
-0xe3 0x00 0x10 0x00 0x00 0x3e
-
-# CHECK: strv %r0, 0(%r15)
-0xe3 0x00 0xf0 0x00 0x00 0x3e
-
-# CHECK: strv %r0, 524287(%r1,%r15)
-0xe3 0x01 0xff 0xff 0x7f 0x3e
-
-# CHECK: strv %r0, 524287(%r15,%r1)
-0xe3 0x0f 0x1f 0xff 0x7f 0x3e
-
-# CHECK: strv %r15, 0
-0xe3 0xf0 0x00 0x00 0x00 0x3e
-
-# CHECK: st %r0, 0
-0x50 0x00 0x00 0x00
-
-# CHECK: st %r0, 4095
-0x50 0x00 0x0f 0xff
-
-# CHECK: st %r0, 0(%r1)
-0x50 0x00 0x10 0x00
-
-# CHECK: st %r0, 0(%r15)
-0x50 0x00 0xf0 0x00
-
-# CHECK: st %r0, 4095(%r1,%r15)
-0x50 0x01 0xff 0xff
-
-# CHECK: st %r0, 4095(%r15,%r1)
-0x50 0x0f 0x1f 0xff
-
-# CHECK: st %r15, 0
-0x50 0xf0 0x00 0x00
-
# CHECK: stoc %r1, 2(%r3), 0
0xeb 0x10 0x30 0x02 0x00 0xf3
@@ -10762,26 +12526,113 @@
# CHECK: stpq %r14, 0
0xe3 0xe0 0x00 0x00 0x00 0x8e
-# CHECK: s %r0, 0
-0x5b 0x00 0x00 0x00
+# CHECK: strag 0, 0
+0xe5 0x02 0x00 0x00 0x00 0x00
-# CHECK: s %r0, 4095
-0x5b 0x00 0x0f 0xff
+# CHECK: strag 0, 4095
+0xe5 0x02 0x00 0x00 0x0f 0xff
-# CHECK: s %r0, 0(%r1)
-0x5b 0x00 0x10 0x00
+# CHECK: strag 0, 0(%r1)
+0xe5 0x02 0x00 0x00 0x10 0x00
-# CHECK: s %r0, 0(%r15)
-0x5b 0x00 0xf0 0x00
+# CHECK: strag 0, 0(%r15)
+0xe5 0x02 0x00 0x00 0xf0 0x00
-# CHECK: s %r0, 4095(%r1,%r15)
-0x5b 0x01 0xff 0xff
+# CHECK: strag 0(%r1), 4095(%r15)
+0xe5 0x02 0x10 0x00 0xff 0xff
-# CHECK: s %r0, 4095(%r15,%r1)
-0x5b 0x0f 0x1f 0xff
+# CHECK: strag 4095(%r1), 0(%r15)
+0xe5 0x02 0x1f 0xff 0xf0 0x00
-# CHECK: s %r15, 0
-0x5b 0xf0 0x00 0x00
+# CHECK: strv %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x3e
+
+# CHECK: strv %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x3e
+
+# CHECK: strv %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x3e
+
+# CHECK: strv %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x3e
+
+# CHECK: strv %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x3e
+
+# CHECK: strv %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x3e
+
+# CHECK: strv %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x3e
+
+# CHECK: strv %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x3e
+
+# CHECK: strv %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x3e
+
+# CHECK: strv %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x3e
+
+# CHECK: strvg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x2f
+
+# CHECK: strvg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x2f
+
+# CHECK: strvg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x2f
+
+# CHECK: strvg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x2f
+
+# CHECK: strvg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x2f
+
+# CHECK: strvg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x2f
+
+# CHECK: strvg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x2f
+
+# CHECK: strvg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x2f
+
+# CHECK: strvg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x2f
+
+# CHECK: strvg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x2f
+
+# CHECK: strvh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x3f
+
+# CHECK: strvh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x3f
+
+# CHECK: strvh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x3f
+
+# CHECK: strvh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x3f
+
+# CHECK: strvh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x3f
+
+# CHECK: strvh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x3f
+
+# CHECK: strvh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x3f
+
+# CHECK: strvh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x3f
+
+# CHECK: strvh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x3f
+
+# CHECK: strvh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x3f
# CHECK: sty %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x50
@@ -10867,9 +12718,6 @@
# CHECK: sy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x5b
-# CHECK: tam
-0x01 0x0b
-
# CHECK: tabort 0
0xb2 0xfc 0x00 0x00
@@ -10888,6 +12736,9 @@
# CHECK: tabort 4095(%r15)
0xb2 0xfc 0xff 0xff
+# CHECK: tam
+0x01 0x0b
+
# CHECK: tbegin 0, 0
0xe5 0x60 0x00 0x00 0x00 0x00
@@ -11119,24 +12970,411 @@
# CHECK: tmy 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x51
+# CHECK: tp 0(1)
+0xeb 0x00 0x00 0x00 0x00 0xc0
+
+# CHECK: tp 0(1,%r1)
+0xeb 0x00 0x10 0x00 0x00 0xc0
+
+# CHECK: tp 0(1,%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xc0
+
+# CHECK: tp 4095(1,%r1)
+0xeb 0x00 0x1f 0xff 0x00 0xc0
+
+# CHECK: tp 4095(1,%r15)
+0xeb 0x00 0xff 0xff 0x00 0xc0
+
+# CHECK: tp 0(16,%r1)
+0xeb 0xf0 0x10 0x00 0x00 0xc0
+
+# CHECK: tp 0(16,%r15)
+0xeb 0xf0 0xf0 0x00 0x00 0xc0
+
+# CHECK: tr 0(1), 0
+0xdc 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: tr 0(1), 0(%r1)
+0xdc 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: tr 0(1), 0(%r15)
+0xdc 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: tr 0(1), 4095
+0xdc 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: tr 0(1), 4095(%r1)
+0xdc 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: tr 0(1), 4095(%r15)
+0xdc 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: tr 0(1,%r1), 0
+0xdc 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: tr 0(1,%r15), 0
+0xdc 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: tr 4095(1,%r1), 0
+0xdc 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: tr 4095(1,%r15), 0
+0xdc 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: tr 0(256,%r1), 0
+0xdc 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: tr 0(256,%r15), 0
+0xdc 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: tre %r0, %r0
+0xb2 0xa5 0x00 0x00
+
+# CHECK: tre %r0, %r15
+0xb2 0xa5 0x00 0x0f
+
+# CHECK: tre %r14, %r0
+0xb2 0xa5 0x00 0xe0
+
+# CHECK: tre %r6, %r8
+0xb2 0xa5 0x00 0x68
+
+# CHECK: troo %r0, %r0
+0xb9 0x93 0x00 0x00
+
+# CHECK: troo %r0, %r15
+0xb9 0x93 0x00 0x0f
+
+# CHECK: troo %r14, %r0
+0xb9 0x93 0x00 0xe0
+
+# CHECK: troo %r6, %r8
+0xb9 0x93 0x00 0x68
+
+# CHECK: troo %r4, %r12, 1
+0xb9 0x93 0x10 0x4c
+
+# CHECK: troo %r4, %r12, 15
+0xb9 0x93 0xf0 0x4c
+
+# CHECK: trot %r0, %r0
+0xb9 0x92 0x00 0x00
+
+# CHECK: trot %r0, %r15
+0xb9 0x92 0x00 0x0f
+
+# CHECK: trot %r14, %r0
+0xb9 0x92 0x00 0xe0
+
+# CHECK: trot %r6, %r8
+0xb9 0x92 0x00 0x68
+
+# CHECK: trot %r4, %r12, 1
+0xb9 0x92 0x10 0x4c
+
+# CHECK: trot %r4, %r12, 15
+0xb9 0x92 0xf0 0x4c
+
+# CHECK: trt 0(1), 0
+0xdd 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: trt 0(1), 0(%r1)
+0xdd 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: trt 0(1), 0(%r15)
+0xdd 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: trt 0(1), 4095
+0xdd 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: trt 0(1), 4095(%r1)
+0xdd 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: trt 0(1), 4095(%r15)
+0xdd 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: trt 0(1,%r1), 0
+0xdd 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: trt 0(1,%r15), 0
+0xdd 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: trt 4095(1,%r1), 0
+0xdd 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: trt 4095(1,%r15), 0
+0xdd 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: trt 0(256,%r1), 0
+0xdd 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: trt 0(256,%r15), 0
+0xdd 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: trte %r0, %r0
+0xb9 0xbf 0x00 0x00
+
+# CHECK: trte %r0, %r15
+0xb9 0xbf 0x00 0x0f
+
+# CHECK: trte %r14, %r0
+0xb9 0xbf 0x00 0xe0
+
+# CHECK: trte %r6, %r8
+0xb9 0xbf 0x00 0x68
+
+# CHECK: trte %r4, %r12, 1
+0xb9 0xbf 0x10 0x4c
+
+# CHECK: trte %r4, %r12, 15
+0xb9 0xbf 0xf0 0x4c
+
+# CHECK: trto %r0, %r0
+0xb9 0x91 0x00 0x00
+
+# CHECK: trto %r0, %r15
+0xb9 0x91 0x00 0x0f
+
+# CHECK: trto %r14, %r0
+0xb9 0x91 0x00 0xe0
+
+# CHECK: trto %r6, %r8
+0xb9 0x91 0x00 0x68
+
+# CHECK: trto %r4, %r12, 1
+0xb9 0x91 0x10 0x4c
+
+# CHECK: trto %r4, %r12, 15
+0xb9 0x91 0xf0 0x4c
+
+# CHECK: trtr 0(1), 0
+0xd0 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: trtr 0(1), 0(%r1)
+0xd0 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: trtr 0(1), 0(%r15)
+0xd0 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: trtr 0(1), 4095
+0xd0 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: trtr 0(1), 4095(%r1)
+0xd0 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: trtr 0(1), 4095(%r15)
+0xd0 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: trtr 0(1,%r1), 0
+0xd0 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: trtr 0(1,%r15), 0
+0xd0 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: trtr 4095(1,%r1), 0
+0xd0 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: trtr 4095(1,%r15), 0
+0xd0 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: trtr 0(256,%r1), 0
+0xd0 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: trtr 0(256,%r15), 0
+0xd0 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: trtre %r0, %r0
+0xb9 0xbd 0x00 0x00
+
+# CHECK: trtre %r0, %r15
+0xb9 0xbd 0x00 0x0f
+
+# CHECK: trtre %r14, %r0
+0xb9 0xbd 0x00 0xe0
+
+# CHECK: trtre %r6, %r8
+0xb9 0xbd 0x00 0x68
+
+# CHECK: trtre %r4, %r12, 1
+0xb9 0xbd 0x10 0x4c
+
+# CHECK: trtre %r4, %r12, 15
+0xb9 0xbd 0xf0 0x4c
+
+# CHECK: trtt %r0, %r0
+0xb9 0x90 0x00 0x00
+
+# CHECK: trtt %r0, %r15
+0xb9 0x90 0x00 0x0f
+
+# CHECK: trtt %r14, %r0
+0xb9 0x90 0x00 0xe0
+
+# CHECK: trtt %r6, %r8
+0xb9 0x90 0x00 0x68
+
+# CHECK: trtt %r4, %r12, 1
+0xb9 0x90 0x10 0x4c
+
+# CHECK: trtt %r4, %r12, 15
+0xb9 0x90 0xf0 0x4c
+
# CHECK: ts 0
0x93 0x00 0x00 0x00
# CHECK: ts 0(%r1)
0x93 0x00 0x10 0x00
-#CHECK: ts 0(%r15)
+# CHECK: ts 0(%r15)
0x93 0x00 0xf0 0x00
-#CHECK: ts 4095
+# CHECK: ts 4095
0x93 0x00 0x0f 0xff
-#CHECK: ts 4095(%r1)
+# CHECK: ts 4095(%r1)
0x93 0x00 0x1f 0xff
-#CHECK: ts 4095(%r15)
+# CHECK: ts 4095(%r15)
0x93 0x00 0xff 0xff
+# CHECK: unpk 0(1), 0(1)
+0xf3 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: unpk 0(1), 0(1,%r1)
+0xf3 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: unpk 0(1), 0(1,%r15)
+0xf3 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: unpk 0(1), 4095(1)
+0xf3 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: unpk 0(1), 4095(1,%r1)
+0xf3 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: unpk 0(1), 4095(1,%r15)
+0xf3 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: unpk 0(1,%r1), 0(1)
+0xf3 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: unpk 0(1,%r15), 0(1)
+0xf3 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: unpk 4095(1,%r1), 0(1)
+0xf3 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: unpk 4095(1,%r15), 0(1)
+0xf3 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: unpk 0(16,%r1), 0(1)
+0xf3 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: unpk 0(16,%r15), 0(1)
+0xf3 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: unpk 0(1), 0(16,%r1)
+0xf3 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: unpk 0(1), 0(16,%r15)
+0xf3 0x0f 0x00 0x00 0xf0 0x00
+
+# CHECK: unpka 0(1), 0
+0xea 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: unpka 0(1), 0(%r1)
+0xea 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: unpka 0(1), 0(%r15)
+0xea 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: unpka 0(1), 4095
+0xea 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: unpka 0(1), 4095(%r1)
+0xea 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: unpka 0(1), 4095(%r15)
+0xea 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: unpka 0(1,%r1), 0
+0xea 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: unpka 0(1,%r15), 0
+0xea 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: unpka 4095(1,%r1), 0
+0xea 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: unpka 4095(1,%r15), 0
+0xea 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: unpka 0(256,%r1), 0
+0xea 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: unpka 0(256,%r15), 0
+0xea 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: unpku 0(1), 0
+0xe2 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: unpku 0(1), 0(%r1)
+0xe2 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: unpku 0(1), 0(%r15)
+0xe2 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: unpku 0(1), 4095
+0xe2 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: unpku 0(1), 4095(%r1)
+0xe2 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: unpku 0(1), 4095(%r15)
+0xe2 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: unpku 0(1,%r1), 0
+0xe2 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: unpku 0(1,%r15), 0
+0xe2 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: unpku 4095(1,%r1), 0
+0xe2 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: unpku 4095(1,%r15), 0
+0xe2 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: unpku 0(256,%r1), 0
+0xe2 0xff 0x10 0x00 0x00 0x00
+
+# CHECK: unpku 0(256,%r15), 0
+0xe2 0xff 0xf0 0x00 0x00 0x00
+
+# CHECK: upt
+0x01 0x02
+
+# CHECK: x %r0, 0
+0x57 0x00 0x00 0x00
+
+# CHECK: x %r0, 4095
+0x57 0x00 0x0f 0xff
+
+# CHECK: x %r0, 0(%r1)
+0x57 0x00 0x10 0x00
+
+# CHECK: x %r0, 0(%r15)
+0x57 0x00 0xf0 0x00
+
+# CHECK: x %r0, 4095(%r1,%r15)
+0x57 0x01 0xff 0xff
+
+# CHECK: x %r0, 4095(%r15,%r1)
+0x57 0x0f 0x1f 0xff
+
+# CHECK: x %r15, 0
+0x57 0xf0 0x00 0x00
+
# CHECK: xc 0(1), 0
0xd7 0x00 0x00 0x00 0x00 0x00
@@ -11173,24 +13411,6 @@
# CHECK: xc 0(256,%r15), 0
0xd7 0xff 0xf0 0x00 0x00 0x00
-# CHECK: xgr %r0, %r0
-0xb9 0x82 0x00 0x00
-
-# CHECK: xgr %r0, %r15
-0xb9 0x82 0x00 0x0f
-
-# CHECK: xgr %r15, %r0
-0xb9 0x82 0x00 0xf0
-
-# CHECK: xgr %r7, %r8
-0xb9 0x82 0x00 0x78
-
-# CHECK: xgrk %r0, %r0, %r0
-0xb9 0xe7 0x00 0x00
-
-# CHECK: xgrk %r2, %r3, %r4
-0xb9 0xe7 0x40 0x23
-
# CHECK: xg %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x82
@@ -11221,23 +13441,23 @@
# CHECK: xg %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x82
-# CHECK: xihf %r0, 0
-0xc0 0x06 0x00 0x00 0x00 0x00
+# CHECK: xgr %r0, %r0
+0xb9 0x82 0x00 0x00
-# CHECK: xihf %r0, 4294967295
-0xc0 0x06 0xff 0xff 0xff 0xff
+# CHECK: xgr %r0, %r15
+0xb9 0x82 0x00 0x0f
-# CHECK: xihf %r15, 0
-0xc0 0xf6 0x00 0x00 0x00 0x00
+# CHECK: xgr %r15, %r0
+0xb9 0x82 0x00 0xf0
-# CHECK: xilf %r0, 0
-0xc0 0x07 0x00 0x00 0x00 0x00
+# CHECK: xgr %r7, %r8
+0xb9 0x82 0x00 0x78
-# CHECK: xilf %r0, 4294967295
-0xc0 0x07 0xff 0xff 0xff 0xff
+# CHECK: xgrk %r0, %r0, %r0
+0xb9 0xe7 0x00 0x00
-# CHECK: xilf %r15, 0
-0xc0 0xf7 0x00 0x00 0x00 0x00
+# CHECK: xgrk %r2, %r3, %r4
+0xb9 0xe7 0x40 0x23
# CHECK: xi 0, 0
0x97 0x00 0x00 0x00
@@ -11260,6 +13480,24 @@
# CHECK: xi 4095(%r15), 42
0x97 0x2a 0xff 0xff
+# CHECK: xihf %r0, 0
+0xc0 0x06 0x00 0x00 0x00 0x00
+
+# CHECK: xihf %r0, 4294967295
+0xc0 0x06 0xff 0xff 0xff 0xff
+
+# CHECK: xihf %r15, 0
+0xc0 0xf6 0x00 0x00 0x00 0x00
+
+# CHECK: xilf %r0, 0
+0xc0 0x07 0x00 0x00 0x00 0x00
+
+# CHECK: xilf %r0, 4294967295
+0xc0 0x07 0xff 0xff 0xff 0xff
+
+# CHECK: xilf %r15, 0
+0xc0 0xf7 0x00 0x00 0x00 0x00
+
# CHECK: xiy -524288, 0
0xeb 0x00 0x00 0x00 0x80 0x57
@@ -11308,27 +13546,6 @@
# CHECK: xrk %r2, %r3, %r4
0xb9 0xf7 0x40 0x23
-# CHECK: x %r0, 0
-0x57 0x00 0x00 0x00
-
-# CHECK: x %r0, 4095
-0x57 0x00 0x0f 0xff
-
-# CHECK: x %r0, 0(%r1)
-0x57 0x00 0x10 0x00
-
-# CHECK: x %r0, 0(%r15)
-0x57 0x00 0xf0 0x00
-
-# CHECK: x %r0, 4095(%r1,%r15)
-0x57 0x01 0xff 0xff
-
-# CHECK: x %r0, 4095(%r15,%r1)
-0x57 0x0f 0x1f 0xff
-
-# CHECK: x %r15, 0
-0x57 0xf0 0x00 0x00
-
# CHECK: xy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x57
@@ -11358,3 +13575,45 @@
# CHECK: xy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x57
+
+# CHECK: zap 0(1), 0(1)
+0xf8 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: zap 0(1), 0(1,%r1)
+0xf8 0x00 0x00 0x00 0x10 0x00
+
+# CHECK: zap 0(1), 0(1,%r15)
+0xf8 0x00 0x00 0x00 0xf0 0x00
+
+# CHECK: zap 0(1), 4095(1)
+0xf8 0x00 0x00 0x00 0x0f 0xff
+
+# CHECK: zap 0(1), 4095(1,%r1)
+0xf8 0x00 0x00 0x00 0x1f 0xff
+
+# CHECK: zap 0(1), 4095(1,%r15)
+0xf8 0x00 0x00 0x00 0xff 0xff
+
+# CHECK: zap 0(1,%r1), 0(1)
+0xf8 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: zap 0(1,%r15), 0(1)
+0xf8 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: zap 4095(1,%r1), 0(1)
+0xf8 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: zap 4095(1,%r15), 0(1)
+0xf8 0x00 0xff 0xff 0x00 0x00
+
+# CHECK: zap 0(16,%r1), 0(1)
+0xf8 0xf0 0x10 0x00 0x00 0x00
+
+# CHECK: zap 0(16,%r15), 0(1)
+0xf8 0xf0 0xf0 0x00 0x00 0x00
+
+# CHECK: zap 0(1), 0(16,%r1)
+0xf8 0x0f 0x00 0x00 0x10 0x00
+
+# CHECK: zap 0(1), 0(16,%r15)
+0xf8 0x0f 0x00 0x00 0xf0 0x00
diff --git a/test/MC/SystemZ/insn-bad-z13.s b/test/MC/SystemZ/insn-bad-z13.s
index db2de118bf36..82f47feeb8a9 100644
--- a/test/MC/SystemZ/insn-bad-z13.s
+++ b/test/MC/SystemZ/insn-bad-z13.s
@@ -5,6 +5,89 @@
# RUN: FileCheck < %t %s
#CHECK: error: invalid operand
+#CHECK: lcbb %r0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: lcbb %r0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: lcbb %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: lcbb %r0, 4096, 0
+#CHECK: error: invalid use of vector addressing
+#CHECK: lcbb %r0, 0(%v1,%r2), 0
+
+ lcbb %r0, 0, -1
+ lcbb %r0, 0, 16
+ lcbb %r0, -1, 0
+ lcbb %r0, 4096, 0
+ lcbb %r0, 0(%v1,%r2), 0
+
+#CHECK: error: invalid operand
+#CHECK: llzrgf %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: llzrgf %r0, 524288
+
+ llzrgf %r0, -524289
+ llzrgf %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: locfh %r0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: locfh %r0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: locfh %r0, -524289, 1
+#CHECK: error: invalid operand
+#CHECK: locfh %r0, 524288, 1
+#CHECK: error: invalid use of indexed addressing
+#CHECK: locfh %r0, 0(%r1,%r2), 1
+
+ locfh %r0, 0, -1
+ locfh %r0, 0, 16
+ locfh %r0, -524289, 1
+ locfh %r0, 524288, 1
+ locfh %r0, 0(%r1,%r2), 1
+
+#CHECK: error: invalid operand
+#CHECK: locfhr %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: locfhr %r0, %r0, 16
+
+ locfhr %r0, %r0, -1
+ locfhr %r0, %r0, 16
+
+#CHECK: error: invalid operand
+#CHECK: locghie %r0, 66000
+#CHECK: error: invalid operand
+#CHECK: locghie %f0, 0
+#CHECK: error: invalid operand
+#CHECK: locghie 0, %r0
+
+ locghie %r0, 66000
+ locghie %f0, 0
+ locghie 0, %r0
+
+#CHECK: error: invalid operand
+#CHECK: lochhie %r0, 66000
+#CHECK: error: invalid operand
+#CHECK: lochhie %f0, 0
+#CHECK: error: invalid operand
+#CHECK: lochhie 0, %r0
+
+ lochhie %r0, 66000
+ lochhie %f0, 0
+ lochhie 0, %r0
+
+#CHECK: error: invalid operand
+#CHECK: lochie %r0, 66000
+#CHECK: error: invalid operand
+#CHECK: lochie %f0, 0
+#CHECK: error: invalid operand
+#CHECK: lochie 0, %r0
+
+ lochie %r0, 66000
+ lochie %f0, 0
+ lochie 0, %r0
+
+#CHECK: error: invalid operand
#CHECK: lzrf %r0, -524289
#CHECK: error: invalid operand
#CHECK: lzrf %r0, 524288
@@ -20,30 +103,30 @@
lzrg %r0, -524289
lzrg %r0, 524288
-#CHECK: error: invalid operand
-#CHECK: llzrgf %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: llzrgf %r0, 524288
+#CHECK: error: invalid register pair
+#CHECK: ppno %r1, %r2
+#CHECK: error: invalid register pair
+#CHECK: ppno %r2, %r1
- llzrgf %r0, -524289
- llzrgf %r0, 524288
+ ppno %r1, %r2
+ ppno %r2, %r1
#CHECK: error: invalid operand
-#CHECK: lcbb %r0, 0, -1
+#CHECK: stocfh %r0, 0, -1
#CHECK: error: invalid operand
-#CHECK: lcbb %r0, 0, 16
+#CHECK: stocfh %r0, 0, 16
#CHECK: error: invalid operand
-#CHECK: lcbb %r0, -1, 0
+#CHECK: stocfh %r0, -524289, 1
#CHECK: error: invalid operand
-#CHECK: lcbb %r0, 4096, 0
-#CHECK: error: invalid use of vector addressing
-#CHECK: lcbb %r0, 0(%v1,%r2), 0
+#CHECK: stocfh %r0, 524288, 1
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stocfh %r0, 0(%r1,%r2), 1
- lcbb %r0, 0, -1
- lcbb %r0, 0, 16
- lcbb %r0, -1, 0
- lcbb %r0, 4096, 0
- lcbb %r0, 0(%v1,%r2), 0
+ stocfh %r0, 0, -1
+ stocfh %r0, 0, 16
+ stocfh %r0, -524289, 1
+ stocfh %r0, 524288, 1
+ stocfh %r0, 0(%r1,%r2), 1
#CHECK: error: invalid operand
#CHECK: vcdg %v0, %v0, 0, 0, -1
@@ -474,6 +557,20 @@
vfaef %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
+#CHECK: vfaefs %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfaefs %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfaefs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfaefs %v0, %v0, %v0, 0, 0
+
+ vfaefs %v0, %v0, %v0, -1
+ vfaefs %v0, %v0, %v0, 16
+ vfaefs %v0, %v0
+ vfaefs %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
#CHECK: vfaeh %v0, %v0, %v0, -1
#CHECK: error: invalid operand
#CHECK: vfaeh %v0, %v0, %v0, 16
@@ -488,18 +585,60 @@
vfaeh %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
-#CHECK: vfaezh %v0, %v0, %v0, -1
+#CHECK: vfaehs %v0, %v0, %v0, -1
#CHECK: error: invalid operand
-#CHECK: vfaezh %v0, %v0, %v0, 16
+#CHECK: vfaehs %v0, %v0, %v0, 16
#CHECK: error: too few operands
-#CHECK: vfaezh %v0, %v0
+#CHECK: vfaehs %v0, %v0
#CHECK: error: invalid operand
-#CHECK: vfaezh %v0, %v0, %v0, 0, 0
+#CHECK: vfaehs %v0, %v0, %v0, 0, 0
- vfaezh %v0, %v0, %v0, -1
- vfaezh %v0, %v0, %v0, 16
- vfaezh %v0, %v0
- vfaezh %v0, %v0, %v0, 0, 0
+ vfaehs %v0, %v0, %v0, -1
+ vfaehs %v0, %v0, %v0, 16
+ vfaehs %v0, %v0
+ vfaehs %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfaezb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfaezb %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfaezb %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfaezb %v0, %v0, %v0, 0, 0
+
+ vfaezb %v0, %v0, %v0, -1
+ vfaezb %v0, %v0, %v0, 16
+ vfaezb %v0, %v0
+ vfaezb %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfaezbs %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfaezbs %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfaezbs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfaezbs %v0, %v0, %v0, 0, 0
+
+ vfaezbs %v0, %v0, %v0, -1
+ vfaezbs %v0, %v0, %v0, 16
+ vfaezbs %v0, %v0
+ vfaezbs %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfaezf %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfaezf %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfaezf %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfaezf %v0, %v0, %v0, 0, 0
+
+ vfaezf %v0, %v0, %v0, -1
+ vfaezf %v0, %v0, %v0, 16
+ vfaezf %v0, %v0
+ vfaezf %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
#CHECK: vfaezfs %v0, %v0, %v0, -1
@@ -516,6 +655,34 @@
vfaezfs %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
+#CHECK: vfaezh %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfaezh %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfaezh %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfaezh %v0, %v0, %v0, 0, 0
+
+ vfaezh %v0, %v0, %v0, -1
+ vfaezh %v0, %v0, %v0, 16
+ vfaezh %v0, %v0
+ vfaezh %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfaezhs %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfaezhs %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfaezhs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfaezhs %v0, %v0, %v0, 0, 0
+
+ vfaezhs %v0, %v0, %v0, -1
+ vfaezhs %v0, %v0, %v0, 16
+ vfaezhs %v0, %v0
+ vfaezhs %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
#CHECK: vfee %v0, %v0, %v0, 0, -1
#CHECK: error: invalid operand
#CHECK: vfee %v0, %v0, %v0, 0, 16
@@ -549,6 +716,14 @@
vfeeb %v0, %v0
vfeeb %v0, %v0, %v0, 0, 0
+#CHECK: error: too few operands
+#CHECK: vfeebs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfeebs %v0, %v0, %v0, 0
+
+ vfeebs %v0, %v0
+ vfeebs %v0, %v0, %v0, 0
+
#CHECK: error: invalid operand
#CHECK: vfeef %v0, %v0, %v0, -1
#CHECK: error: invalid operand
@@ -563,6 +738,14 @@
vfeef %v0, %v0
vfeef %v0, %v0, %v0, 0, 0
+#CHECK: error: too few operands
+#CHECK: vfeefs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfeefs %v0, %v0, %v0, 0
+
+ vfeefs %v0, %v0
+ vfeefs %v0, %v0, %v0, 0
+
#CHECK: error: invalid operand
#CHECK: vfeeh %v0, %v0, %v0, -1
#CHECK: error: invalid operand
@@ -578,22 +761,6 @@
vfeeh %v0, %v0, %v0, 0, 0
#CHECK: error: too few operands
-#CHECK: vfeebs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfeebs %v0, %v0, %v0, 0
-
- vfeebs %v0, %v0
- vfeebs %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfeefs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfeefs %v0, %v0, %v0, 0
-
- vfeefs %v0, %v0
- vfeefs %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
#CHECK: vfeehs %v0, %v0
#CHECK: error: invalid operand
#CHECK: vfeehs %v0, %v0, %v0, 0
@@ -610,6 +777,14 @@
vfeezb %v0, %v0, %v0, 0
#CHECK: error: too few operands
+#CHECK: vfeezbs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfeezbs %v0, %v0, %v0, 0
+
+ vfeezbs %v0, %v0
+ vfeezbs %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
#CHECK: vfeezf %v0, %v0
#CHECK: error: invalid operand
#CHECK: vfeezf %v0, %v0, %v0, 0
@@ -618,6 +793,14 @@
vfeezf %v0, %v0, %v0, 0
#CHECK: error: too few operands
+#CHECK: vfeezfs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfeezfs %v0, %v0, %v0, 0
+
+ vfeezfs %v0, %v0
+ vfeezfs %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
#CHECK: vfeezh %v0, %v0
#CHECK: error: invalid operand
#CHECK: vfeezh %v0, %v0, %v0, 0
@@ -626,28 +809,146 @@
vfeezh %v0, %v0, %v0, 0
#CHECK: error: too few operands
-#CHECK: vfeezbs %v0, %v0
+#CHECK: vfeezhs %v0, %v0
#CHECK: error: invalid operand
-#CHECK: vfeezbs %v0, %v0, %v0, 0
+#CHECK: vfeezhs %v0, %v0, %v0, 0
- vfeezbs %v0, %v0
- vfeezbs %v0, %v0, %v0, 0
+ vfeezhs %v0, %v0
+ vfeezhs %v0, %v0, %v0, 0
+#CHECK: error: invalid operand
+#CHECK: vfene %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfene %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfene %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfene %v0, %v0, %v0, 16, 0
#CHECK: error: too few operands
-#CHECK: vfeezfs %v0, %v0
+#CHECK: vfene %v0, %v0, %v0
#CHECK: error: invalid operand
-#CHECK: vfeezfs %v0, %v0, %v0, 0
+#CHECK: vfene %v0, %v0, %v0, 0, 0, 0
- vfeezfs %v0, %v0
- vfeezfs %v0, %v0, %v0, 0
+ vfene %v0, %v0, %v0, 0, -1
+ vfene %v0, %v0, %v0, 0, 16
+ vfene %v0, %v0, %v0, -1, 0
+ vfene %v0, %v0, %v0, 16, 0
+ vfene %v0, %v0, %v0
+ vfene %v0, %v0, %v0, 0, 0, 0
+#CHECK: error: invalid operand
+#CHECK: vfeneb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfeneb %v0, %v0, %v0, 16
#CHECK: error: too few operands
-#CHECK: vfeezhs %v0, %v0
+#CHECK: vfeneb %v0, %v0
#CHECK: error: invalid operand
-#CHECK: vfeezhs %v0, %v0, %v0, 0
+#CHECK: vfeneb %v0, %v0, %v0, 0, 0
- vfeezhs %v0, %v0
- vfeezhs %v0, %v0, %v0, 0
+ vfeneb %v0, %v0, %v0, -1
+ vfeneb %v0, %v0, %v0, 16
+ vfeneb %v0, %v0
+ vfeneb %v0, %v0, %v0, 0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenebs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenebs %v0, %v0, %v0, 0
+
+ vfenebs %v0, %v0
+ vfenebs %v0, %v0, %v0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfenef %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfenef %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfenef %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenef %v0, %v0, %v0, 0, 0
+
+ vfenef %v0, %v0, %v0, -1
+ vfenef %v0, %v0, %v0, 16
+ vfenef %v0, %v0
+ vfenef %v0, %v0, %v0, 0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenefs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenefs %v0, %v0, %v0, 0
+
+ vfenefs %v0, %v0
+ vfenefs %v0, %v0, %v0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfeneh %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfeneh %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vfeneh %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfeneh %v0, %v0, %v0, 0, 0
+
+ vfeneh %v0, %v0, %v0, -1
+ vfeneh %v0, %v0, %v0, 16
+ vfeneh %v0, %v0
+ vfeneh %v0, %v0, %v0, 0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenehs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenehs %v0, %v0, %v0, 0
+
+ vfenehs %v0, %v0
+ vfenehs %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenezb %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenezb %v0, %v0, %v0, 0
+
+ vfenezb %v0, %v0
+ vfenezb %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenezbs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenezbs %v0, %v0, %v0, 0
+
+ vfenezbs %v0, %v0
+ vfenezbs %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenezf %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenezf %v0, %v0, %v0, 0
+
+ vfenezf %v0, %v0
+ vfenezf %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenezfs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenezfs %v0, %v0, %v0, 0
+
+ vfenezfs %v0, %v0
+ vfenezfs %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenezh %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenezh %v0, %v0, %v0, 0
+
+ vfenezh %v0, %v0
+ vfenezh %v0, %v0, %v0, 0
+
+#CHECK: error: too few operands
+#CHECK: vfenezhs %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vfenezhs %v0, %v0, %v0, 0
+
+ vfenezhs %v0, %v0
+ vfenezhs %v0, %v0, %v0, 0
#CHECK: error: invalid operand
#CHECK: vfi %v0, %v0, 0, 0, -1
@@ -869,6 +1170,14 @@
vistrb %v0
vistrb %v0, %v0, 0, 0
+#CHECK: error: too few operands
+#CHECK: vistrbs %v0
+#CHECK: error: invalid operand
+#CHECK: vistrbs %v0, %v0, 0
+
+ vistrbs %v0
+ vistrbs %v0, %v0, 0
+
#CHECK: error: invalid operand
#CHECK: vistrf %v0, %v0, -1
#CHECK: error: invalid operand
@@ -883,6 +1192,14 @@
vistrf %v0
vistrf %v0, %v0, 0, 0
+#CHECK: error: too few operands
+#CHECK: vistrfs %v0
+#CHECK: error: invalid operand
+#CHECK: vistrfs %v0, %v0, 0
+
+ vistrfs %v0
+ vistrfs %v0, %v0, 0
+
#CHECK: error: invalid operand
#CHECK: vistrh %v0, %v0, -1
#CHECK: error: invalid operand
@@ -898,22 +1215,6 @@
vistrh %v0, %v0, 0, 0
#CHECK: error: too few operands
-#CHECK: vistrbs %v0
-#CHECK: error: invalid operand
-#CHECK: vistrbs %v0, %v0, 0
-
- vistrbs %v0
- vistrbs %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vistrfs %v0
-#CHECK: error: invalid operand
-#CHECK: vistrfs %v0, %v0, 0
-
- vistrfs %v0
- vistrfs %v0, %v0, 0
-
-#CHECK: error: too few operands
#CHECK: vistrhs %v0
#CHECK: error: invalid operand
#CHECK: vistrhs %v0, %v0, 0
@@ -1371,132 +1672,6 @@
vlvgh %v0, %r0, 0(%r0)
#CHECK: error: invalid operand
-#CHECK: vfene %v0, %v0, %v0, 0, -1
-#CHECK: error: invalid operand
-#CHECK: vfene %v0, %v0, %v0, 0, 16
-#CHECK: error: invalid operand
-#CHECK: vfene %v0, %v0, %v0, -1, 0
-#CHECK: error: invalid operand
-#CHECK: vfene %v0, %v0, %v0, 16, 0
-#CHECK: error: too few operands
-#CHECK: vfene %v0, %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfene %v0, %v0, %v0, 0, 0, 0
-
- vfene %v0, %v0, %v0, 0, -1
- vfene %v0, %v0, %v0, 0, 16
- vfene %v0, %v0, %v0, -1, 0
- vfene %v0, %v0, %v0, 16, 0
- vfene %v0, %v0, %v0
- vfene %v0, %v0, %v0, 0, 0, 0
-
-#CHECK: error: invalid operand
-#CHECK: vfeneb %v0, %v0, %v0, -1
-#CHECK: error: invalid operand
-#CHECK: vfeneb %v0, %v0, %v0, 16
-#CHECK: error: too few operands
-#CHECK: vfeneb %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfeneb %v0, %v0, %v0, 0, 0
-
- vfeneb %v0, %v0, %v0, -1
- vfeneb %v0, %v0, %v0, 16
- vfeneb %v0, %v0
- vfeneb %v0, %v0, %v0, 0, 0
-
-#CHECK: error: invalid operand
-#CHECK: vfenef %v0, %v0, %v0, -1
-#CHECK: error: invalid operand
-#CHECK: vfenef %v0, %v0, %v0, 16
-#CHECK: error: too few operands
-#CHECK: vfenef %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenef %v0, %v0, %v0, 0, 0
-
- vfenef %v0, %v0, %v0, -1
- vfenef %v0, %v0, %v0, 16
- vfenef %v0, %v0
- vfenef %v0, %v0, %v0, 0, 0
-
-#CHECK: error: invalid operand
-#CHECK: vfeneh %v0, %v0, %v0, -1
-#CHECK: error: invalid operand
-#CHECK: vfeneh %v0, %v0, %v0, 16
-#CHECK: error: too few operands
-#CHECK: vfeneh %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfeneh %v0, %v0, %v0, 0, 0
-
- vfeneh %v0, %v0, %v0, -1
- vfeneh %v0, %v0, %v0, 16
- vfeneh %v0, %v0
- vfeneh %v0, %v0, %v0, 0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenebs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenebs %v0, %v0, %v0, 0
-
- vfenebs %v0, %v0
- vfenebs %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenefs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenefs %v0, %v0, %v0, 0
-
- vfenefs %v0, %v0
- vfenefs %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenehs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenehs %v0, %v0, %v0, 0
-
- vfenehs %v0, %v0
- vfenehs %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenezb %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenezb %v0, %v0, %v0, 0
-
- vfenezb %v0, %v0
- vfenezb %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenezf %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenezf %v0, %v0, %v0, 0
-
- vfenezf %v0, %v0
- vfenezf %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenezh %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenezh %v0, %v0, %v0, 0
-
- vfenezh %v0, %v0
- vfenezh %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenezbs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenezbs %v0, %v0, %v0, 0
-
- vfenezbs %v0, %v0
- vfenezbs %v0, %v0, %v0, 0
-
-#CHECK: error: too few operands
-#CHECK: vfenezfs %v0, %v0
-#CHECK: error: invalid operand
-#CHECK: vfenezfs %v0, %v0, %v0, 0
-
- vfenezfs %v0, %v0
- vfenezfs %v0, %v0, %v0, 0
-
-#CHECK: error: invalid operand
#CHECK: vpdi %v0, %v0, %v0, -1
#CHECK: error: invalid operand
#CHECK: vpdi %v0, %v0, %v0, 16
@@ -1755,12 +1930,12 @@
#CHECK: error: invalid operand
#CHECK: vstrc %v0, %v0, %v0, %v0, 0, 0, 0
- vstrc %v0, %v0, %v0, %v0, 0, -1
- vstrc %v0, %v0, %v0, %v0, 0, 16
- vstrc %v0, %v0, %v0, %v0, -1, 0
- vstrc %v0, %v0, %v0, %v0, 16, 0
- vstrc %v0, %v0, %v0, %v0
- vstrc %v0, %v0, %v0, %v0, 0, 0, 0
+ vstrc %v0, %v0, %v0, %v0, 0, -1
+ vstrc %v0, %v0, %v0, %v0, 0, 16
+ vstrc %v0, %v0, %v0, %v0, -1, 0
+ vstrc %v0, %v0, %v0, %v0, 16, 0
+ vstrc %v0, %v0, %v0, %v0
+ vstrc %v0, %v0, %v0, %v0, 0, 0, 0
#CHECK: error: invalid operand
#CHECK: vstrcb %v0, %v0, %v0, %v0, -1
@@ -1771,10 +1946,10 @@
#CHECK: error: invalid operand
#CHECK: vstrcb %v0, %v0, %v0, %v0, 0, 0
- vstrcb %v0, %v0, %v0, %v0, -1
- vstrcb %v0, %v0, %v0, %v0, 16
- vstrcb %v0, %v0, %v0
- vstrcb %v0, %v0, %v0, %v0, 0, 0
+ vstrcb %v0, %v0, %v0, %v0, -1
+ vstrcb %v0, %v0, %v0, %v0, 16
+ vstrcb %v0, %v0, %v0
+ vstrcb %v0, %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
#CHECK: vstrcbs %v0, %v0, %v0, %v0, -1
@@ -1785,10 +1960,10 @@
#CHECK: error: invalid operand
#CHECK: vstrcbs %v0, %v0, %v0, %v0, 0, 0
- vstrcbs %v0, %v0, %v0, %v0, -1
- vstrcbs %v0, %v0, %v0, %v0, 16
- vstrcbs %v0, %v0, %v0
- vstrcbs %v0, %v0, %v0, %v0, 0, 0
+ vstrcbs %v0, %v0, %v0, %v0, -1
+ vstrcbs %v0, %v0, %v0, %v0, 16
+ vstrcbs %v0, %v0, %v0
+ vstrcbs %v0, %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
#CHECK: vstrcf %v0, %v0, %v0, %v0, -1
@@ -1799,10 +1974,24 @@
#CHECK: error: invalid operand
#CHECK: vstrcf %v0, %v0, %v0, %v0, 0, 0
- vstrcf %v0, %v0, %v0, %v0, -1
- vstrcf %v0, %v0, %v0, %v0, 16
- vstrcf %v0, %v0, %v0
- vstrcf %v0, %v0, %v0, %v0, 0, 0
+ vstrcf %v0, %v0, %v0, %v0, -1
+ vstrcf %v0, %v0, %v0, %v0, 16
+ vstrcf %v0, %v0, %v0
+ vstrcf %v0, %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrcfs %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrcfs %v0, %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vstrcfs %v0, %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vstrcfs %v0, %v0, %v0, %v0, 0, 0
+
+ vstrcfs %v0, %v0, %v0, %v0, -1
+ vstrcfs %v0, %v0, %v0, %v0, 16
+ vstrcfs %v0, %v0, %v0
+ vstrcfs %v0, %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
#CHECK: vstrch %v0, %v0, %v0, %v0, -1
@@ -1813,24 +2002,66 @@
#CHECK: error: invalid operand
#CHECK: vstrch %v0, %v0, %v0, %v0, 0, 0
- vstrch %v0, %v0, %v0, %v0, -1
- vstrch %v0, %v0, %v0, %v0, 16
- vstrch %v0, %v0, %v0
- vstrch %v0, %v0, %v0, %v0, 0, 0
+ vstrch %v0, %v0, %v0, %v0, -1
+ vstrch %v0, %v0, %v0, %v0, 16
+ vstrch %v0, %v0, %v0
+ vstrch %v0, %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
-#CHECK: vstrczh %v0, %v0, %v0, %v0, -1
+#CHECK: vstrchs %v0, %v0, %v0, %v0, -1
#CHECK: error: invalid operand
-#CHECK: vstrczh %v0, %v0, %v0, %v0, 16
+#CHECK: vstrchs %v0, %v0, %v0, %v0, 16
#CHECK: error: too few operands
-#CHECK: vstrczh %v0, %v0, %v0
+#CHECK: vstrchs %v0, %v0, %v0
#CHECK: error: invalid operand
-#CHECK: vstrczh %v0, %v0, %v0, %v0, 0, 0
+#CHECK: vstrchs %v0, %v0, %v0, %v0, 0, 0
- vstrczh %v0, %v0, %v0, %v0, -1
- vstrczh %v0, %v0, %v0, %v0, 16
- vstrczh %v0, %v0, %v0
- vstrczh %v0, %v0, %v0, %v0, 0, 0
+ vstrchs %v0, %v0, %v0, %v0, -1
+ vstrchs %v0, %v0, %v0, %v0, 16
+ vstrchs %v0, %v0, %v0
+ vstrchs %v0, %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrczb %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrczb %v0, %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vstrczb %v0, %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vstrczb %v0, %v0, %v0, %v0, 0, 0
+
+ vstrczb %v0, %v0, %v0, %v0, -1
+ vstrczb %v0, %v0, %v0, %v0, 16
+ vstrczb %v0, %v0, %v0
+ vstrczb %v0, %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrczbs %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrczbs %v0, %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vstrczbs %v0, %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vstrczbs %v0, %v0, %v0, %v0, 0, 0
+
+ vstrczbs %v0, %v0, %v0, %v0, -1
+ vstrczbs %v0, %v0, %v0, %v0, 16
+ vstrczbs %v0, %v0, %v0
+ vstrczbs %v0, %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrczf %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrczf %v0, %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vstrczf %v0, %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vstrczf %v0, %v0, %v0, %v0, 0, 0
+
+ vstrczf %v0, %v0, %v0, %v0, -1
+ vstrczf %v0, %v0, %v0, %v0, 16
+ vstrczf %v0, %v0, %v0
+ vstrczf %v0, %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
#CHECK: vstrczfs %v0, %v0, %v0, %v0, -1
@@ -1841,10 +2072,38 @@
#CHECK: error: invalid operand
#CHECK: vstrczfs %v0, %v0, %v0, %v0, 0, 0
- vstrczfs %v0, %v0, %v0, %v0, -1
- vstrczfs %v0, %v0, %v0, %v0, 16
- vstrczfs %v0, %v0, %v0
- vstrczfs %v0, %v0, %v0, %v0, 0, 0
+ vstrczfs %v0, %v0, %v0, %v0, -1
+ vstrczfs %v0, %v0, %v0, %v0, 16
+ vstrczfs %v0, %v0, %v0
+ vstrczfs %v0, %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrczh %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrczh %v0, %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vstrczh %v0, %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vstrczh %v0, %v0, %v0, %v0, 0, 0
+
+ vstrczh %v0, %v0, %v0, %v0, -1
+ vstrczh %v0, %v0, %v0, %v0, 16
+ vstrczh %v0, %v0, %v0
+ vstrczh %v0, %v0, %v0, %v0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrczhs %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrczhs %v0, %v0, %v0, %v0, 16
+#CHECK: error: too few operands
+#CHECK: vstrczhs %v0, %v0, %v0
+#CHECK: error: invalid operand
+#CHECK: vstrczhs %v0, %v0, %v0, %v0, 0, 0
+
+ vstrczhs %v0, %v0, %v0, %v0, -1
+ vstrczhs %v0, %v0, %v0, %v0, 16
+ vstrczhs %v0, %v0, %v0
+ vstrczhs %v0, %v0, %v0, %v0, 0, 0
#CHECK: error: invalid operand
#CHECK: wcdgb %v0, %v0, 0, -1
@@ -1937,79 +2196,4 @@
wledb %v0, %v0, 0, 16
wledb %v0, %v0, -1, 0
wledb %v0, %v0, 16, 0
-
-#CHECK: error: invalid operand
-#CHECK: lochie %r0, 66000
-#CHECK: error: invalid operand
-#CHECK: lochie %f0, 0
-#CHECK: error: invalid operand
-#CHECK: lochie 0, %r0
-
- lochie %r0, 66000
- lochie %f0, 0
- lochie 0, %r0
-
-#CHECK: error: invalid operand
-#CHECK: locghie %r0, 66000
-#CHECK: error: invalid operand
-#CHECK: locghie %f0, 0
-#CHECK: error: invalid operand
-#CHECK: locghie 0, %r0
-
- locghie %r0, 66000
- locghie %f0, 0
- locghie 0, %r0
-
-#CHECK: error: invalid operand
-#CHECK: lochhie %r0, 66000
-#CHECK: error: invalid operand
-#CHECK: lochhie %f0, 0
-#CHECK: error: invalid operand
-#CHECK: lochhie 0, %r0
-
- lochhie %r0, 66000
- lochhie %f0, 0
- lochhie 0, %r0
-
-#CHECK: error: invalid operand
-#CHECK: locfh %r0,0,-1
-#CHECK: error: invalid operand
-#CHECK: locfh %r0,0,16
-#CHECK: error: invalid operand
-#CHECK: locfh %r0,-524289,1
-#CHECK: error: invalid operand
-#CHECK: locfh %r0,524288,1
-#CHECK: error: invalid use of indexed addressing
-#CHECK: locfh %r0,0(%r1,%r2),1
-
- locfh %r0,0,-1
- locfh %r0,0,16
- locfh %r0,-524289,1
- locfh %r0,524288,1
- locfh %r0,0(%r1,%r2),1
-
-#CHECK: error: invalid operand
-#CHECK: locfhr %r0,%r0,-1
-#CHECK: error: invalid operand
-#CHECK: locfhr %r0,%r0,16
-
- locfhr %r0,%r0,-1
- locfhr %r0,%r0,16
-
-#CHECK: error: invalid operand
-#CHECK: stocfh %r0,0,-1
-#CHECK: error: invalid operand
-#CHECK: stocfh %r0,0,16
-#CHECK: error: invalid operand
-#CHECK: stocfh %r0,-524289,1
-#CHECK: error: invalid operand
-#CHECK: stocfh %r0,524288,1
-#CHECK: error: invalid use of indexed addressing
-#CHECK: stocfh %r0,0(%r1,%r2),1
-
- stocfh %r0,0,-1
- stocfh %r0,0,16
- stocfh %r0,-524289,1
- stocfh %r0,524288,1
- stocfh %r0,0(%r1,%r2),1
diff --git a/test/MC/SystemZ/insn-bad-z196.s b/test/MC/SystemZ/insn-bad-z196.s
index e370f10eefb4..04c19ff6319c 100644
--- a/test/MC/SystemZ/insn-bad-z196.s
+++ b/test/MC/SystemZ/insn-bad-z196.s
@@ -503,6 +503,33 @@
fixbra %f0, 0, %f2, 0
fixbra %f2, 0, %f0, 0
+#CHECK: error: invalid register pair
+#CHECK: kmctr %r1, %r2, %r4
+#CHECK: error: invalid register pair
+#CHECK: kmctr %r2, %r1, %r4
+#CHECK: error: invalid register pair
+#CHECK: kmctr %r2, %r4, %r1
+
+ kmctr %r1, %r2, %r4
+ kmctr %r2, %r1, %r4
+ kmctr %r2, %r4, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: kmf %r1, %r2
+#CHECK: error: invalid register pair
+#CHECK: kmf %r2, %r1
+
+ kmf %r1, %r2
+ kmf %r2, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: kmo %r1, %r2
+#CHECK: error: invalid register pair
+#CHECK: kmo %r2, %r1
+
+ kmo %r1, %r2
+ kmo %r2, %r1
+
#CHECK: error: invalid operand
#CHECK: laa %r0, %r0, -524289
#CHECK: error: invalid operand
@@ -757,11 +784,6 @@
locr %r0,%r0,-1
locr %r0,%r0,16
-#CHECK: error: instruction requires: execution-hint
-#CHECK: niai 0, 0
-
- niai 0, 0
-
#CHECK: error: invalid register pair
#CHECK: lpd %r1, 0, 0
#CHECK: error: invalid use of indexed addressing
@@ -802,6 +824,11 @@
lpdg %r2, 0(%r1), -1(%r15)
lpdg %r2, 0(%r1), 4096(%r15)
+#CHECK: error: instruction requires: execution-hint
+#CHECK: niai 0, 0
+
+ niai 0, 0
+
#CHECK: error: instruction requires: transactional-execution
#CHECK: ntstg %r0, 524287(%r1,%r15)
@@ -933,14 +960,6 @@
stch %r0, 524288
#CHECK: error: invalid operand
-#CHECK: sthh %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: sthh %r0, 524288
-
- sthh %r0, -524289
- sthh %r0, 524288
-
-#CHECK: error: invalid operand
#CHECK: stfh %r0, -524289
#CHECK: error: invalid operand
#CHECK: stfh %r0, 524288
@@ -949,6 +968,14 @@
stfh %r0, 524288
#CHECK: error: invalid operand
+#CHECK: sthh %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: sthh %r0, 524288
+
+ sthh %r0, -524289
+ sthh %r0, 524288
+
+#CHECK: error: invalid operand
#CHECK: stoc %r0,0,-1
#CHECK: error: invalid operand
#CHECK: stoc %r0,0,16
diff --git a/test/MC/SystemZ/insn-bad-zEC12.s b/test/MC/SystemZ/insn-bad-zEC12.s
index 53dbd638e716..4bc3be3292e4 100644
--- a/test/MC/SystemZ/insn-bad-zEC12.s
+++ b/test/MC/SystemZ/insn-bad-zEC12.s
@@ -63,31 +63,6 @@
bprp 0, 0, 0x1000000
#CHECK: error: invalid operand
-#CHECK: clt %r0, -1, 0
-#CHECK: error: invalid operand
-#CHECK: clt %r0, 16, 0
-#CHECK: error: invalid operand
-#CHECK: clt %r0, 12, -524289
-#CHECK: error: invalid operand
-#CHECK: clt %r0, 12, 524288
-#CHECK: error: invalid use of indexed addressing
-#CHECK: clt %r0, 12, 0(%r1,%r2)
-
- clt %r0, -1, 0
- clt %r0, 16, 0
- clt %r0, 12, -524289
- clt %r0, 12, 524288
- clt %r0, 12, 0(%r1,%r2)
-
-#CHECK: error: invalid instruction
-#CHECK: clto %r0, 0
-#CHECK: error: invalid instruction
-#CHECK: cltno %r0, 0
-
- clto %r0, 0
- cltno %r0, 0
-
-#CHECK: error: invalid operand
#CHECK: clgt %r0, -1, 0
#CHECK: error: invalid operand
#CHECK: clgt %r0, 16, 0
@@ -105,12 +80,37 @@
clgt %r0, 12, 0(%r1,%r2)
#CHECK: error: invalid instruction
-#CHECK: clgto %r0, 0
-#CHECK: error: invalid instruction
#CHECK: clgtno %r0, 0
+#CHECK: error: invalid instruction
+#CHECK: clgto %r0, 0
- clgto %r0, 0
clgtno %r0, 0
+ clgto %r0, 0
+
+#CHECK: error: invalid operand
+#CHECK: clt %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: clt %r0, 16, 0
+#CHECK: error: invalid operand
+#CHECK: clt %r0, 12, -524289
+#CHECK: error: invalid operand
+#CHECK: clt %r0, 12, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: clt %r0, 12, 0(%r1,%r2)
+
+ clt %r0, -1, 0
+ clt %r0, 16, 0
+ clt %r0, 12, -524289
+ clt %r0, 12, 524288
+ clt %r0, 12, 0(%r1,%r2)
+
+#CHECK: error: invalid instruction
+#CHECK: cltno %r0, 0
+#CHECK: error: invalid instruction
+#CHECK: clto %r0, 0
+
+ cltno %r0, 0
+ clto %r0, 0
#CHECK: error: invalid operand
#CHECK: lat %r0, -524289
@@ -120,6 +120,11 @@
lat %r0, -524289
lat %r0, 524288
+#CHECK: error: instruction requires: vector
+#CHECK: lcbb %r0, 0, 0
+
+ lcbb %r0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: lfhat %r0, -524289
#CHECK: error: invalid operand
@@ -152,10 +157,15 @@
llgtat %r0, -524289
llgtat %r0, 524288
-#CHECK: error: instruction requires: vector
-#CHECK: lcbb %r0, 0, 0
+#CHECK: error: instruction requires: load-store-on-cond-2
+#CHECK: locghio %r11, 42
- lcbb %r0, 0, 0
+ locghio %r11, 42
+
+#CHECK: error: instruction requires: load-store-on-cond-2
+#CHECK: lochio %r11, 42
+
+ lochio %r11, 42
#CHECK: error: invalid operand
#CHECK: niai -1, 0
@@ -187,6 +197,11 @@
ppa %r0, %r0, -1
ppa %r0, %r0, 16
+#CHECK: error: instruction requires: message-security-assist-extension5
+#CHECK: ppno %r2, %r4
+
+ ppno %r2, %r4
+
#CHECK: error: invalid operand
#CHECK: risbgn %r0,%r0,0,0,-1
#CHECK: error: invalid operand
@@ -337,28 +352,28 @@
#CHECK: error: instruction requires: vector
#CHECK: vceqb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vceqbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
#CHECK: vceqf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vceqfs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
#CHECK: vceqg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vceqh %v0, %v0, %v0
+#CHECK: vceqgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vceqbs %v0, %v0, %v0
+#CHECK: vceqh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vceqhs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vceqfs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vceqgs %v0, %v0, %v0
vceqb %v0, %v0, %v0
+ vceqbs %v0, %v0, %v0
vceqf %v0, %v0, %v0
+ vceqfs %v0, %v0, %v0
vceqg %v0, %v0, %v0
+ vceqgs %v0, %v0, %v0
vceqh %v0, %v0, %v0
- vceqbs %v0, %v0, %v0
vceqhs %v0, %v0, %v0
- vceqfs %v0, %v0, %v0
- vceqgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vcgdb %v0, %v0, 0, 0
@@ -368,54 +383,54 @@
#CHECK: error: instruction requires: vector
#CHECK: vchb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vchbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
#CHECK: vchf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vchfs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
#CHECK: vchg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vchh %v0, %v0, %v0
+#CHECK: vchgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vchbs %v0, %v0, %v0
+#CHECK: vchh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vchhs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vchfs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vchgs %v0, %v0, %v0
vchb %v0, %v0, %v0
+ vchbs %v0, %v0, %v0
vchf %v0, %v0, %v0
+ vchfs %v0, %v0, %v0
vchg %v0, %v0, %v0
+ vchgs %v0, %v0, %v0
vchh %v0, %v0, %v0
- vchbs %v0, %v0, %v0
vchhs %v0, %v0, %v0
- vchfs %v0, %v0, %v0
- vchgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vchlb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vchlbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
#CHECK: vchlf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vchlfs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
#CHECK: vchlg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vchlh %v0, %v0, %v0
+#CHECK: vchlgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vchlbs %v0, %v0, %v0
+#CHECK: vchlh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vchlhs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vchlfs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vchlgs %v0, %v0, %v0
vchlb %v0, %v0, %v0
+ vchlbs %v0, %v0, %v0
vchlf %v0, %v0, %v0
+ vchlfs %v0, %v0, %v0
vchlg %v0, %v0, %v0
+ vchlgs %v0, %v0, %v0
vchlh %v0, %v0, %v0
- vchlbs %v0, %v0, %v0
vchlhs %v0, %v0, %v0
- vchlfs %v0, %v0, %v0
- vchlgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vcksm %v0, %v0, %v0
@@ -470,20 +485,6 @@
vech %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: verimb %v0, %v0, %v0, 0
-#CHECK: error: instruction requires: vector
-#CHECK: verimf %v0, %v0, %v0, 0
-#CHECK: error: instruction requires: vector
-#CHECK: verimg %v0, %v0, %v0, 0
-#CHECK: error: instruction requires: vector
-#CHECK: verimh %v0, %v0, %v0, 0
-
- verimb %v0, %v0, %v0, 0
- verimf %v0, %v0, %v0, 0
- verimg %v0, %v0, %v0, 0
- verimh %v0, %v0, %v0, 0
-
-#CHECK: error: instruction requires: vector
#CHECK: veclb %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: veclf %v0, %v0
@@ -498,18 +499,18 @@
veclh %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: verllvb %v0, %v0, %v0
+#CHECK: verimb %v0, %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: verllvf %v0, %v0, %v0
+#CHECK: verimf %v0, %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: verllvg %v0, %v0, %v0
+#CHECK: verimg %v0, %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: verllvh %v0, %v0, %v0
+#CHECK: verimh %v0, %v0, %v0, 0
- verllvb %v0, %v0, %v0
- verllvf %v0, %v0, %v0
- verllvg %v0, %v0, %v0
- verllvh %v0, %v0, %v0
+ verimb %v0, %v0, %v0, 0
+ verimf %v0, %v0, %v0, 0
+ verimg %v0, %v0, %v0, 0
+ verimh %v0, %v0, %v0, 0
#CHECK: error: instruction requires: vector
#CHECK: verllb %v0, %v0, 0
@@ -526,18 +527,18 @@
verllh %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: veslvb %v0, %v0, %v0
+#CHECK: verllvb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: veslvf %v0, %v0, %v0
+#CHECK: verllvf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: veslvg %v0, %v0, %v0
+#CHECK: verllvg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: veslvh %v0, %v0, %v0
+#CHECK: verllvh %v0, %v0, %v0
- veslvb %v0, %v0, %v0
- veslvf %v0, %v0, %v0
- veslvg %v0, %v0, %v0
- veslvh %v0, %v0, %v0
+ verllvb %v0, %v0, %v0
+ verllvf %v0, %v0, %v0
+ verllvg %v0, %v0, %v0
+ verllvh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: veslb %v0, %v0, 0
@@ -554,18 +555,18 @@
veslh %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: vesravb %v0, %v0, %v0
+#CHECK: veslvb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vesravf %v0, %v0, %v0
+#CHECK: veslvf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vesravg %v0, %v0, %v0
+#CHECK: veslvg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vesravh %v0, %v0, %v0
+#CHECK: veslvh %v0, %v0, %v0
- vesravb %v0, %v0, %v0
- vesravf %v0, %v0, %v0
- vesravg %v0, %v0, %v0
- vesravh %v0, %v0, %v0
+ veslvb %v0, %v0, %v0
+ veslvf %v0, %v0, %v0
+ veslvg %v0, %v0, %v0
+ veslvh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vesrab %v0, %v0, 0
@@ -582,18 +583,18 @@
vesrah %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: vesrlvb %v0, %v0, %v0
+#CHECK: vesravb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vesrlvf %v0, %v0, %v0
+#CHECK: vesravf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vesrlvg %v0, %v0, %v0
+#CHECK: vesravg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vesrlvh %v0, %v0, %v0
+#CHECK: vesravh %v0, %v0, %v0
- vesrlvb %v0, %v0, %v0
- vesrlvf %v0, %v0, %v0
- vesrlvg %v0, %v0, %v0
- vesrlvh %v0, %v0, %v0
+ vesravb %v0, %v0, %v0
+ vesravf %v0, %v0, %v0
+ vesravg %v0, %v0, %v0
+ vesravh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vesrlb %v0, %v0, 0
@@ -610,142 +611,156 @@
vesrlh %v0, %v0, 0
#CHECK: error: instruction requires: vector
-#CHECK: vfadb %v0, %v0, %v0
-
- vfadb %v0, %v0, %v0
-
+#CHECK: vesrlvb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfcedb %v0, %v0, %v0
-#CHECK: vfcedbs %v0, %v0, %v0
-
- vfcedb %v0, %v0, %v0
- vfcedbs %v0, %v0, %v0
-
+#CHECK: vesrlvf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfchdb %v0, %v0, %v0
-#CHECK: vfchdbs %v0, %v0, %v0
+#CHECK: vesrlvg %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vesrlvh %v0, %v0, %v0
- vfchdb %v0, %v0, %v0
- vfchdbs %v0, %v0, %v0
+ vesrlvb %v0, %v0, %v0
+ vesrlvf %v0, %v0, %v0
+ vesrlvg %v0, %v0, %v0
+ vesrlvh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfddb %v0, %v0, %v0
+#CHECK: vfadb %v0, %v0, %v0
- vfddb %v0, %v0, %v0
+ vfadb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfaeb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaezb %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vfaebs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaezbs %v0, %v0, %v0
+#CHECK: vfaef %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaeh %v0, %v0, %v0
+#CHECK: vfaefs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaezh %v0, %v0, %v0
+#CHECK: vfaeh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfaehs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaezhs %v0, %v0, %v0
+#CHECK: vfaezb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaef %v0, %v0, %v0
+#CHECK: vfaezbs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfaezf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfaefs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vfaezfs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vfaezh %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vfaezhs %v0, %v0, %v0
vfaeb %v0, %v0, %v0
- vfaezb %v0, %v0, %v0
vfaebs %v0, %v0, %v0
- vfaezbs %v0, %v0, %v0
+ vfaef %v0, %v0, %v0
+ vfaefs %v0, %v0, %v0
vfaeh %v0, %v0, %v0
- vfaezh %v0, %v0, %v0
vfaehs %v0, %v0, %v0
- vfaezhs %v0, %v0, %v0
- vfaef %v0, %v0, %v0
+ vfaezb %v0, %v0, %v0
+ vfaezbs %v0, %v0, %v0
vfaezf %v0, %v0, %v0
- vfaefs %v0, %v0, %v0
vfaezfs %v0, %v0, %v0
+ vfaezh %v0, %v0, %v0
+ vfaezhs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeeb %v0, %v0, %v0
+#CHECK: vfcedb %v0, %v0, %v0
+#CHECK: vfcedbs %v0, %v0, %v0
+
+ vfcedb %v0, %v0, %v0
+ vfcedbs %v0, %v0, %v0
+
#CHECK: error: instruction requires: vector
-#CHECK: vfeezb %v0, %v0, %v0
+#CHECK: vfchdb %v0, %v0, %v0
+#CHECK: vfchdbs %v0, %v0, %v0
+
+ vfchdb %v0, %v0, %v0
+ vfchdbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector
+#CHECK: vfddb %v0, %v0, %v0
+
+ vfddb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector
+#CHECK: vfeeb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfeebs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeezbs %v0, %v0, %v0
+#CHECK: vfeef %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeeh %v0, %v0, %v0
+#CHECK: vfeefs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeezh %v0, %v0, %v0
+#CHECK: vfeeh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfeehs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeezhs %v0, %v0, %v0
+#CHECK: vfeezb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeef %v0, %v0, %v0
+#CHECK: vfeezbs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfeezf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeefs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vfeezfs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vfeezh %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vfeezhs %v0, %v0, %v0
vfeeb %v0, %v0, %v0
- vfeezb %v0, %v0, %v0
vfeebs %v0, %v0, %v0
- vfeezbs %v0, %v0, %v0
+ vfeef %v0, %v0, %v0
+ vfeefs %v0, %v0, %v0
vfeeh %v0, %v0, %v0
- vfeezh %v0, %v0, %v0
vfeehs %v0, %v0, %v0
- vfeezhs %v0, %v0, %v0
- vfeef %v0, %v0, %v0
+ vfeezb %v0, %v0, %v0
+ vfeezbs %v0, %v0, %v0
vfeezf %v0, %v0, %v0
- vfeefs %v0, %v0, %v0
vfeezfs %v0, %v0, %v0
+ vfeezh %v0, %v0, %v0
+ vfeezhs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfeneb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfenezb %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vfenebs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfenezbs %v0, %v0, %v0
+#CHECK: vfenef %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfeneh %v0, %v0, %v0
+#CHECK: vfenefs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfenezh %v0, %v0, %v0
+#CHECK: vfeneh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfenehs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfenezhs %v0, %v0, %v0
+#CHECK: vfenezb %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfenef %v0, %v0, %v0
+#CHECK: vfenezbs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfenezf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vfenefs %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vfenezfs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vfenezh %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vfenezhs %v0, %v0, %v0
vfeneb %v0, %v0, %v0
- vfenezb %v0, %v0, %v0
vfenebs %v0, %v0, %v0
- vfenezbs %v0, %v0, %v0
+ vfenef %v0, %v0, %v0
+ vfenefs %v0, %v0, %v0
vfeneh %v0, %v0, %v0
- vfenezh %v0, %v0, %v0
vfenehs %v0, %v0, %v0
- vfenezhs %v0, %v0, %v0
- vfenef %v0, %v0, %v0
+ vfenezb %v0, %v0, %v0
+ vfenezbs %v0, %v0, %v0
vfenezf %v0, %v0, %v0
- vfenefs %v0, %v0, %v0
vfenezfs %v0, %v0, %v0
+ vfenezh %v0, %v0, %v0
+ vfenezhs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vfidb %v0, %v0, 0, 0
@@ -753,26 +768,6 @@
vfidb %v0, %v0, 0, 0
#CHECK: error: instruction requires: vector
-#CHECK: vistrb %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vistrbs %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vistrh %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vistrhs %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vistrf %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vistrfs %v0, %v0
-
- vistrb %v0, %v0
- vistrbs %v0, %v0
- vistrh %v0, %v0
- vistrhs %v0, %v0
- vistrf %v0, %v0
- vistrfs %v0, %v0
-
-#CHECK: error: instruction requires: vector
#CHECK: vflcdb %v0, %v0
vflcdb %v0, %v0
@@ -873,6 +868,26 @@
vgmh %v0, 0, 0
#CHECK: error: instruction requires: vector
+#CHECK: vistrb %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vistrbs %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vistrf %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vistrfs %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vistrh %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vistrhs %v0, %v0
+
+ vistrb %v0, %v0
+ vistrbs %v0, %v0
+ vistrf %v0, %v0
+ vistrfs %v0, %v0
+ vistrh %v0, %v0
+ vistrhs %v0, %v0
+
+#CHECK: error: instruction requires: vector
#CHECK: vl %v0, 0
vl %v0, 0
@@ -1309,44 +1324,44 @@
vpkh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpksf %v0, %v0, %v0
+#CHECK: vpklsf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpksg %v0, %v0, %v0
+#CHECK: vpklsfs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpksh %v0, %v0, %v0
+#CHECK: vpklsg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpksfs %v0, %v0, %v0
+#CHECK: vpklsgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpksgs %v0, %v0, %v0
+#CHECK: vpklsh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpkshs %v0, %v0, %v0
+#CHECK: vpklshs %v0, %v0, %v0
- vpksf %v0, %v0, %v0
- vpksg %v0, %v0, %v0
- vpksh %v0, %v0, %v0
- vpksfs %v0, %v0, %v0
- vpksgs %v0, %v0, %v0
- vpkshs %v0, %v0, %v0
+ vpklsf %v0, %v0, %v0
+ vpklsfs %v0, %v0, %v0
+ vpklsg %v0, %v0, %v0
+ vpklsgs %v0, %v0, %v0
+ vpklsh %v0, %v0, %v0
+ vpklshs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpklsf %v0, %v0, %v0
+#CHECK: vpksf %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpklsg %v0, %v0, %v0
+#CHECK: vpksfs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpklsh %v0, %v0, %v0
+#CHECK: vpksg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpklsfs %v0, %v0, %v0
+#CHECK: vpksgs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpklsgs %v0, %v0, %v0
+#CHECK: vpksh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vpklshs %v0, %v0, %v0
+#CHECK: vpkshs %v0, %v0, %v0
- vpklsf %v0, %v0, %v0
- vpklsg %v0, %v0, %v0
- vpklsh %v0, %v0, %v0
- vpklsfs %v0, %v0, %v0
- vpklsgs %v0, %v0, %v0
- vpklshs %v0, %v0, %v0
+ vpksf %v0, %v0, %v0
+ vpksfs %v0, %v0, %v0
+ vpksg %v0, %v0, %v0
+ vpksgs %v0, %v0, %v0
+ vpksh %v0, %v0, %v0
+ vpkshs %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vpopct %v0, %v0, 0
@@ -1502,40 +1517,48 @@
#CHECK: error: instruction requires: vector
#CHECK: vstrcb %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrczb %v0, %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vstrcbs %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrczbs %v0, %v0, %v0, %v0
+#CHECK: vstrcf %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrch %v0, %v0, %v0, %v0
+#CHECK: vstrcfs %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrczh %v0, %v0, %v0, %v0
+#CHECK: vstrch %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vstrchs %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrczhs %v0, %v0, %v0, %v0
+#CHECK: vstrczb %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrcf %v0, %v0, %v0, %v0
+#CHECK: vstrczbs %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vstrczf %v0, %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vstrcfs %v0, %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
#CHECK: vstrczfs %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vstrczh %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vstrczhs %v0, %v0, %v0, %v0
vstrcb %v0, %v0, %v0, %v0
- vstrczb %v0, %v0, %v0, %v0
vstrcbs %v0, %v0, %v0, %v0
- vstrczbs %v0, %v0, %v0, %v0
+ vstrcf %v0, %v0, %v0, %v0
+ vstrcfs %v0, %v0, %v0, %v0
vstrch %v0, %v0, %v0, %v0
- vstrczh %v0, %v0, %v0, %v0
vstrchs %v0, %v0, %v0, %v0
- vstrczhs %v0, %v0, %v0, %v0
- vstrcf %v0, %v0, %v0, %v0
+ vstrczb %v0, %v0, %v0, %v0
+ vstrczbs %v0, %v0, %v0, %v0
vstrczf %v0, %v0, %v0, %v0
- vstrcfs %v0, %v0, %v0, %v0
vstrczfs %v0, %v0, %v0, %v0
+ vstrczh %v0, %v0, %v0, %v0
+ vstrczhs %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector
+#CHECK: vsumb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vsumh %v0, %v0, %v0
+
+ vsumb %v0, %v0, %v0
+ vsumh %v0, %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vsumgh %v0, %v0, %v0
@@ -1554,14 +1577,6 @@
vsumqg %v0, %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vsumb %v0, %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vsumh %v0, %v0, %v0
-
- vsumb %v0, %v0, %v0
- vsumh %v0, %v0, %v0
-
-#CHECK: error: instruction requires: vector
#CHECK: vtm %v0, %v0
vtm %v0, %v0
@@ -1578,17 +1593,6 @@
vuphh %v0, %v0
#CHECK: error: instruction requires: vector
-#CHECK: vuplhb %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vuplhf %v0, %v0
-#CHECK: error: instruction requires: vector
-#CHECK: vuplhh %v0, %v0
-
- vuplhb %v0, %v0
- vuplhf %v0, %v0
- vuplhh %v0, %v0
-
-#CHECK: error: instruction requires: vector
#CHECK: vuplb %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vuplf %v0, %v0
@@ -1600,6 +1604,17 @@
vuplhw %v0, %v0
#CHECK: error: instruction requires: vector
+#CHECK: vuplhb %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vuplhf %v0, %v0
+#CHECK: error: instruction requires: vector
+#CHECK: vuplhh %v0, %v0
+
+ vuplhb %v0, %v0
+ vuplhf %v0, %v0
+ vuplhh %v0, %v0
+
+#CHECK: error: instruction requires: vector
#CHECK: vupllb %v0, %v0
#CHECK: error: instruction requires: vector
#CHECK: vupllf %v0, %v0
@@ -1741,13 +1756,3 @@
wledb %v0, %v0, 0, 0
-#CHECK: error: instruction requires: load-store-on-cond-2
-#CHECK: lochio %r11, 42
-
- lochio %r11, 42
-
-#CHECK: error: instruction requires: load-store-on-cond-2
-#CHECK: locghio %r11, 42
-
- locghio %r11, 42
-
diff --git a/test/MC/SystemZ/insn-bad.s b/test/MC/SystemZ/insn-bad.s
index 018070a74dfc..b96c661ae3da 100644
--- a/test/MC/SystemZ/insn-bad.s
+++ b/test/MC/SystemZ/insn-bad.s
@@ -167,16 +167,6 @@
alfi %r0, -1
alfi %r0, (1 << 32)
-#CHECK: error: instruction requires: distinct-ops
-#CHECK: alghsik %r1, %r2, 3
-
- alghsik %r1, %r2, 3
-
-#CHECK: error: instruction requires: distinct-ops
-#CHECK: alhsik %r1, %r2, 3
-
- alhsik %r1, %r2, 3
-
#CHECK: error: invalid operand
#CHECK: alg %r0, -524289
#CHECK: error: invalid operand
@@ -202,16 +192,60 @@
algfi %r0, (1 << 32)
#CHECK: error: instruction requires: distinct-ops
+#CHECK: alghsik %r1, %r2, 3
+
+ alghsik %r1, %r2, 3
+
+#CHECK: error: instruction requires: distinct-ops
#CHECK: algrk %r2,%r3,%r4
algrk %r2,%r3,%r4
#CHECK: error: instruction requires: distinct-ops
+#CHECK: alhsik %r1, %r2, 3
+
+ alhsik %r1, %r2, 3
+
+#CHECK: error: instruction requires: distinct-ops
#CHECK: alrk %r2,%r3,%r4
alrk %r2,%r3,%r4
#CHECK: error: invalid operand
+#CHECK: algsi -524289, 0
+#CHECK: error: invalid operand
+#CHECK: algsi 524288, 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: algsi 0(%r1,%r2), 0
+#CHECK: error: invalid operand
+#CHECK: algsi 0, -129
+#CHECK: error: invalid operand
+#CHECK: algsi 0, 128
+
+ algsi -524289, 0
+ algsi 524288, 0
+ algsi 0(%r1,%r2), 0
+ algsi 0, -129
+ algsi 0, 128
+
+#CHECK: error: invalid operand
+#CHECK: alsi -524289, 0
+#CHECK: error: invalid operand
+#CHECK: alsi 524288, 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: alsi 0(%r1,%r2), 0
+#CHECK: error: invalid operand
+#CHECK: alsi 0, -129
+#CHECK: error: invalid operand
+#CHECK: alsi 0, 128
+
+ alsi -524289, 0
+ alsi 524288, 0
+ alsi 0(%r1,%r2), 0
+ alsi 0, -129
+ alsi 0, 128
+
+#CHECK: error: invalid operand
#CHECK: aly %r0, -524289
#CHECK: error: invalid operand
#CHECK: aly %r0, 524288
@@ -219,6 +253,59 @@
aly %r0, -524289
aly %r0, 524288
+#CHECK: error: missing length in address
+#CHECK: ap 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: ap 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: ap 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: ap 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: ap 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: ap 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: ap 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ap 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ap 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: ap 0(-), 0(1)
+
+ ap 0, 0(1)
+ ap 0(1), 0
+ ap 0(%r1), 0(1,%r1)
+ ap 0(1,%r1), 0(%r1)
+ ap 0(0,%r1), 0(1,%r1)
+ ap 0(1,%r1), 0(0,%r1)
+ ap 0(17,%r1), 0(1,%r1)
+ ap 0(1,%r1), 0(17,%r1)
+ ap -1(1,%r1), 0(1,%r1)
+ ap 4096(1,%r1), 0(1,%r1)
+ ap 0(1,%r1), -1(1,%r1)
+ ap 0(1,%r1), 4096(1,%r1)
+ ap 0(1,%r0), 0(1,%r1)
+ ap 0(1,%r1), 0(1,%r0)
+ ap 0(%r1,%r2), 0(1,%r1)
+ ap 0(1,%r2), 0(%r1,%r2)
+ ap 0(-), 0(1)
+
#CHECK: error: instruction requires: distinct-ops
#CHECK: ark %r2,%r3,%r4
@@ -296,6 +383,22 @@
bcr -1, %r1
bcr 16, %r1
+#CHECK: error: invalid operand
+#CHECK: bct %r0, -1
+#CHECK: error: invalid operand
+#CHECK: bct %r0, 4096
+
+ bct %r0, -1
+ bct %r0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: bctg %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: bctg %r0, 524288
+
+ bctg %r0, -524289
+ bctg %r0, 524288
+
#CHECK: error: offset out of range
#CHECK: bras %r0, -0x100002
#CHECK: error: offset out of range
@@ -374,22 +477,6 @@
brcl -1, bar
brcl 16, bar
-#CHECK: error: invalid operand
-#CHECK: bct %r0, -1
-#CHECK: error: invalid operand
-#CHECK: bct %r0, 4096
-
- bct %r0, -1
- bct %r0, 4096
-
-#CHECK: error: invalid operand
-#CHECK: bctg %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: bctg %r0, 524288
-
- bctg %r0, -524289
- bctg %r0, 524288
-
#CHECK: error: offset out of range
#CHECK: brct %r0, -0x100002
#CHECK: error: offset out of range
@@ -423,25 +510,6 @@
brcth %r0, 0
-#CHECK: error: invalid operand
-#CHECK: bxh %r0, %r0, 4096
-#CHECK: error: invalid use of indexed addressing
-#CHECK: bxh %r0, %r0, 0(%r1,%r2)
-
- bxh %r0, %r0, 4096
- bxh %r0, %r0, 0(%r1,%r2)
-
-#CHECK: error: invalid operand
-#CHECK: bxhg %r0, %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: bxhg %r0, %r0, 524288
-#CHECK: error: invalid use of indexed addressing
-#CHECK: bxhg %r0, %r0, 0(%r1,%r2)
-
- bxhg %r0, %r0, -524289
- bxhg %r0, %r0, 524288
- bxhg %r0, %r0, 0(%r1,%r2)
-
#CHECK: error: offset out of range
#CHECK: brxh %r0, %r2, -0x100002
#CHECK: error: offset out of range
@@ -470,25 +538,6 @@
brxhg %r0, %r2, 1
brxhg %r0, %r2, 0x10000
-#CHECK: error: invalid operand
-#CHECK: bxle %r0, %r0, 4096
-#CHECK: error: invalid use of indexed addressing
-#CHECK: bxle %r0, %r0, 0(%r1,%r2)
-
- bxle %r0, %r0, 4096
- bxle %r0, %r0, 0(%r1,%r2)
-
-#CHECK: error: invalid operand
-#CHECK: bxhg %r0, %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: bxhg %r0, %r0, 524288
-#CHECK: error: invalid use of indexed addressing
-#CHECK: bxhg %r0, %r0, 0(%r1,%r2)
-
- bxhg %r0, %r0, -524289
- bxhg %r0, %r0, 524288
- bxhg %r0, %r0, 0(%r1,%r2)
-
#CHECK: error: offset out of range
#CHECK: brxle %r0, %r2, -0x100002
#CHECK: error: offset out of range
@@ -518,6 +567,44 @@
brxlg %r0, %r2, 0x10000
#CHECK: error: invalid operand
+#CHECK: bxh %r0, %r0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: bxh %r0, %r0, 0(%r1,%r2)
+
+ bxh %r0, %r0, 4096
+ bxh %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: bxhg %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: bxhg %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: bxhg %r0, %r0, 0(%r1,%r2)
+
+ bxhg %r0, %r0, -524289
+ bxhg %r0, %r0, 524288
+ bxhg %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: bxle %r0, %r0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: bxle %r0, %r0, 0(%r1,%r2)
+
+ bxle %r0, %r0, 4096
+ bxle %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: bxleg %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: bxleg %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: bxleg %r0, %r0, 0(%r1,%r2)
+
+ bxleg %r0, %r0, -524289
+ bxleg %r0, %r0, 524288
+ bxleg %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: c %r0, -1
#CHECK: error: invalid operand
#CHECK: c %r0, 4096
@@ -633,6 +720,17 @@
celgbr %f0, 0, %r0, 0
#CHECK: error: invalid operand
+#CHECK: cfc -1
+#CHECK: error: invalid operand
+#CHECK: cfc 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: cfc 0(%r1,%r2)
+
+ cfc -1
+ cfc 4096
+ cfc 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: cfdbr %r0, -1, %f0
#CHECK: error: invalid operand
#CHECK: cfdbr %r0, 16, %f0
@@ -819,26 +917,26 @@
cgij %r0, 0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: cgijo %r0, 0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: cgijno %r0, 0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: cgijo %r0, 0, 0, 0
- cgijo %r0, 0, 0, 0
cgijno %r0, 0, 0, 0
+ cgijo %r0, 0, 0, 0
#CHECK: error: invalid operand
#CHECK: cgit %r0, -32769
#CHECK: error: invalid operand
#CHECK: cgit %r0, 32768
#CHECK: error: invalid instruction
-#CHECK: cgito %r0, 0
-#CHECK: error: invalid instruction
#CHECK: cgitno %r0, 0
+#CHECK: error: invalid instruction
+#CHECK: cgito %r0, 0
cgit %r0, -32769
cgit %r0, 32768
- cgito %r0, 0
cgitno %r0, 0
+ cgito %r0, 0
#CHECK: error: offset out of range
#CHECK: cgrj %r0, %r0, 0, -0x100002
@@ -855,12 +953,12 @@
cgrj %r0, %r0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: cgrjo %r0, %r0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: cgrjno %r0, %r0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: cgrjo %r0, %r0, 0, 0
- cgrjo %r0, %r0, 0, 0
cgrjno %r0, %r0, 0, 0
+ cgrjo %r0, %r0, 0, 0
#CHECK: error: offset out of range
#CHECK: cgrl %r0, -0x1000000002
@@ -877,12 +975,12 @@
cgrl %r0, 0x100000000
#CHECK: error: invalid instruction
-#CHECK: cgrto %r0, %r0
-#CHECK: error: invalid instruction
#CHECK: cgrtno %r0, %r0
+#CHECK: error: invalid instruction
+#CHECK: cgrto %r0, %r0
- cgrto %r0, %r0
cgrtno %r0, %r0
+ cgrto %r0, %r0
#CHECK: error: invalid operand
#CHECK: cgxbr %r0, -1, %f0
@@ -1008,26 +1106,31 @@
cij %r0, 0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: cijo %r0, 0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: cijno %r0, 0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: cijo %r0, 0, 0, 0
- cijo %r0, 0, 0, 0
cijno %r0, 0, 0, 0
+ cijo %r0, 0, 0, 0
#CHECK: error: invalid operand
#CHECK: cit %r0, -32769
#CHECK: error: invalid operand
#CHECK: cit %r0, 32768
#CHECK: error: invalid instruction
-#CHECK: cito %r0, 0
-#CHECK: error: invalid instruction
#CHECK: citno %r0, 0
+#CHECK: error: invalid instruction
+#CHECK: cito %r0, 0
cit %r0, -32769
cit %r0, 32768
- cito %r0, 0
citno %r0, 0
+ cito %r0, 0
+
+#CHECK: error: invalid register pair
+#CHECK: cksm %r0, %r1
+
+ cksm %r0, %r1
#CHECK: error: invalid operand
#CHECK: cl %r0, -1
@@ -1081,10 +1184,41 @@
clc 0(1,%r2), 0(%r1,%r2)
clc 0(-), 0
-#CHECK: error: instruction requires: high-word
-#CHECK: clhf %r0, 0
+#CHECK: error: invalid register pair
+#CHECK: clcl %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: clcl %r0, %r1
- clhf %r0, 0
+ clcl %r1, %r0
+ clcl %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: clcle %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: clcle %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: clcle %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: clcle %r0, %r0, 4096
+
+ clcle %r1, %r0, 0
+ clcle %r0, %r1, 0
+ clcle %r0, %r0, -1
+ clcle %r0, %r0, 4096
+
+#CHECK: error: invalid register pair
+#CHECK: clclu %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: clclu %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: clclu %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: clclu %r0, %r0, 524288
+
+ clclu %r1, %r0, 0
+ clclu %r0, %r1, 0
+ clclu %r0, %r0, -524289
+ clclu %r0, %r0, 524288
#CHECK: error: instruction requires: fp-extension
#CHECK: clfdbr %r0, 0, %f0, 0
@@ -1126,14 +1260,14 @@
#CHECK: error: invalid operand
#CHECK: clfit %r0, 65536
#CHECK: error: invalid instruction
-#CHECK: clfito %r0, 0
-#CHECK: error: invalid instruction
#CHECK: clfitno %r0, 0
+#CHECK: error: invalid instruction
+#CHECK: clfito %r0, 0
clfit %r0, -1
clfit %r0, 65536
- clfito %r0, 0
clfitno %r0, 0
+ clfito %r0, 0
#CHECK: error: instruction requires: fp-extension
#CHECK: clfxbr %r0, 0, %f0, 0
@@ -1148,20 +1282,6 @@
clg %r0, -524289
clg %r0, 524288
-#CHECK: error: invalid operand
-#CHECK: clgit %r0, -1
-#CHECK: error: invalid operand
-#CHECK: clgit %r0, 65536
-#CHECK: error: invalid instruction
-#CHECK: clgito %r0, 0
-#CHECK: error: invalid instruction
-#CHECK: clgitno %r0, 0
-
- clgit %r0, -1
- clgit %r0, 65536
- clgito %r0, 0
- clgitno %r0, 0
-
#CHECK: error: instruction requires: fp-extension
#CHECK: clgdbr %r0, 0, %f0, 0
@@ -1256,12 +1376,26 @@
clgij %r0, 0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: clgijo %r0, 0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: clgijno %r0, 0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: clgijo %r0, 0, 0, 0
- clgijo %r0, 0, 0, 0
clgijno %r0, 0, 0, 0
+ clgijo %r0, 0, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: clgit %r0, -1
+#CHECK: error: invalid operand
+#CHECK: clgit %r0, 65536
+#CHECK: error: invalid instruction
+#CHECK: clgitno %r0, 0
+#CHECK: error: invalid instruction
+#CHECK: clgito %r0, 0
+
+ clgit %r0, -1
+ clgit %r0, 65536
+ clgitno %r0, 0
+ clgito %r0, 0
#CHECK: error: offset out of range
#CHECK: clgrj %r0, %r0, 0, -0x100002
@@ -1292,18 +1426,23 @@
clgrl %r0, 0x100000000
#CHECK: error: invalid instruction
-#CHECK: clgrto %r0, %r0
-#CHECK: error: invalid instruction
#CHECK: clgrtno %r0, %r0
+#CHECK: error: invalid instruction
+#CHECK: clgrto %r0, %r0
- clgrto %r0, %r0
clgrtno %r0, %r0
+ clgrto %r0, %r0
#CHECK: error: instruction requires: fp-extension
#CHECK: clgxbr %r0, 0, %f0, 0
clgxbr %r0, 0, %f0, 0
+#CHECK: error: instruction requires: high-word
+#CHECK: clhf %r0, 0
+
+ clhf %r0, 0
+
#CHECK: error: invalid operand
#CHECK: clhhsi -1, 0
#CHECK: error: invalid operand
@@ -1380,12 +1519,12 @@
clij %r0, 0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: clijo %r0, 0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: clijno %r0, 0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: clijo %r0, 0, 0, 0
- clijo %r0, 0, 0, 0
clijno %r0, 0, 0, 0
+ clijo %r0, 0, 0, 0
#CHECK: error: invalid operand
#CHECK: cliy -524289, 0
@@ -1404,6 +1543,48 @@
cliy 0, -1
cliy 0, 256
+#CHECK: error: invalid operand
+#CHECK: clm %r0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: clm %r0, 0, 4096
+#CHECK: error: invalid operand
+#CHECK: clm %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: clm %r0, 16, 0
+
+ clm %r0, 0, -1
+ clm %r0, 0, 4096
+ clm %r0, -1, 0
+ clm %r0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: clmh %r0, 0, -524289
+#CHECK: error: invalid operand
+#CHECK: clmh %r0, 0, 524288
+#CHECK: error: invalid operand
+#CHECK: clmh %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: clmh %r0, 16, 0
+
+ clmh %r0, 0, -524289
+ clmh %r0, 0, 524288
+ clmh %r0, -1, 0
+ clmh %r0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: clmy %r0, 0, -524289
+#CHECK: error: invalid operand
+#CHECK: clmy %r0, 0, 524288
+#CHECK: error: invalid operand
+#CHECK: clmy %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: clmy %r0, 16, 0
+
+ clmy %r0, 0, -524289
+ clmy %r0, 0, 524288
+ clmy %r0, -1, 0
+ clmy %r0, 16, 0
+
#CHECK: error: offset out of range
#CHECK: clrj %r0, %r0, 0, -0x100002
#CHECK: error: offset out of range
@@ -1419,12 +1600,12 @@
clrj %r0, %r0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: clrjo %r0, %r0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: clrjno %r0, %r0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: clrjo %r0, %r0, 0, 0
- clrjo %r0, %r0, 0, 0
clrjno %r0, %r0, 0, 0
+ clrjo %r0, %r0, 0, 0
#CHECK: error: offset out of range
#CHECK: clrl %r0, -0x1000000002
@@ -1441,12 +1622,12 @@
clrl %r0, 0x100000000
#CHECK: error: invalid instruction
-#CHECK: clrto %r0, %r0
-#CHECK: error: invalid instruction
#CHECK: clrtno %r0, %r0
+#CHECK: error: invalid instruction
+#CHECK: clrto %r0, %r0
- clrto %r0, %r0
clrtno %r0, %r0
+ clrto %r0, %r0
#CHECK: error: invalid operand
#CHECK: cly %r0, -524289
@@ -1456,6 +1637,67 @@
cly %r0, -524289
cly %r0, 524288
+#CHECK: error: invalid register pair
+#CHECK: cmpsc %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cmpsc %r0, %r1
+
+ cmpsc %r1, %r0
+ cmpsc %r0, %r1
+
+#CHECK: error: missing length in address
+#CHECK: cp 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: cp 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: cp 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: cp 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: cp 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: cp 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: cp 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: cp 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: cp 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: cp 0(-), 0(1)
+
+ cp 0, 0(1)
+ cp 0(1), 0
+ cp 0(%r1), 0(1,%r1)
+ cp 0(1,%r1), 0(%r1)
+ cp 0(0,%r1), 0(1,%r1)
+ cp 0(1,%r1), 0(0,%r1)
+ cp 0(17,%r1), 0(1,%r1)
+ cp 0(1,%r1), 0(17,%r1)
+ cp -1(1,%r1), 0(1,%r1)
+ cp 4096(1,%r1), 0(1,%r1)
+ cp 0(1,%r1), -1(1,%r1)
+ cp 0(1,%r1), 4096(1,%r1)
+ cp 0(1,%r0), 0(1,%r1)
+ cp 0(1,%r1), 0(1,%r0)
+ cp 0(%r1,%r2), 0(1,%r1)
+ cp 0(1,%r2), 0(%r1,%r2)
+ cp 0(-), 0(1)
+
#CHECK: error: offset out of range
#CHECK: crj %r0, %r0, 0, -0x100002
#CHECK: error: offset out of range
@@ -1471,12 +1713,12 @@
crj %r0, %r0, 0, 0x10000
#CHECK: error: invalid instruction
-#CHECK: crjo %r0, %r0, 0, 0
-#CHECK: error: invalid instruction
#CHECK: crjno %r0, %r0, 0, 0
+#CHECK: error: invalid instruction
+#CHECK: crjo %r0, %r0, 0, 0
- crjo %r0, %r0, 0, 0
crjno %r0, %r0, 0, 0
+ crjo %r0, %r0, 0, 0
#CHECK: error: offset out of range
#CHECK: crl %r0, -0x1000000002
@@ -1493,12 +1735,12 @@
crl %r0, 0x100000000
#CHECK: error: invalid instruction
-#CHECK: crto %r0, %r0
-#CHECK: error: invalid instruction
#CHECK: crtno %r0, %r0
+#CHECK: error: invalid instruction
+#CHECK: crto %r0, %r0
- crto %r0, %r0
crtno %r0, %r0
+ crto %r0, %r0
#CHECK: error: invalid operand
#CHECK: cs %r0, %r0, -1
@@ -1522,17 +1764,6 @@
csg %r0, %r0, 524288
csg %r0, %r0, 0(%r1,%r2)
-#CHECK: error: invalid operand
-#CHECK: csy %r0, %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: csy %r0, %r0, 524288
-#CHECK: error: invalid use of indexed addressing
-#CHECK: csy %r0, %r0, 0(%r1,%r2)
-
- csy %r0, %r0, -524289
- csy %r0, %r0, 524288
- csy %r0, %r0, 0(%r1,%r2)
-
#CHECK: error: invalid use of indexed addressing
#CHECK: csst 160(%r1,%r15), 160(%r15), %r2
#CHECK: error: invalid operand
@@ -1550,6 +1781,173 @@
csst 0(%r1), -1(%r15), %r2
csst 0(%r1), 4096(%r15), %r2
+#CHECK: error: invalid operand
+#CHECK: csy %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: csy %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: csy %r0, %r0, 0(%r1,%r2)
+
+ csy %r0, %r0, -524289
+ csy %r0, %r0, 524288
+ csy %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid register pair
+#CHECK: cu12 %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cu12 %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: cu12 %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: cu12 %r2, %r4, 16
+
+ cu12 %r1, %r0
+ cu12 %r0, %r1
+ cu12 %r2, %r4, -1
+ cu12 %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: cu14 %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cu14 %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: cu14 %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: cu14 %r2, %r4, 16
+
+ cu14 %r1, %r0
+ cu14 %r0, %r1
+ cu14 %r2, %r4, -1
+ cu14 %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: cu21 %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cu21 %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: cu21 %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: cu21 %r2, %r4, 16
+
+ cu21 %r1, %r0
+ cu21 %r0, %r1
+ cu21 %r2, %r4, -1
+ cu21 %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: cu24 %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cu24 %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: cu24 %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: cu24 %r2, %r4, 16
+
+ cu24 %r1, %r0
+ cu24 %r0, %r1
+ cu24 %r2, %r4, -1
+ cu24 %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: cu41 %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cu41 %r0, %r1
+
+ cu41 %r1, %r0
+ cu41 %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: cu42 %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cu42 %r0, %r1
+
+ cu42 %r1, %r0
+ cu42 %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: cuse %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cuse %r0, %r1
+
+ cuse %r1, %r0
+ cuse %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: cutfu %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cutfu %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: cutfu %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: cutfu %r2, %r4, 16
+
+ cutfu %r1, %r0
+ cutfu %r0, %r1
+ cutfu %r2, %r4, -1
+ cutfu %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: cuutf %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: cuutf %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: cuutf %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: cuutf %r2, %r4, 16
+
+ cuutf %r1, %r0
+ cuutf %r0, %r1
+ cuutf %r2, %r4, -1
+ cuutf %r2, %r4, 16
+
+#CHECK: error: invalid operand
+#CHECK: cvb %r0, -1
+#CHECK: error: invalid operand
+#CHECK: cvb %r0, 4096
+
+ cvb %r0, -1
+ cvb %r0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: cvbg %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: cvbg %r0, 524288
+
+ cvbg %r0, -524289
+ cvbg %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: cvby %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: cvby %r0, 524288
+
+ cvby %r0, -524289
+ cvby %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: cvd %r0, -1
+#CHECK: error: invalid operand
+#CHECK: cvd %r0, 4096
+
+ cvd %r0, -1
+ cvd %r0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: cvdg %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: cvdg %r0, 524288
+
+ cvdg %r0, -524289
+ cvdg %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: cvdy %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: cvdy %r0, 524288
+
+ cvdy %r0, -524289
+ cvdy %r0, 524288
+
#CHECK: error: invalid register pair
#CHECK: cxbr %f0, %f2
#CHECK: error: invalid register pair
@@ -1597,6 +1995,17 @@
cy %r0, 524288
#CHECK: error: invalid operand
+#CHECK: d %r0, -1
+#CHECK: error: invalid operand
+#CHECK: d %r0, 4096
+#CHECK: error: invalid register pair
+#CHECK: d %r1, 0
+
+ d %r0, -1
+ d %r0, 4096
+ d %r1, 0
+
+#CHECK: error: invalid operand
#CHECK: ddb %f0, -1
#CHECK: error: invalid operand
#CHECK: ddb %f0, 4096
@@ -1613,6 +2022,22 @@
deb %f0, 4096
#CHECK: error: invalid operand
+#CHECK: didbr %f0, %f0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: didbr %f0, %f0, %f0, 16
+
+ didbr %f0, %f0, %f0, -1
+ didbr %f0, %f0, %f0, 16
+
+#CHECK: error: invalid operand
+#CHECK: diebr %f0, %f0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: diebr %f0, %f0, %f0, 16
+
+ diebr %f0, %f0, %f0, -1
+ diebr %f0, %f0, %f0, 16
+
+#CHECK: error: invalid operand
#CHECK: dl %r0, -524289
#CHECK: error: invalid operand
#CHECK: dl %r0, 524288
@@ -1623,6 +2048,11 @@
dl %r0, 524288
dl %r1, 0
+#CHECK: error: invalid register pair
+#CHECK: dr %r1, %r0
+
+ dr %r1, %r0
+
#CHECK: error: invalid operand
#CHECK: dlg %r0, -524289
#CHECK: error: invalid operand
@@ -1644,6 +2074,59 @@
dlr %r1, %r0
+#CHECK: error: missing length in address
+#CHECK: dp 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: dp 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: dp 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: dp 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: dp 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: dp 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: dp 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: dp 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: dp 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: dp 0(-), 0(1)
+
+ dp 0, 0(1)
+ dp 0(1), 0
+ dp 0(%r1), 0(1,%r1)
+ dp 0(1,%r1), 0(%r1)
+ dp 0(0,%r1), 0(1,%r1)
+ dp 0(1,%r1), 0(0,%r1)
+ dp 0(17,%r1), 0(1,%r1)
+ dp 0(1,%r1), 0(17,%r1)
+ dp -1(1,%r1), 0(1,%r1)
+ dp 4096(1,%r1), 0(1,%r1)
+ dp 0(1,%r1), -1(1,%r1)
+ dp 0(1,%r1), 4096(1,%r1)
+ dp 0(1,%r0), 0(1,%r1)
+ dp 0(1,%r1), 0(1,%r0)
+ dp 0(%r1,%r2), 0(1,%r1)
+ dp 0(1,%r2), 0(%r1,%r2)
+ dp 0(-), 0(1)
+
#CHECK: error: invalid operand
#CHECK: dsg %r0, -524289
#CHECK: error: invalid operand
@@ -1685,12 +2168,15 @@
dxbr %f2, %f0
#CHECK: error: invalid operand
-#CHECK: ex %r0, -1
+#CHECK: ecag %r0, %r0, -524289
#CHECK: error: invalid operand
-#CHECK: ex %r0, 4096
+#CHECK: ecag %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ecag %r0, %r0, 0(%r1,%r2)
- ex %r0, -1
- ex %r0, 4096
+ ecag %r0, %r0, -524289
+ ecag %r0, %r0, 524288
+ ecag %r0, %r0, 0(%r1,%r2)
#CHECK: error: invalid use of indexed addressing
#CHECK: ectg 160(%r1,%r15),160(%r15), %r2
@@ -1709,6 +2195,102 @@
ectg 0(%r1),-1(%r15), %r2
ectg 0(%r1),4096(%r15), %r2
+#CHECK: error: missing length in address
+#CHECK: ed 0, 0
+#CHECK: error: missing length in address
+#CHECK: ed 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: ed 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: ed 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: ed 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: ed -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: ed 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: ed 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: ed 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: ed 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: ed 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ed 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ed 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: ed 0(-), 0
+
+ ed 0, 0
+ ed 0(%r1), 0(%r1)
+ ed 0(1,%r1), 0(2,%r1)
+ ed 0(0,%r1), 0(%r1)
+ ed 0(257,%r1), 0(%r1)
+ ed -1(1,%r1), 0(%r1)
+ ed 4096(1,%r1), 0(%r1)
+ ed 0(1,%r1), -1(%r1)
+ ed 0(1,%r1), 4096(%r1)
+ ed 0(1,%r0), 0(%r1)
+ ed 0(1,%r1), 0(%r0)
+ ed 0(%r1,%r2), 0(%r1)
+ ed 0(1,%r2), 0(%r1,%r2)
+ ed 0(-), 0
+
+#CHECK: error: missing length in address
+#CHECK: edmk 0, 0
+#CHECK: error: missing length in address
+#CHECK: edmk 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: edmk 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: edmk 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: edmk 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: edmk -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: edmk 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: edmk 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: edmk 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: edmk 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: edmk 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: edmk 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: edmk 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: edmk 0(-), 0
+
+ edmk 0, 0
+ edmk 0(%r1), 0(%r1)
+ edmk 0(1,%r1), 0(2,%r1)
+ edmk 0(0,%r1), 0(%r1)
+ edmk 0(257,%r1), 0(%r1)
+ edmk -1(1,%r1), 0(%r1)
+ edmk 4096(1,%r1), 0(%r1)
+ edmk 0(1,%r1), -1(%r1)
+ edmk 0(1,%r1), 4096(%r1)
+ edmk 0(1,%r0), 0(%r1)
+ edmk 0(1,%r1), 0(%r0)
+ edmk 0(%r1,%r2), 0(%r1)
+ edmk 0(1,%r2), 0(%r1,%r2)
+ edmk 0(-), 0
+
+#CHECK: error: invalid operand
+#CHECK: ex %r0, -1
+#CHECK: error: invalid operand
+#CHECK: ex %r0, 4096
+
+ ex %r0, -1
+ ex %r0, 4096
+
#CHECK: error: invalid operand
#CHECK: fidbr %f0, -1, %f0
#CHECK: error: invalid operand
@@ -1866,6 +2448,76 @@
iill %r0, 0x10000
#CHECK: error: invalid operand
+#CHECK: kdb %f0, -1
+#CHECK: error: invalid operand
+#CHECK: kdb %f0, 4096
+
+ kdb %f0, -1
+ kdb %f0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: keb %f0, -1
+#CHECK: error: invalid operand
+#CHECK: keb %f0, 4096
+
+ keb %f0, -1
+ keb %f0, 4096
+
+#CHECK: error: invalid register pair
+#CHECK: kimd %r0, %r1
+
+ kimd %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: klmd %r0, %r1
+
+ klmd %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: km %r1, %r2
+#CHECK: error: invalid register pair
+#CHECK: km %r2, %r1
+
+ km %r1, %r2
+ km %r2, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: kmac %r0, %r1
+
+ kmac %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: kmc %r1, %r2
+#CHECK: error: invalid register pair
+#CHECK: kmc %r2, %r1
+
+ kmc %r1, %r2
+ kmc %r2, %r1
+
+#CHECK: error: instruction requires: message-security-assist-extension4
+#CHECK: kmctr %r2, %r4, %r6
+
+ kmctr %r2, %r4, %r6
+
+#CHECK: error: instruction requires: message-security-assist-extension4
+#CHECK: kmf %r2, %r4
+
+ kmf %r2, %r4
+
+#CHECK: error: instruction requires: message-security-assist-extension4
+#CHECK: kmo %r2, %r4
+
+ kmo %r2, %r4
+
+#CHECK: error: invalid register pair
+#CHECK: kxbr %f0, %f2
+#CHECK: error: invalid register pair
+#CHECK: kxbr %f2, %f0
+
+ kxbr %f0, %f2
+ kxbr %f2, %f0
+
+#CHECK: error: invalid operand
#CHECK: l %r0, -1
#CHECK: error: invalid operand
#CHECK: l %r0, 4096
@@ -1881,14 +2533,6 @@
la %r0, -1
la %r0, 4096
-#CHECK: error: invalid operand
-#CHECK: lae %r0, -1
-#CHECK: error: invalid operand
-#CHECK: lae %r0, 4096
-
- lae %r0, -1
- lae %r0, 4096
-
#CHECK: error: instruction requires: interlocked-access1
#CHECK: laa %r1, %r2, 100(%r3)
laa %r1, %r2, 100(%r3)
@@ -1906,6 +2550,14 @@
laalg %r1, %r2, 100(%r3)
#CHECK: error: invalid operand
+#CHECK: lae %r0, -1
+#CHECK: error: invalid operand
+#CHECK: lae %r0, 4096
+
+ lae %r0, -1
+ lae %r0, 4096
+
+#CHECK: error: invalid operand
#CHECK: laey %r0, -524289
#CHECK: error: invalid operand
#CHECK: laey %r0, 524288
@@ -1948,14 +2600,6 @@
#CHECK: laog %r1, %r2, 100(%r3)
laog %r1, %r2, 100(%r3)
-#CHECK: error: instruction requires: interlocked-access1
-#CHECK: lax %r1, %r2, 100(%r3)
- lax %r1, %r2, 100(%r3)
-
-#CHECK: error: instruction requires: interlocked-access1
-#CHECK: laxg %r1, %r2, 100(%r3)
- laxg %r1, %r2, 100(%r3)
-
#CHECK: error: offset out of range
#CHECK: larl %r0, -0x1000000002
#CHECK: error: offset out of range
@@ -1970,6 +2614,14 @@
larl %r0, 1
larl %r0, 0x100000000
+#CHECK: error: instruction requires: interlocked-access1
+#CHECK: lax %r1, %r2, 100(%r3)
+ lax %r1, %r2, 100(%r3)
+
+#CHECK: error: instruction requires: interlocked-access1
+#CHECK: laxg %r1, %r2, 100(%r3)
+ laxg %r1, %r2, 100(%r3)
+
#CHECK: error: invalid operand
#CHECK: lay %r0, -524289
#CHECK: error: invalid operand
@@ -2070,11 +2722,6 @@
ley %f0, -524289
ley %f0, 524288
-#CHECK: error: instruction requires: high-word
-#CHECK: lfh %r0, 0
-
- lfh %r0, 0
-
#CHECK: error: invalid operand
#CHECK: lfas -1
#CHECK: error: invalid operand
@@ -2086,6 +2733,11 @@
lfas 4096
lfas 0(%r1,%r2)
+#CHECK: error: instruction requires: high-word
+#CHECK: lfh %r0, 0
+
+ lfh %r0, 0
+
#CHECK: error: invalid operand
#CHECK: lfpc -1
#CHECK: error: invalid operand
@@ -2258,14 +2910,6 @@
llgc %r0, 524288
#CHECK: error: invalid operand
-#CHECK: llgt %r0, -524289
-#CHECK: error: invalid operand
-#CHECK: llgt %r0, 524288
-
- llgt %r0, -524289
- llgt %r0, 524288
-
-#CHECK: error: invalid operand
#CHECK: llgf %r0, -524289
#CHECK: error: invalid operand
#CHECK: llgf %r0, 524288
@@ -2310,6 +2954,14 @@
llghrl %r0, 0x100000000
#CHECK: error: invalid operand
+#CHECK: llgt %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: llgt %r0, 524288
+
+ llgt %r0, -524289
+ llgt %r0, 524288
+
+#CHECK: error: invalid operand
#CHECK: llh %r0, -524289
#CHECK: error: invalid operand
#CHECK: llh %r0, 524288
@@ -2392,6 +3044,23 @@
lm %r0, %r0, 4096
lm %r0, %r0, 0(%r1,%r2)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lmd %r2, %r4, 160(%r1,%r15), 160(%r15)
+#CHECK: error: invalid operand
+#CHECK: lmd %r2, %r4, -1(%r1), 160(%r15)
+#CHECK: error: invalid operand
+#CHECK: lmd %r2, %r4, 4096(%r1), 160(%r15)
+#CHECK: error: invalid operand
+#CHECK: lmd %r2, %r4, 0(%r1), -1(%r15)
+#CHECK: error: invalid operand
+#CHECK: lmd %r2, %r4, 0(%r1), 4096(%r15)
+
+ lmd %r2, %r4, 160(%r1,%r15), 160(%r15)
+ lmd %r2, %r4, -1(%r1), 160(%r15)
+ lmd %r2, %r4, 4096(%r1), 160(%r15)
+ lmd %r2, %r4, 0(%r1), -1(%r15)
+ lmd %r2, %r4, 0(%r1), 4096(%r15)
+
#CHECK: error: invalid operand
#CHECK: lmg %r0, %r0, -524289
#CHECK: error: invalid operand
@@ -2544,6 +3213,17 @@
lzxr %f2
#CHECK: error: invalid operand
+#CHECK: m %r0, -1
+#CHECK: error: invalid operand
+#CHECK: m %r0, 4096
+#CHECK: error: invalid register pair
+#CHECK: m %r1, 0
+
+ m %r0, -1
+ m %r0, 4096
+ m %r1, 0
+
+#CHECK: error: invalid operand
#CHECK: madb %f0, %f0, -1
#CHECK: error: invalid operand
#CHECK: madb %f0, %f0, 4096
@@ -2560,6 +3240,23 @@
maeb %f0, %f0, 4096
#CHECK: error: invalid operand
+#CHECK: mc -1, 0
+#CHECK: error: invalid operand
+#CHECK: mc 4096, 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mc 0(%r1,%r2), 0
+#CHECK: error: invalid operand
+#CHECK: mc 0, -1
+#CHECK: error: invalid operand
+#CHECK: mc 0, 256
+
+ mc -1, 0
+ mc 4096, 0
+ mc 0(%r1,%r2), 0
+ mc 0, -1
+ mc 0, 256
+
+#CHECK: error: invalid operand
#CHECK: mdb %f0, -1
#CHECK: error: invalid operand
#CHECK: mdb %f0, 4096
@@ -2584,6 +3281,17 @@
meeb %f0, 4096
#CHECK: error: invalid operand
+#CHECK: mfy %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: mfy %r0, 524288
+#CHECK: error: invalid register pair
+#CHECK: mfy %r1, 0
+
+ mfy %r0, -524289
+ mfy %r0, 524288
+ mfy %r1, 0
+
+#CHECK: error: invalid operand
#CHECK: mghi %r0, -32769
#CHECK: error: invalid operand
#CHECK: mghi %r0, 32768
@@ -2622,6 +3330,17 @@
mhy %r0, 524288
#CHECK: error: invalid operand
+#CHECK: ml %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: ml %r0, 524288
+#CHECK: error: invalid register pair
+#CHECK: ml %r1, 0
+
+ ml %r0, -524289
+ ml %r0, 524288
+ ml %r1, 0
+
+#CHECK: error: invalid operand
#CHECK: mlg %r0, -524289
#CHECK: error: invalid operand
#CHECK: mlg %r0, 524288
@@ -2637,6 +3356,69 @@
mlgr %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: mlr %r1, %r0
+
+ mlr %r1, %r0
+
+#CHECK: error: missing length in address
+#CHECK: mp 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: mp 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: mp 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: mp 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mp 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mp 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mp 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mp 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mp 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: mp 0(-), 0(1)
+
+ mp 0, 0(1)
+ mp 0(1), 0
+ mp 0(%r1), 0(1,%r1)
+ mp 0(1,%r1), 0(%r1)
+ mp 0(0,%r1), 0(1,%r1)
+ mp 0(1,%r1), 0(0,%r1)
+ mp 0(17,%r1), 0(1,%r1)
+ mp 0(1,%r1), 0(17,%r1)
+ mp -1(1,%r1), 0(1,%r1)
+ mp 4096(1,%r1), 0(1,%r1)
+ mp 0(1,%r1), -1(1,%r1)
+ mp 0(1,%r1), 4096(1,%r1)
+ mp 0(1,%r0), 0(1,%r1)
+ mp 0(1,%r1), 0(1,%r0)
+ mp 0(%r1,%r2), 0(1,%r1)
+ mp 0(1,%r2), 0(%r1,%r2)
+ mp 0(-), 0(1)
+
+#CHECK: error: invalid register pair
+#CHECK: mr %r1, %r0
+
+ mr %r1, %r0
+
#CHECK: error: invalid operand
#CHECK: ms %r0, -1
#CHECK: error: invalid operand
@@ -2745,6 +3527,50 @@
mvc 0(1,%r2), 0(%r1,%r2)
mvc 0(-), 0
+#CHECK: error: missing length in address
+#CHECK: mvcin 0, 0
+#CHECK: error: missing length in address
+#CHECK: mvcin 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: mvcin 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvcin 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvcin 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvcin -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvcin 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvcin 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvcin 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvcin 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvcin 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcin 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcin 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: mvcin 0(-), 0
+
+ mvcin 0, 0
+ mvcin 0(%r1), 0(%r1)
+ mvcin 0(1,%r1), 0(2,%r1)
+ mvcin 0(0,%r1), 0(%r1)
+ mvcin 0(257,%r1), 0(%r1)
+ mvcin -1(1,%r1), 0(%r1)
+ mvcin 4096(1,%r1), 0(%r1)
+ mvcin 0(1,%r1), -1(%r1)
+ mvcin 0(1,%r1), 4096(%r1)
+ mvcin 0(1,%r0), 0(%r1)
+ mvcin 0(1,%r1), 0(%r0)
+ mvcin 0(%r1,%r2), 0(%r1)
+ mvcin 0(1,%r2), 0(%r1,%r2)
+ mvcin 0(-), 0
+
#CHECK: error: invalid use of length addressing
#CHECK: mvck 0(%r1,%r1), 0(2,%r1), %r3
#CHECK: error: invalid operand
@@ -2774,6 +3600,42 @@
mvck 0(%r1,%r2), 0(%r1,%r2), %r3
mvck 0(-), 0, %r3
+#CHECK: error: invalid register pair
+#CHECK: mvcl %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: mvcl %r0, %r1
+
+ mvcl %r1, %r0
+ mvcl %r0, %r1
+
+#CHECK: error: invalid register pair
+#CHECK: mvcle %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: mvcle %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: mvcle %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: mvcle %r0, %r0, 4096
+
+ mvcle %r1, %r0, 0
+ mvcle %r0, %r1, 0
+ mvcle %r0, %r0, -1
+ mvcle %r0, %r0, 4096
+
+#CHECK: error: invalid register pair
+#CHECK: mvclu %r1, %r0
+#CHECK: error: invalid register pair
+#CHECK: mvclu %r0, %r1
+#CHECK: error: invalid operand
+#CHECK: mvclu %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: mvclu %r0, %r0, 524288
+
+ mvclu %r1, %r0, 0
+ mvclu %r0, %r1, 0
+ mvclu %r0, %r0, -524289
+ mvclu %r0, %r0, 524288
+
#CHECK: error: invalid operand
#CHECK: mvghi -1, 0
#CHECK: error: invalid operand
@@ -2859,6 +3721,147 @@
mviy 0, -1
mviy 0, 256
+#CHECK: error: missing length in address
+#CHECK: mvn 0, 0
+#CHECK: error: missing length in address
+#CHECK: mvn 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: mvn 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvn 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvn 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvn -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvn 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvn 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvn 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvn 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvn 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvn 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvn 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: mvn 0(-), 0
+
+ mvn 0, 0
+ mvn 0(%r1), 0(%r1)
+ mvn 0(1,%r1), 0(2,%r1)
+ mvn 0(0,%r1), 0(%r1)
+ mvn 0(257,%r1), 0(%r1)
+ mvn -1(1,%r1), 0(%r1)
+ mvn 4096(1,%r1), 0(%r1)
+ mvn 0(1,%r1), -1(%r1)
+ mvn 0(1,%r1), 4096(%r1)
+ mvn 0(1,%r0), 0(%r1)
+ mvn 0(1,%r1), 0(%r0)
+ mvn 0(%r1,%r2), 0(%r1)
+ mvn 0(1,%r2), 0(%r1,%r2)
+ mvn 0(-), 0
+
+#CHECK: error: missing length in address
+#CHECK: mvo 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: mvo 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: mvo 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: mvo 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvo 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvo 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvo 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvo 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvo 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: mvo 0(-), 0(1)
+
+ mvo 0, 0(1)
+ mvo 0(1), 0
+ mvo 0(%r1), 0(1,%r1)
+ mvo 0(1,%r1), 0(%r1)
+ mvo 0(0,%r1), 0(1,%r1)
+ mvo 0(1,%r1), 0(0,%r1)
+ mvo 0(17,%r1), 0(1,%r1)
+ mvo 0(1,%r1), 0(17,%r1)
+ mvo -1(1,%r1), 0(1,%r1)
+ mvo 4096(1,%r1), 0(1,%r1)
+ mvo 0(1,%r1), -1(1,%r1)
+ mvo 0(1,%r1), 4096(1,%r1)
+ mvo 0(1,%r0), 0(1,%r1)
+ mvo 0(1,%r1), 0(1,%r0)
+ mvo 0(%r1,%r2), 0(1,%r1)
+ mvo 0(1,%r2), 0(%r1,%r2)
+ mvo 0(-), 0(1)
+
+#CHECK: error: missing length in address
+#CHECK: mvz 0, 0
+#CHECK: error: missing length in address
+#CHECK: mvz 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: mvz 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: mvz 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvz 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvz -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvz 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvz 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: mvz 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvz 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: mvz 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvz 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvz 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: mvz 0(-), 0
+
+ mvz 0, 0
+ mvz 0(%r1), 0(%r1)
+ mvz 0(1,%r1), 0(2,%r1)
+ mvz 0(0,%r1), 0(%r1)
+ mvz 0(257,%r1), 0(%r1)
+ mvz -1(1,%r1), 0(%r1)
+ mvz 4096(1,%r1), 0(%r1)
+ mvz 0(1,%r1), -1(%r1)
+ mvz 0(1,%r1), 4096(%r1)
+ mvz 0(1,%r0), 0(%r1)
+ mvz 0(1,%r1), 0(%r0)
+ mvz 0(%r1,%r2), 0(%r1)
+ mvz 0(1,%r2), 0(%r1,%r2)
+ mvz 0(-), 0
+
#CHECK: error: invalid register pair
#CHECK: mxbr %f0, %f2
#CHECK: error: invalid register pair
@@ -3203,6 +4206,64 @@
oy %r0, -524289
oy %r0, 524288
+#CHECK: error: missing length in address
+#CHECK: pack 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: pack 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: pack 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: pack 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pack 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: pack 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: pack 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pack 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pack 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: pack 0(-), 0(1)
+
+ pack 0, 0(1)
+ pack 0(1), 0
+ pack 0(%r1), 0(1,%r1)
+ pack 0(1,%r1), 0(%r1)
+ pack 0(0,%r1), 0(1,%r1)
+ pack 0(1,%r1), 0(0,%r1)
+ pack 0(17,%r1), 0(1,%r1)
+ pack 0(1,%r1), 0(17,%r1)
+ pack -1(1,%r1), 0(1,%r1)
+ pack 4096(1,%r1), 0(1,%r1)
+ pack 0(1,%r1), -1(1,%r1)
+ pack 0(1,%r1), 4096(1,%r1)
+ pack 0(1,%r0), 0(1,%r1)
+ pack 0(1,%r1), 0(1,%r0)
+ pack 0(%r1,%r2), 0(1,%r1)
+ pack 0(1,%r2), 0(%r1,%r2)
+ pack 0(-), 0(1)
+
+#CHECK: error: instruction requires: message-security-assist-extension4
+#CHECK: pcc
+
+ pcc
+
#CHECK: error: invalid operand
#CHECK: pfd -1, 0
#CHECK: error: invalid operand
@@ -3237,6 +4298,94 @@
pfdrl 1, 1
pfdrl 1, 0x100000000
+#CHECK: error: missing length in address
+#CHECK: pka 0, 0
+#CHECK: error: missing length in address
+#CHECK: pka 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: pka 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: pka 0(%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: pka 0(%r1), 0(257,%r1)
+#CHECK: error: invalid operand
+#CHECK: pka -1(%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pka 4096(%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pka 0(%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pka 0(%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: pka 0(%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: pka 0(%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pka 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pka 0(%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: pka 0, 0(-)
+
+ pka 0, 0
+ pka 0(%r1), 0(%r1)
+ pka 0(1,%r1), 0(2,%r1)
+ pka 0(%r1), 0(0,%r1)
+ pka 0(%r1), 0(257,%r1)
+ pka -1(%r1), 0(1,%r1)
+ pka 4096(%r1), 0(1,%r1)
+ pka 0(%r1), -1(1,%r1)
+ pka 0(%r1), 4096(1,%r1)
+ pka 0(%r0), 0(1,%r1)
+ pka 0(%r1), 0(1,%r0)
+ pka 0(%r1,%r2), 0(1,%r1)
+ pka 0(%r2), 0(%r1,%r2)
+ pka 0, 0(-)
+
+#CHECK: error: missing length in address
+#CHECK: pku 0, 0
+#CHECK: error: missing length in address
+#CHECK: pku 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: pku 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: pku 0(%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: pku 0(%r1), 0(257,%r1)
+#CHECK: error: invalid operand
+#CHECK: pku -1(%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pku 4096(%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pku 0(%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: pku 0(%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: pku 0(%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: pku 0(%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pku 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pku 0(%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: pku 0, 0(-)
+
+ pku 0, 0
+ pku 0(%r1), 0(%r1)
+ pku 0(1,%r1), 0(2,%r1)
+ pku 0(%r1), 0(0,%r1)
+ pku 0(%r1), 0(257,%r1)
+ pku -1(%r1), 0(1,%r1)
+ pku 4096(%r1), 0(1,%r1)
+ pku 0(%r1), -1(1,%r1)
+ pku 0(%r1), 4096(1,%r1)
+ pku 0(%r0), 0(1,%r1)
+ pku 0(%r1), 0(1,%r0)
+ pku 0(%r1,%r2), 0(1,%r1)
+ pku 0(%r2), 0(%r1,%r2)
+ pku 0, 0(-)
+
#CHECK: error: invalid use of indexed addressing
#CHECK: plo %r2, 160(%r1,%r15), %r4, 160(%r15)
#CHECK: error: invalid operand
@@ -3294,6 +4443,34 @@
risblg %r1, %r2, 0, 0, 0
#CHECK: error: invalid operand
+#CHECK: rll %r0,%r0,-524289
+#CHECK: error: invalid operand
+#CHECK: rll %r0,%r0,524288
+#CHECK: error: %r0 used in an address
+#CHECK: rll %r0,%r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: rll %r0,%r0,0(%r1,%r2)
+
+ rll %r0,%r0,-524289
+ rll %r0,%r0,524288
+ rll %r0,%r0,0(%r0)
+ rll %r0,%r0,0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: rllg %r0,%r0,-524289
+#CHECK: error: invalid operand
+#CHECK: rllg %r0,%r0,524288
+#CHECK: error: %r0 used in an address
+#CHECK: rllg %r0,%r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: rllg %r0,%r0,0(%r1,%r2)
+
+ rllg %r0,%r0,-524289
+ rllg %r0,%r0,524288
+ rllg %r0,%r0,0(%r0)
+ rllg %r0,%r0,0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: rnsbg %r0,%r0,0,0,-1
#CHECK: error: invalid operand
#CHECK: rnsbg %r0,%r0,0,0,64
@@ -3354,34 +4531,6 @@
rxsbg %r0,%r0,256,0,0
#CHECK: error: invalid operand
-#CHECK: rll %r0,%r0,-524289
-#CHECK: error: invalid operand
-#CHECK: rll %r0,%r0,524288
-#CHECK: error: %r0 used in an address
-#CHECK: rll %r0,%r0,0(%r0)
-#CHECK: error: invalid use of indexed addressing
-#CHECK: rll %r0,%r0,0(%r1,%r2)
-
- rll %r0,%r0,-524289
- rll %r0,%r0,524288
- rll %r0,%r0,0(%r0)
- rll %r0,%r0,0(%r1,%r2)
-
-#CHECK: error: invalid operand
-#CHECK: rllg %r0,%r0,-524289
-#CHECK: error: invalid operand
-#CHECK: rllg %r0,%r0,524288
-#CHECK: error: %r0 used in an address
-#CHECK: rllg %r0,%r0,0(%r0)
-#CHECK: error: invalid use of indexed addressing
-#CHECK: rllg %r0,%r0,0(%r1,%r2)
-
- rllg %r0,%r0,-524289
- rllg %r0,%r0,524288
- rllg %r0,%r0,0(%r0)
- rllg %r0,%r0,0(%r1,%r2)
-
-#CHECK: error: invalid operand
#CHECK: s %r0, -1
#CHECK: error: invalid operand
#CHECK: s %r0, 4096
@@ -3451,6 +4600,39 @@
sl %r0, 4096
#CHECK: error: invalid operand
+#CHECK: sla %r0,-1
+#CHECK: error: invalid operand
+#CHECK: sla %r0,4096
+#CHECK: error: %r0 used in an address
+#CHECK: sla %r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sla %r0,0(%r1,%r2)
+
+ sla %r0,-1
+ sla %r0,4096
+ sla %r0,0(%r0)
+ sla %r0,0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: slag %r0,%r0,-524289
+#CHECK: error: invalid operand
+#CHECK: slag %r0,%r0,524288
+#CHECK: error: %r0 used in an address
+#CHECK: slag %r0,%r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: slag %r0,%r0,0(%r1,%r2)
+
+ slag %r0,%r0,-524289
+ slag %r0,%r0,524288
+ slag %r0,%r0,0(%r0)
+ slag %r0,%r0,0(%r1,%r2)
+
+#CHECK: error: instruction requires: distinct-ops
+#CHECK: slak %r2,%r3,4(%r5)
+
+ slak %r2,%r3,4(%r5)
+
+#CHECK: error: invalid operand
#CHECK: slb %r0, -524289
#CHECK: error: invalid operand
#CHECK: slb %r0, 524288
@@ -3466,6 +4648,40 @@
slbg %r0, -524289
slbg %r0, 524288
+#CHECK: error: invalid register pair
+#CHECK: slda %r1,0
+#CHECK: error: invalid operand
+#CHECK: slda %r0,-1
+#CHECK: error: invalid operand
+#CHECK: slda %r0,4096
+#CHECK: error: %r0 used in an address
+#CHECK: slda %r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: slda %r0,0(%r1,%r2)
+
+ slda %r1,0
+ slda %r0,-1
+ slda %r0,4096
+ slda %r0,0(%r0)
+ slda %r0,0(%r1,%r2)
+
+#CHECK: error: invalid register pair
+#CHECK: sldl %r1,0
+#CHECK: error: invalid operand
+#CHECK: sldl %r0,-1
+#CHECK: error: invalid operand
+#CHECK: sldl %r0,4096
+#CHECK: error: %r0 used in an address
+#CHECK: sldl %r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sldl %r0,0(%r1,%r2)
+
+ sldl %r1,0
+ sldl %r0,-1
+ sldl %r0,4096
+ sldl %r0,0(%r0)
+ sldl %r0,0(%r1,%r2)
+
#CHECK: error: invalid operand
#CHECK: slfi %r0, -1
#CHECK: error: invalid operand
@@ -3504,25 +4720,6 @@
slgrk %r2,%r3,%r4
#CHECK: error: invalid operand
-#CHECK: sla %r0,-1
-#CHECK: error: invalid operand
-#CHECK: sla %r0,4096
-#CHECK: error: %r0 used in an address
-#CHECK: sla %r0,0(%r0)
-#CHECK: error: invalid use of indexed addressing
-#CHECK: sla %r0,0(%r1,%r2)
-
- sla %r0,-1
- sla %r0,4096
- sla %r0,0(%r0)
- sla %r0,0(%r1,%r2)
-
-#CHECK: error: instruction requires: distinct-ops
-#CHECK: slak %r2,%r3,4(%r5)
-
- slak %r2,%r3,4(%r5)
-
-#CHECK: error: invalid operand
#CHECK: sll %r0,-1
#CHECK: error: invalid operand
#CHECK: sll %r0,4096
@@ -3568,6 +4765,59 @@
sly %r0, -524289
sly %r0, 524288
+#CHECK: error: missing length in address
+#CHECK: sp 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: sp 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: sp 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: sp 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: sp 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: sp 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: sp 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sp 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sp 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: sp 0(-), 0(1)
+
+ sp 0, 0(1)
+ sp 0(1), 0
+ sp 0(%r1), 0(1,%r1)
+ sp 0(1,%r1), 0(%r1)
+ sp 0(0,%r1), 0(1,%r1)
+ sp 0(1,%r1), 0(0,%r1)
+ sp 0(17,%r1), 0(1,%r1)
+ sp 0(1,%r1), 0(17,%r1)
+ sp -1(1,%r1), 0(1,%r1)
+ sp 4096(1,%r1), 0(1,%r1)
+ sp 0(1,%r1), -1(1,%r1)
+ sp 0(1,%r1), 4096(1,%r1)
+ sp 0(1,%r0), 0(1,%r1)
+ sp 0(1,%r1), 0(1,%r0)
+ sp 0(%r1,%r2), 0(1,%r1)
+ sp 0(1,%r2), 0(%r1,%r2)
+ sp 0(-), 0(1)
+
#CHECK: error: invalid operand
#CHECK: sqdb %f0, -1
#CHECK: error: invalid operand
@@ -3625,6 +4875,40 @@
srak %r2,%r3,4(%r5)
+#CHECK: error: invalid register pair
+#CHECK: srda %r1,0
+#CHECK: error: invalid operand
+#CHECK: srda %r0,-1
+#CHECK: error: invalid operand
+#CHECK: srda %r0,4096
+#CHECK: error: %r0 used in an address
+#CHECK: srda %r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: srda %r0,0(%r1,%r2)
+
+ srda %r1,0
+ srda %r0,-1
+ srda %r0,4096
+ srda %r0,0(%r0)
+ srda %r0,0(%r1,%r2)
+
+#CHECK: error: invalid register pair
+#CHECK: srdl %r1,0
+#CHECK: error: invalid operand
+#CHECK: srdl %r0,-1
+#CHECK: error: invalid operand
+#CHECK: srdl %r0,4096
+#CHECK: error: %r0 used in an address
+#CHECK: srdl %r0,0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: srdl %r0,0(%r1,%r2)
+
+ srdl %r1,0
+ srdl %r0,-1
+ srdl %r0,4096
+ srdl %r0,0(%r0)
+ srdl %r0,0(%r1,%r2)
+
#CHECK: error: instruction requires: distinct-ops
#CHECK: srk %r2,%r3,%r4
@@ -3690,6 +4974,56 @@
srnmt 4096
srnmt 0(%r1,%r2)
+#CHECK: error: missing length in address
+#CHECK: srp 0, 0, 0
+#CHECK: error: missing length in address
+#CHECK: srp 0(%r1), 0(%r1), 0
+#CHECK: error: invalid use of length addressing
+#CHECK: srp 0(1,%r1), 0(2,%r1), 0
+#CHECK: error: invalid operand
+#CHECK: srp 0(0,%r1), 0(%r1), 0
+#CHECK: error: invalid operand
+#CHECK: srp 0(17,%r1), 0(%r1), 0
+#CHECK: error: invalid operand
+#CHECK: srp -1(1,%r1), 0(%r1), 0
+#CHECK: error: invalid operand
+#CHECK: srp 4096(1,%r1), 0(%r1), 0
+#CHECK: error: invalid operand
+#CHECK: srp 0(1,%r1), -1(%r1), 0
+#CHECK: error: invalid operand
+#CHECK: srp 0(1,%r1), 4096(%r1), 0
+#CHECK: error: %r0 used in an address
+#CHECK: srp 0(1,%r0), 0(%r1), 0
+#CHECK: error: %r0 used in an address
+#CHECK: srp 0(1,%r1), 0(%r0), 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: srp 0(%r1,%r2), 0(%r1), 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: srp 0(1,%r2), 0(%r1,%r2), 0
+#CHECK: error: invalid operand
+#CHECK: srp 0(1), 0, -1
+#CHECK: error: invalid operand
+#CHECK: srp 0(1), 0, 16
+#CHECK: error: unknown token in expression
+#CHECK: srp 0(-), 0, 0
+
+ srp 0, 0, 0
+ srp 0(%r1), 0(%r1), 0
+ srp 0(1,%r1), 0(2,%r1), 0
+ srp 0(0,%r1), 0(%r1), 0
+ srp 0(17,%r1), 0(%r1), 0
+ srp -1(1,%r1), 0(%r1), 0
+ srp 4096(1,%r1), 0(%r1), 0
+ srp 0(1,%r1), -1(%r1), 0
+ srp 0(1,%r1), 4096(%r1), 0
+ srp 0(1,%r0), 0(%r1), 0
+ srp 0(1,%r1), 0(%r0), 0
+ srp 0(%r1,%r2), 0(%r1), 0
+ srp 0(1,%r2), 0(%r1,%r2), 0
+ srp 0(1), 0, -1
+ srp 0(1), 0, 16
+ srp 0(-), 0, 0
+
#CHECK: error: invalid operand
#CHECK: st %r0, -1
#CHECK: error: invalid operand
@@ -3731,6 +5065,48 @@
stch %r0, 0
#CHECK: error: invalid operand
+#CHECK: stcm %r0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: stcm %r0, 0, 4096
+#CHECK: error: invalid operand
+#CHECK: stcm %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: stcm %r0, 16, 0
+
+ stcm %r0, 0, -1
+ stcm %r0, 0, 4096
+ stcm %r0, -1, 0
+ stcm %r0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: stcmh %r0, 0, -524289
+#CHECK: error: invalid operand
+#CHECK: stcmh %r0, 0, 524288
+#CHECK: error: invalid operand
+#CHECK: stcmh %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: stcmh %r0, 16, 0
+
+	stcmh %r0, 0, -524289
+	stcmh %r0, 0, 524288
+	stcmh %r0, -1, 0
+	stcmh %r0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: stcmy %r0, 0, -524289
+#CHECK: error: invalid operand
+#CHECK: stcmy %r0, 0, 524288
+#CHECK: error: invalid operand
+#CHECK: stcmy %r0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: stcmy %r0, 16, 0
+
+ stcmy %r0, 0, -524289
+ stcmy %r0, 0, 524288
+ stcmy %r0, -1, 0
+ stcmy %r0, 16, 0
+
+#CHECK: error: invalid operand
#CHECK: stcy %r0, -524289
#CHECK: error: invalid operand
#CHECK: stcy %r0, 524288
@@ -3770,6 +5146,11 @@
stey %f0, -524289
stey %f0, 524288
+#CHECK: error: instruction requires: high-word
+#CHECK: stfh %r0, 0
+
+ stfh %r0, 0
+
#CHECK: error: invalid operand
#CHECK: stfpc -1
#CHECK: error: invalid operand
@@ -3838,11 +5219,6 @@
sthy %r0, -524289
sthy %r0, 524288
-#CHECK: error: instruction requires: high-word
-#CHECK: stfh %r0, 0
-
- stfh %r0, 0
-
#CHECK: error: invalid operand
#CHECK: stm %r0, %r0, 4096
#CHECK: error: invalid use of indexed addressing
@@ -4008,6 +5384,14 @@
tm 0, 256
#CHECK: error: invalid operand
+#CHECK: tmh %r0, -1
+#CHECK: error: invalid operand
+#CHECK: tmh %r0, 0x10000
+
+ tmh %r0, -1
+ tmh %r0, 0x10000
+
+#CHECK: error: invalid operand
#CHECK: tmhh %r0, -1
#CHECK: error: invalid operand
#CHECK: tmhh %r0, 0x10000
@@ -4024,12 +5408,12 @@
tmhl %r0, 0x10000
#CHECK: error: invalid operand
-#CHECK: tmh %r0, -1
+#CHECK: tml %r0, -1
#CHECK: error: invalid operand
-#CHECK: tmh %r0, 0x10000
+#CHECK: tml %r0, 0x10000
- tmh %r0, -1
- tmh %r0, 0x10000
+ tml %r0, -1
+ tml %r0, 0x10000
#CHECK: error: invalid operand
#CHECK: tmlh %r0, -1
@@ -4040,14 +5424,6 @@
tmlh %r0, 0x10000
#CHECK: error: invalid operand
-#CHECK: tml %r0, -1
-#CHECK: error: invalid operand
-#CHECK: tml %r0, 0x10000
-
- tml %r0, -1
- tml %r0, 0x10000
-
-#CHECK: error: invalid operand
#CHECK: tmll %r0, -1
#CHECK: error: invalid operand
#CHECK: tmll %r0, 0x10000
@@ -4072,6 +5448,238 @@
tmy 0, -1
tmy 0, 256
+#CHECK: error: missing length in address
+#CHECK: tp 0
+#CHECK: error: missing length in address
+#CHECK: tp 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: tp 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: tp 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: tp -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: tp 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: tp 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tp 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: tp 0(-)
+
+ tp 0
+ tp 0(%r1)
+ tp 0(0,%r1)
+ tp 0(17,%r1)
+ tp -1(1,%r1)
+ tp 4096(1,%r1)
+ tp 0(1,%r0)
+ tp 0(%r1,%r2)
+ tp 0(-)
+
+#CHECK: error: missing length in address
+#CHECK: tr 0, 0
+#CHECK: error: missing length in address
+#CHECK: tr 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: tr 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: tr 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: tr 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: tr -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: tr 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: tr 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: tr 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: tr 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: tr 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tr 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tr 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: tr 0(-), 0
+
+ tr 0, 0
+ tr 0(%r1), 0(%r1)
+ tr 0(1,%r1), 0(2,%r1)
+ tr 0(0,%r1), 0(%r1)
+ tr 0(257,%r1), 0(%r1)
+ tr -1(1,%r1), 0(%r1)
+ tr 4096(1,%r1), 0(%r1)
+ tr 0(1,%r1), -1(%r1)
+ tr 0(1,%r1), 4096(%r1)
+ tr 0(1,%r0), 0(%r1)
+ tr 0(1,%r1), 0(%r0)
+ tr 0(%r1,%r2), 0(%r1)
+ tr 0(1,%r2), 0(%r1,%r2)
+ tr 0(-), 0
+
+#CHECK: error: invalid register pair
+#CHECK: tre %r1, %r0
+
+ tre %r1, %r0
+
+#CHECK: error: invalid register pair
+#CHECK: troo %r1, %r0
+#CHECK: error: invalid operand
+#CHECK: troo %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: troo %r2, %r4, 16
+
+ troo %r1, %r0
+ troo %r2, %r4, -1
+ troo %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: trot %r1, %r0
+#CHECK: error: invalid operand
+#CHECK: trot %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: trot %r2, %r4, 16
+
+ trot %r1, %r0
+ trot %r2, %r4, -1
+ trot %r2, %r4, 16
+
+#CHECK: error: missing length in address
+#CHECK: trt 0, 0
+#CHECK: error: missing length in address
+#CHECK: trt 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: trt 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: trt 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trt 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trt -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trt 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trt 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: trt 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: trt 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: trt 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: trt 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: trt 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: trt 0(-), 0
+
+ trt 0, 0
+ trt 0(%r1), 0(%r1)
+ trt 0(1,%r1), 0(2,%r1)
+ trt 0(0,%r1), 0(%r1)
+ trt 0(257,%r1), 0(%r1)
+ trt -1(1,%r1), 0(%r1)
+ trt 4096(1,%r1), 0(%r1)
+ trt 0(1,%r1), -1(%r1)
+ trt 0(1,%r1), 4096(%r1)
+ trt 0(1,%r0), 0(%r1)
+ trt 0(1,%r1), 0(%r0)
+ trt 0(%r1,%r2), 0(%r1)
+ trt 0(1,%r2), 0(%r1,%r2)
+ trt 0(-), 0
+
+#CHECK: error: invalid register pair
+#CHECK: trte %r1, %r0
+#CHECK: error: invalid operand
+#CHECK: trte %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: trte %r2, %r4, 16
+
+ trte %r1, %r0
+ trte %r2, %r4, -1
+ trte %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: trto %r1, %r0
+#CHECK: error: invalid operand
+#CHECK: trto %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: trto %r2, %r4, 16
+
+ trto %r1, %r0
+ trto %r2, %r4, -1
+ trto %r2, %r4, 16
+
+#CHECK: error: missing length in address
+#CHECK: trtr 0, 0
+#CHECK: error: missing length in address
+#CHECK: trtr 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: trtr 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: trtr 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trtr 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trtr -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trtr 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: trtr 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: trtr 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: trtr 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: trtr 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: trtr 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: trtr 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: trtr 0(-), 0
+
+ trtr 0, 0
+ trtr 0(%r1), 0(%r1)
+ trtr 0(1,%r1), 0(2,%r1)
+ trtr 0(0,%r1), 0(%r1)
+ trtr 0(257,%r1), 0(%r1)
+ trtr -1(1,%r1), 0(%r1)
+ trtr 4096(1,%r1), 0(%r1)
+ trtr 0(1,%r1), -1(%r1)
+ trtr 0(1,%r1), 4096(%r1)
+ trtr 0(1,%r0), 0(%r1)
+ trtr 0(1,%r1), 0(%r0)
+ trtr 0(%r1,%r2), 0(%r1)
+ trtr 0(1,%r2), 0(%r1,%r2)
+ trtr 0(-), 0
+
+#CHECK: error: invalid register pair
+#CHECK: trtre %r1, %r0
+#CHECK: error: invalid operand
+#CHECK: trtre %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: trtre %r2, %r4, 16
+
+ trtre %r1, %r0
+ trtre %r2, %r4, -1
+ trtre %r2, %r4, 16
+
+#CHECK: error: invalid register pair
+#CHECK: trtt %r1, %r0
+#CHECK: error: invalid operand
+#CHECK: trtt %r2, %r4, -1
+#CHECK: error: invalid operand
+#CHECK: trtt %r2, %r4, 16
+
+ trtt %r1, %r0
+ trtt %r2, %r4, -1
+ trtt %r2, %r4, 16
+
#CHECK: error: invalid operand
#CHECK: ts -1
#CHECK: error: invalid operand
@@ -4083,6 +5691,147 @@
ts 4096
ts 0(%r1,%r2)
+#CHECK: error: missing length in address
+#CHECK: unpk 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: unpk 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: unpk 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: unpk 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpk 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: unpk 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: unpk 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: unpk 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: unpk 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: unpk 0(-), 0(1)
+
+ unpk 0, 0(1)
+ unpk 0(1), 0
+ unpk 0(%r1), 0(1,%r1)
+ unpk 0(1,%r1), 0(%r1)
+ unpk 0(0,%r1), 0(1,%r1)
+ unpk 0(1,%r1), 0(0,%r1)
+ unpk 0(17,%r1), 0(1,%r1)
+ unpk 0(1,%r1), 0(17,%r1)
+ unpk -1(1,%r1), 0(1,%r1)
+ unpk 4096(1,%r1), 0(1,%r1)
+ unpk 0(1,%r1), -1(1,%r1)
+ unpk 0(1,%r1), 4096(1,%r1)
+ unpk 0(1,%r0), 0(1,%r1)
+ unpk 0(1,%r1), 0(1,%r0)
+ unpk 0(%r1,%r2), 0(1,%r1)
+ unpk 0(1,%r2), 0(%r1,%r2)
+ unpk 0(-), 0(1)
+
+#CHECK: error: missing length in address
+#CHECK: unpka 0, 0
+#CHECK: error: missing length in address
+#CHECK: unpka 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: unpka 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpka 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpka 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpka -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpka 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpka 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpka 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: unpka 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: unpka 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: unpka 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: unpka 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: unpka 0(-), 0
+
+ unpka 0, 0
+ unpka 0(%r1), 0(%r1)
+ unpka 0(1,%r1), 0(2,%r1)
+ unpka 0(0,%r1), 0(%r1)
+ unpka 0(257,%r1), 0(%r1)
+ unpka -1(1,%r1), 0(%r1)
+ unpka 4096(1,%r1), 0(%r1)
+ unpka 0(1,%r1), -1(%r1)
+ unpka 0(1,%r1), 4096(%r1)
+ unpka 0(1,%r0), 0(%r1)
+ unpka 0(1,%r1), 0(%r0)
+ unpka 0(%r1,%r2), 0(%r1)
+ unpka 0(1,%r2), 0(%r1,%r2)
+ unpka 0(-), 0
+
+#CHECK: error: missing length in address
+#CHECK: unpku 0, 0
+#CHECK: error: missing length in address
+#CHECK: unpku 0(%r1), 0(%r1)
+#CHECK: error: invalid use of length addressing
+#CHECK: unpku 0(1,%r1), 0(2,%r1)
+#CHECK: error: invalid operand
+#CHECK: unpku 0(0,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpku 0(257,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpku -1(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpku 4096(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpku 0(1,%r1), -1(%r1)
+#CHECK: error: invalid operand
+#CHECK: unpku 0(1,%r1), 4096(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: unpku 0(1,%r0), 0(%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: unpku 0(1,%r1), 0(%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: unpku 0(%r1,%r2), 0(%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: unpku 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: unpku 0(-), 0
+
+ unpku 0, 0
+ unpku 0(%r1), 0(%r1)
+ unpku 0(1,%r1), 0(2,%r1)
+ unpku 0(0,%r1), 0(%r1)
+ unpku 0(257,%r1), 0(%r1)
+ unpku -1(1,%r1), 0(%r1)
+ unpku 4096(1,%r1), 0(%r1)
+ unpku 0(1,%r1), -1(%r1)
+ unpku 0(1,%r1), 4096(%r1)
+ unpku 0(1,%r0), 0(%r1)
+ unpku 0(1,%r1), 0(%r0)
+ unpku 0(%r1,%r2), 0(%r1)
+ unpku 0(1,%r2), 0(%r1,%r2)
+ unpku 0(-), 0
+
#CHECK: error: invalid operand
#CHECK: x %r0, -1
#CHECK: error: invalid operand
@@ -4210,3 +5959,56 @@
xy %r0, -524289
xy %r0, 524288
+
+#CHECK: error: missing length in address
+#CHECK: zap 0, 0(1)
+#CHECK: error: missing length in address
+#CHECK: zap 0(1), 0
+#CHECK: error: missing length in address
+#CHECK: zap 0(%r1), 0(1,%r1)
+#CHECK: error: missing length in address
+#CHECK: zap 0(1,%r1), 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 0(0,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 0(1,%r1), 0(0,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 0(17,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 0(1,%r1), 0(17,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap -1(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 4096(1,%r1), 0(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 0(1,%r1), -1(1,%r1)
+#CHECK: error: invalid operand
+#CHECK: zap 0(1,%r1), 4096(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: zap 0(1,%r0), 0(1,%r1)
+#CHECK: error: %r0 used in an address
+#CHECK: zap 0(1,%r1), 0(1,%r0)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: zap 0(%r1,%r2), 0(1,%r1)
+#CHECK: error: invalid use of indexed addressing
+#CHECK: zap 0(1,%r2), 0(%r1,%r2)
+#CHECK: error: unknown token in expression
+#CHECK: zap 0(-), 0(1)
+
+ zap 0, 0(1)
+ zap 0(1), 0
+ zap 0(%r1), 0(1,%r1)
+ zap 0(1,%r1), 0(%r1)
+ zap 0(0,%r1), 0(1,%r1)
+ zap 0(1,%r1), 0(0,%r1)
+ zap 0(17,%r1), 0(1,%r1)
+ zap 0(1,%r1), 0(17,%r1)
+ zap -1(1,%r1), 0(1,%r1)
+ zap 4096(1,%r1), 0(1,%r1)
+ zap 0(1,%r1), -1(1,%r1)
+ zap 0(1,%r1), 4096(1,%r1)
+ zap 0(1,%r0), 0(1,%r1)
+ zap 0(1,%r1), 0(1,%r0)
+ zap 0(%r1,%r2), 0(1,%r1)
+ zap 0(1,%r2), 0(%r1,%r2)
+ zap 0(-), 0(1)
diff --git a/test/MC/SystemZ/insn-good-z13.s b/test/MC/SystemZ/insn-good-z13.s
index 4fd6a664a29d..cbfcfa9a89af 100644
--- a/test/MC/SystemZ/insn-good-z13.s
+++ b/test/MC/SystemZ/insn-good-z13.s
@@ -4,16 +4,264 @@
# RUN: llvm-mc -triple s390x-linux-gnu -mcpu=arch11 -show-encoding %s \
# RUN: | FileCheck %s
-#CHECK: lzrf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3b]
-#CHECK: lzrf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3b]
-#CHECK: lzrf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3b]
-#CHECK: lzrf %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3b]
-#CHECK: lzrf %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3b]
-#CHECK: lzrf %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3b]
-#CHECK: lzrf %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3b]
-#CHECK: lzrf %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3b]
-#CHECK: lzrf %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3b]
-#CHECK: lzrf %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3b]
+#CHECK: lcbb %r0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x27]
+#CHECK: lcbb %r0, 0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x27]
+#CHECK: lcbb %r0, 4095, 0 # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x27]
+#CHECK: lcbb %r0, 0(%r15), 0 # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x27]
+#CHECK: lcbb %r0, 0(%r15,%r1), 0 # encoding: [0xe7,0x0f,0x10,0x00,0x00,0x27]
+#CHECK: lcbb %r15, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x27]
+#CHECK: lcbb %r2, 1383(%r3,%r4), 8 # encoding: [0xe7,0x23,0x45,0x67,0x80,0x27]
+
+ lcbb %r0, 0, 0
+ lcbb %r0, 0, 15
+ lcbb %r0, 4095, 0
+ lcbb %r0, 0(%r15), 0
+ lcbb %r0, 0(%r15,%r1), 0
+ lcbb %r15, 0, 0
+ lcbb %r2, 1383(%r3,%r4), 8
+
+#CHECK: llzrgf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3a]
+#CHECK: llzrgf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3a]
+#CHECK: llzrgf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3a]
+#CHECK: llzrgf %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3a]
+#CHECK: llzrgf %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3a]
+#CHECK: llzrgf %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3a]
+#CHECK: llzrgf %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3a]
+#CHECK: llzrgf %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3a]
+#CHECK: llzrgf %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3a]
+#CHECK: llzrgf %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3a]
+
+ llzrgf %r0, -524288
+ llzrgf %r0, -1
+ llzrgf %r0, 0
+ llzrgf %r0, 1
+ llzrgf %r0, 524287
+ llzrgf %r0, 0(%r1)
+ llzrgf %r0, 0(%r15)
+ llzrgf %r0, 524287(%r1,%r15)
+ llzrgf %r0, 524287(%r15,%r1)
+ llzrgf %r15, 0
+
+#CHECK: lochi %r11, 42, 0 # encoding: [0xec,0xb0,0x00,0x2a,0x00,0x42]
+#CHECK: lochio %r11, 42 # encoding: [0xec,0xb1,0x00,0x2a,0x00,0x42]
+#CHECK: lochih %r11, 42 # encoding: [0xec,0xb2,0x00,0x2a,0x00,0x42]
+#CHECK: lochinle %r11, 42 # encoding: [0xec,0xb3,0x00,0x2a,0x00,0x42]
+#CHECK: lochil %r11, -1 # encoding: [0xec,0xb4,0xff,0xff,0x00,0x42]
+#CHECK: lochinhe %r11, 42 # encoding: [0xec,0xb5,0x00,0x2a,0x00,0x42]
+#CHECK: lochilh %r11, -1 # encoding: [0xec,0xb6,0xff,0xff,0x00,0x42]
+#CHECK: lochine %r11, 0 # encoding: [0xec,0xb7,0x00,0x00,0x00,0x42]
+#CHECK: lochie %r11, 0 # encoding: [0xec,0xb8,0x00,0x00,0x00,0x42]
+#CHECK: lochinlh %r11, 42 # encoding: [0xec,0xb9,0x00,0x2a,0x00,0x42]
+#CHECK: lochihe %r11, 255 # encoding: [0xec,0xba,0x00,0xff,0x00,0x42]
+#CHECK: lochinl %r11, 255 # encoding: [0xec,0xbb,0x00,0xff,0x00,0x42]
+#CHECK: lochile %r11, 32767 # encoding: [0xec,0xbc,0x7f,0xff,0x00,0x42]
+#CHECK: lochinh %r11, 32767 # encoding: [0xec,0xbd,0x7f,0xff,0x00,0x42]
+#CHECK: lochino %r11, 32512 # encoding: [0xec,0xbe,0x7f,0x00,0x00,0x42]
+#CHECK: lochi %r11, 32512, 15 # encoding: [0xec,0xbf,0x7f,0x00,0x00,0x42]
+
+ lochi %r11, 42, 0
+ lochio %r11, 42
+ lochih %r11, 42
+ lochinle %r11, 42
+ lochil %r11, -1
+ lochinhe %r11, 42
+ lochilh %r11, -1
+ lochine %r11, 0
+ lochie %r11, 0
+ lochinlh %r11, 42
+ lochihe %r11, 255
+ lochinl %r11, 255
+ lochile %r11, 32767
+ lochinh %r11, 32767
+ lochino %r11, 32512
+ lochi %r11, 32512, 15
+
+#CHECK: locghi %r11, 42, 0 # encoding: [0xec,0xb0,0x00,0x2a,0x00,0x46]
+#CHECK: locghio %r11, 42 # encoding: [0xec,0xb1,0x00,0x2a,0x00,0x46]
+#CHECK: locghih %r11, 42 # encoding: [0xec,0xb2,0x00,0x2a,0x00,0x46]
+#CHECK: locghinle %r11, 42 # encoding: [0xec,0xb3,0x00,0x2a,0x00,0x46]
+#CHECK: locghil %r11, -1 # encoding: [0xec,0xb4,0xff,0xff,0x00,0x46]
+#CHECK: locghinhe %r11, 42 # encoding: [0xec,0xb5,0x00,0x2a,0x00,0x46]
+#CHECK: locghilh %r11, -1 # encoding: [0xec,0xb6,0xff,0xff,0x00,0x46]
+#CHECK: locghine %r11, 0 # encoding: [0xec,0xb7,0x00,0x00,0x00,0x46]
+#CHECK: locghie %r11, 0 # encoding: [0xec,0xb8,0x00,0x00,0x00,0x46]
+#CHECK: locghinlh %r11, 42 # encoding: [0xec,0xb9,0x00,0x2a,0x00,0x46]
+#CHECK: locghihe %r11, 255 # encoding: [0xec,0xba,0x00,0xff,0x00,0x46]
+#CHECK: locghinl %r11, 255 # encoding: [0xec,0xbb,0x00,0xff,0x00,0x46]
+#CHECK: locghile %r11, 32767 # encoding: [0xec,0xbc,0x7f,0xff,0x00,0x46]
+#CHECK: locghinh %r11, 32767 # encoding: [0xec,0xbd,0x7f,0xff,0x00,0x46]
+#CHECK: locghino %r11, 32512 # encoding: [0xec,0xbe,0x7f,0x00,0x00,0x46]
+#CHECK: locghi %r11, 32512, 15 # encoding: [0xec,0xbf,0x7f,0x00,0x00,0x46]
+
+ locghi %r11, 42, 0
+ locghio %r11, 42
+ locghih %r11, 42
+ locghinle %r11, 42
+ locghil %r11, -1
+ locghinhe %r11, 42
+ locghilh %r11, -1
+ locghine %r11, 0
+ locghie %r11, 0
+ locghinlh %r11, 42
+ locghihe %r11, 255
+ locghinl %r11, 255
+ locghile %r11, 32767
+ locghinh %r11, 32767
+ locghino %r11, 32512
+ locghi %r11, 32512, 15
+
+#CHECK: lochhi %r11, 42, 0 # encoding: [0xec,0xb0,0x00,0x2a,0x00,0x4e]
+#CHECK: lochhio %r11, 42 # encoding: [0xec,0xb1,0x00,0x2a,0x00,0x4e]
+#CHECK: lochhih %r11, 42 # encoding: [0xec,0xb2,0x00,0x2a,0x00,0x4e]
+#CHECK: lochhinle %r11, 42 # encoding: [0xec,0xb3,0x00,0x2a,0x00,0x4e]
+#CHECK: lochhil %r11, -1 # encoding: [0xec,0xb4,0xff,0xff,0x00,0x4e]
+#CHECK: lochhinhe %r11, 42 # encoding: [0xec,0xb5,0x00,0x2a,0x00,0x4e]
+#CHECK: lochhilh %r11, -1 # encoding: [0xec,0xb6,0xff,0xff,0x00,0x4e]
+#CHECK: lochhine %r11, 0 # encoding: [0xec,0xb7,0x00,0x00,0x00,0x4e]
+#CHECK: lochhie %r11, 0 # encoding: [0xec,0xb8,0x00,0x00,0x00,0x4e]
+#CHECK: lochhinlh %r11, 42 # encoding: [0xec,0xb9,0x00,0x2a,0x00,0x4e]
+#CHECK: lochhihe %r11, 255 # encoding: [0xec,0xba,0x00,0xff,0x00,0x4e]
+#CHECK: lochhinl %r11, 255 # encoding: [0xec,0xbb,0x00,0xff,0x00,0x4e]
+#CHECK: lochhile %r11, 32767 # encoding: [0xec,0xbc,0x7f,0xff,0x00,0x4e]
+#CHECK: lochhinh %r11, 32767 # encoding: [0xec,0xbd,0x7f,0xff,0x00,0x4e]
+#CHECK: lochhino %r11, 32512 # encoding: [0xec,0xbe,0x7f,0x00,0x00,0x4e]
+#CHECK: lochhi %r11, 32512, 15 # encoding: [0xec,0xbf,0x7f,0x00,0x00,0x4e]
+
+ lochhi %r11, 42, 0
+ lochhio %r11, 42
+ lochhih %r11, 42
+ lochhinle %r11, 42
+ lochhil %r11, -1
+ lochhinhe %r11, 42
+ lochhilh %r11, -1
+ lochhine %r11, 0
+ lochhie %r11, 0
+ lochhinlh %r11, 42
+ lochhihe %r11, 255
+ lochhinl %r11, 255
+ lochhile %r11, 32767
+ lochhinh %r11, 32767
+ lochhino %r11, 32512
+ lochhi %r11, 32512, 15
+
+#CHECK: locfh %r0, 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe0]
+#CHECK: locfh %r0, 0, 15 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe0]
+#CHECK: locfh %r0, -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe0]
+#CHECK: locfh %r0, 524287, 0 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe0]
+#CHECK: locfh %r0, 0(%r1), 0 # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe0]
+#CHECK: locfh %r0, 0(%r15), 0 # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe0]
+#CHECK: locfh %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe0]
+#CHECK: locfh %r1, 4095(%r2), 3 # encoding: [0xeb,0x13,0x2f,0xff,0x00,0xe0]
+
+ locfh %r0, 0, 0
+ locfh %r0, 0, 15
+ locfh %r0, -524288, 0
+ locfh %r0, 524287, 0
+ locfh %r0, 0(%r1), 0
+ locfh %r0, 0(%r15), 0
+ locfh %r15, 0, 0
+ locfh %r1, 4095(%r2), 3
+
+#CHECK: locfho %r1, 2(%r3) # encoding: [0xeb,0x11,0x30,0x02,0x00,0xe0]
+#CHECK: locfhh %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe0]
+#CHECK: locfhp %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnle %r1, 2(%r3) # encoding: [0xeb,0x13,0x30,0x02,0x00,0xe0]
+#CHECK: locfhl %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe0]
+#CHECK: locfhm %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnhe %r1, 2(%r3) # encoding: [0xeb,0x15,0x30,0x02,0x00,0xe0]
+#CHECK: locfhlh %r1, 2(%r3) # encoding: [0xeb,0x16,0x30,0x02,0x00,0xe0]
+#CHECK: locfhne %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnz %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe0]
+#CHECK: locfhe %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe0]
+#CHECK: locfhz %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnlh %r1, 2(%r3) # encoding: [0xeb,0x19,0x30,0x02,0x00,0xe0]
+#CHECK: locfhhe %r1, 2(%r3) # encoding: [0xeb,0x1a,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnl %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnm %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe0]
+#CHECK: locfhle %r1, 2(%r3) # encoding: [0xeb,0x1c,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnh %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe0]
+#CHECK: locfhnp %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe0]
+#CHECK: locfhno %r1, 2(%r3) # encoding: [0xeb,0x1e,0x30,0x02,0x00,0xe0]
+
+ locfho %r1, 2(%r3)
+ locfhh %r1, 2(%r3)
+ locfhp %r1, 2(%r3)
+ locfhnle %r1, 2(%r3)
+ locfhl %r1, 2(%r3)
+ locfhm %r1, 2(%r3)
+ locfhnhe %r1, 2(%r3)
+ locfhlh %r1, 2(%r3)
+ locfhne %r1, 2(%r3)
+ locfhnz %r1, 2(%r3)
+ locfhe %r1, 2(%r3)
+ locfhz %r1, 2(%r3)
+ locfhnlh %r1, 2(%r3)
+ locfhhe %r1, 2(%r3)
+ locfhnl %r1, 2(%r3)
+ locfhnm %r1, 2(%r3)
+ locfhle %r1, 2(%r3)
+ locfhnh %r1, 2(%r3)
+ locfhnp %r1, 2(%r3)
+ locfhno %r1, 2(%r3)
+
+#CHECK: locfhr %r1, %r2, 0 # encoding: [0xb9,0xe0,0x00,0x12]
+#CHECK: locfhr %r1, %r2, 15 # encoding: [0xb9,0xe0,0xf0,0x12]
+
+ locfhr %r1, %r2, 0
+ locfhr %r1, %r2, 15
+
+#CHECK: locfhro %r1, %r3 # encoding: [0xb9,0xe0,0x10,0x13]
+#CHECK: locfhrh %r1, %r3 # encoding: [0xb9,0xe0,0x20,0x13]
+#CHECK: locfhrp %r1, %r3 # encoding: [0xb9,0xe0,0x20,0x13]
+#CHECK: locfhrnle %r1, %r3 # encoding: [0xb9,0xe0,0x30,0x13]
+#CHECK: locfhrl %r1, %r3 # encoding: [0xb9,0xe0,0x40,0x13]
+#CHECK: locfhrm %r1, %r3 # encoding: [0xb9,0xe0,0x40,0x13]
+#CHECK: locfhrnhe %r1, %r3 # encoding: [0xb9,0xe0,0x50,0x13]
+#CHECK: locfhrlh %r1, %r3 # encoding: [0xb9,0xe0,0x60,0x13]
+#CHECK: locfhrne %r1, %r3 # encoding: [0xb9,0xe0,0x70,0x13]
+#CHECK: locfhrnz %r1, %r3 # encoding: [0xb9,0xe0,0x70,0x13]
+#CHECK: locfhre %r1, %r3 # encoding: [0xb9,0xe0,0x80,0x13]
+#CHECK: locfhrz %r1, %r3 # encoding: [0xb9,0xe0,0x80,0x13]
+#CHECK: locfhrnlh %r1, %r3 # encoding: [0xb9,0xe0,0x90,0x13]
+#CHECK: locfhrhe %r1, %r3 # encoding: [0xb9,0xe0,0xa0,0x13]
+#CHECK: locfhrnl %r1, %r3 # encoding: [0xb9,0xe0,0xb0,0x13]
+#CHECK: locfhrnm %r1, %r3 # encoding: [0xb9,0xe0,0xb0,0x13]
+#CHECK: locfhrle %r1, %r3 # encoding: [0xb9,0xe0,0xc0,0x13]
+#CHECK: locfhrnh %r1, %r3 # encoding: [0xb9,0xe0,0xd0,0x13]
+#CHECK: locfhrnp %r1, %r3 # encoding: [0xb9,0xe0,0xd0,0x13]
+#CHECK: locfhrno %r1, %r3 # encoding: [0xb9,0xe0,0xe0,0x13]
+
+ locfhro %r1, %r3
+ locfhrh %r1, %r3
+ locfhrp %r1, %r3
+ locfhrnle %r1, %r3
+ locfhrl %r1, %r3
+ locfhrm %r1, %r3
+ locfhrnhe %r1, %r3
+ locfhrlh %r1, %r3
+ locfhrne %r1, %r3
+ locfhrnz %r1, %r3
+ locfhre %r1, %r3
+ locfhrz %r1, %r3
+ locfhrnlh %r1, %r3
+ locfhrhe %r1, %r3
+ locfhrnl %r1, %r3
+ locfhrnm %r1, %r3
+ locfhrle %r1, %r3
+ locfhrnh %r1, %r3
+ locfhrnp %r1, %r3
+ locfhrno %r1, %r3
+
+#CHECK: lzrf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3b]
+#CHECK: lzrf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3b]
+#CHECK: lzrf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3b]
+#CHECK: lzrf %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3b]
+#CHECK: lzrf %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3b]
+#CHECK: lzrf %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3b]
+#CHECK: lzrf %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3b]
+#CHECK: lzrf %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3b]
+#CHECK: lzrf %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3b]
+#CHECK: lzrf %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3b]
lzrf %r0, -524288
lzrf %r0, -1
@@ -26,16 +274,16 @@
lzrf %r0, 524287(%r15,%r1)
lzrf %r15, 0
-#CHECK: lzrg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x2a]
-#CHECK: lzrg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x2a]
-#CHECK: lzrg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x2a]
-#CHECK: lzrg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x2a]
-#CHECK: lzrg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x2a]
-#CHECK: lzrg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x2a]
-#CHECK: lzrg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x2a]
-#CHECK: lzrg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x2a]
-#CHECK: lzrg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x2a]
-#CHECK: lzrg %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x2a]
+#CHECK: lzrg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x2a]
+#CHECK: lzrg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x2a]
+#CHECK: lzrg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x2a]
+#CHECK: lzrg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x2a]
+#CHECK: lzrg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x2a]
+#CHECK: lzrg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x2a]
+#CHECK: lzrg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x2a]
+#CHECK: lzrg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x2a]
+#CHECK: lzrg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x2a]
+#CHECK: lzrg %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x2a]
lzrg %r0, -524288
lzrg %r0, -1
@@ -48,43 +296,75 @@
lzrg %r0, 524287(%r15,%r1)
lzrg %r15, 0
-#CHECK: llzrgf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3a]
-#CHECK: llzrgf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3a]
-#CHECK: llzrgf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3a]
-#CHECK: llzrgf %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3a]
-#CHECK: llzrgf %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3a]
-#CHECK: llzrgf %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3a]
-#CHECK: llzrgf %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3a]
-#CHECK: llzrgf %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3a]
-#CHECK: llzrgf %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3a]
-#CHECK: llzrgf %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3a]
+#CHECK: stocfh %r0, 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe1]
+#CHECK: stocfh %r0, 0, 15 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe1]
+#CHECK: stocfh %r0, -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe1]
+#CHECK: stocfh %r0, 524287, 0 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe1]
+#CHECK: stocfh %r0, 0(%r1), 0 # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe1]
+#CHECK: stocfh %r0, 0(%r15), 0 # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe1]
+#CHECK: stocfh %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe1]
+#CHECK: stocfh %r1, 4095(%r2), 3 # encoding: [0xeb,0x13,0x2f,0xff,0x00,0xe1]
+
+ stocfh %r0, 0, 0
+ stocfh %r0, 0, 15
+ stocfh %r0, -524288, 0
+ stocfh %r0, 524287, 0
+ stocfh %r0, 0(%r1), 0
+ stocfh %r0, 0(%r15), 0
+ stocfh %r15, 0, 0
+ stocfh %r1, 4095(%r2), 3
- llzrgf %r0, -524288
- llzrgf %r0, -1
- llzrgf %r0, 0
- llzrgf %r0, 1
- llzrgf %r0, 524287
- llzrgf %r0, 0(%r1)
- llzrgf %r0, 0(%r15)
- llzrgf %r0, 524287(%r1,%r15)
- llzrgf %r0, 524287(%r15,%r1)
- llzrgf %r15, 0
+#CHECK: stocfho %r1, 2(%r3) # encoding: [0xeb,0x11,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhh %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhp %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnle %r1, 2(%r3) # encoding: [0xeb,0x13,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhl %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhm %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnhe %r1, 2(%r3) # encoding: [0xeb,0x15,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhlh %r1, 2(%r3) # encoding: [0xeb,0x16,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhne %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnz %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhe %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhz %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnlh %r1, 2(%r3) # encoding: [0xeb,0x19,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhhe %r1, 2(%r3) # encoding: [0xeb,0x1a,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnl %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnm %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhle %r1, 2(%r3) # encoding: [0xeb,0x1c,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnh %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhnp %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe1]
+#CHECK: stocfhno %r1, 2(%r3) # encoding: [0xeb,0x1e,0x30,0x02,0x00,0xe1]
-#CHECK: lcbb %r0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x27]
-#CHECK: lcbb %r0, 0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x27]
-#CHECK: lcbb %r0, 4095, 0 # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x27]
-#CHECK: lcbb %r0, 0(%r15), 0 # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x27]
-#CHECK: lcbb %r0, 0(%r15,%r1), 0 # encoding: [0xe7,0x0f,0x10,0x00,0x00,0x27]
-#CHECK: lcbb %r15, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x27]
-#CHECK: lcbb %r2, 1383(%r3,%r4), 8 # encoding: [0xe7,0x23,0x45,0x67,0x80,0x27]
-
- lcbb %r0, 0, 0
- lcbb %r0, 0, 15
- lcbb %r0, 4095, 0
- lcbb %r0, 0(%r15), 0
- lcbb %r0, 0(%r15,%r1), 0
- lcbb %r15, 0, 0
- lcbb %r2, 1383(%r3,%r4), 8
+ stocfho %r1, 2(%r3)
+ stocfhh %r1, 2(%r3)
+ stocfhp %r1, 2(%r3)
+ stocfhnle %r1, 2(%r3)
+ stocfhl %r1, 2(%r3)
+ stocfhm %r1, 2(%r3)
+ stocfhnhe %r1, 2(%r3)
+ stocfhlh %r1, 2(%r3)
+ stocfhne %r1, 2(%r3)
+ stocfhnz %r1, 2(%r3)
+ stocfhe %r1, 2(%r3)
+ stocfhz %r1, 2(%r3)
+ stocfhnlh %r1, 2(%r3)
+ stocfhhe %r1, 2(%r3)
+ stocfhnl %r1, 2(%r3)
+ stocfhnm %r1, 2(%r3)
+ stocfhle %r1, 2(%r3)
+ stocfhnh %r1, 2(%r3)
+ stocfhnp %r1, 2(%r3)
+ stocfhno %r1, 2(%r3)
+
+#CHECK: ppno %r2, %r2 # encoding: [0xb9,0x3c,0x00,0x22]
+#CHECK: ppno %r2, %r14 # encoding: [0xb9,0x3c,0x00,0x2e]
+#CHECK: ppno %r14, %r2 # encoding: [0xb9,0x3c,0x00,0xe2]
+#CHECK: ppno %r6, %r10 # encoding: [0xb9,0x3c,0x00,0x6a]
+
+ ppno %r2, %r2
+ ppno %r2, %r14
+ ppno %r14, %r2
+ ppno %r6, %r10
#CHECK: va %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xf3]
#CHECK: va %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xf3]
@@ -486,18 +766,6 @@
vcdlgb %v31, %v0, 0, 0
vcdlgb %v14, %v17, 4, 10
-#CHECK: vcksm %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x66]
-#CHECK: vcksm %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x66]
-#CHECK: vcksm %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x66]
-#CHECK: vcksm %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x66]
-#CHECK: vcksm %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x66]
-
- vcksm %v0, %v0, %v0
- vcksm %v0, %v0, %v31
- vcksm %v0, %v31, %v0
- vcksm %v31, %v0, %v0
- vcksm %v18, %v3, %v20
-
#CHECK: vceq %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xf8]
#CHECK: vceq %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xf8]
#CHECK: vceq %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x00,0xf8]
@@ -748,6 +1016,18 @@
vchlh %v18, %v3, %v20
vchlhs %v5, %v22, %v7
+#CHECK: vcksm %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x66]
+#CHECK: vcksm %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x66]
+#CHECK: vcksm %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x66]
+#CHECK: vcksm %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x66]
+#CHECK: vcksm %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x66]
+
+ vcksm %v0, %v0, %v0
+ vcksm %v0, %v0, %v31
+ vcksm %v0, %v31, %v0
+ vcksm %v31, %v0, %v0
+ vcksm %v18, %v3, %v20
+
#CHECK: vclgd %v0, %v0, 0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xc0]
#CHECK: vclgd %v0, %v0, 15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xc0]
#CHECK: vclgd %v0, %v0, 0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x00,0xc0]
@@ -1142,68 +1422,6 @@
verimh %v31, %v0, %v0, 0
verimh %v13, %v17, %v21, 0x79
-#CHECK: verllv %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x73]
-#CHECK: verllv %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x73]
-#CHECK: verllv %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x73]
-#CHECK: verllv %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x73]
-#CHECK: verllv %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x73]
-#CHECK: verllv %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x73]
-
- verllv %v0, %v0, %v0, 0
- verllv %v0, %v0, %v0, 15
- verllv %v0, %v0, %v31, 0
- verllv %v0, %v31, %v0, 0
- verllv %v31, %v0, %v0, 0
- verllv %v18, %v3, %v20, 11
-
-#CHECK: verllvb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x73]
-#CHECK: verllvb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x73]
-#CHECK: verllvb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x73]
-#CHECK: verllvb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x73]
-#CHECK: verllvb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x73]
-
- verllvb %v0, %v0, %v0
- verllvb %v0, %v0, %v31
- verllvb %v0, %v31, %v0
- verllvb %v31, %v0, %v0
- verllvb %v18, %v3, %v20
-
-#CHECK: verllvf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x73]
-#CHECK: verllvf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x73]
-#CHECK: verllvf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x73]
-#CHECK: verllvf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x73]
-#CHECK: verllvf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x73]
-
- verllvf %v0, %v0, %v0
- verllvf %v0, %v0, %v31
- verllvf %v0, %v31, %v0
- verllvf %v31, %v0, %v0
- verllvf %v18, %v3, %v20
-
-#CHECK: verllvg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x73]
-#CHECK: verllvg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x73]
-#CHECK: verllvg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x73]
-#CHECK: verllvg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x73]
-#CHECK: verllvg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x73]
-
- verllvg %v0, %v0, %v0
- verllvg %v0, %v0, %v31
- verllvg %v0, %v31, %v0
- verllvg %v31, %v0, %v0
- verllvg %v18, %v3, %v20
-
-#CHECK: verllvh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x73]
-#CHECK: verllvh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x73]
-#CHECK: verllvh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x73]
-#CHECK: verllvh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x73]
-#CHECK: verllvh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x73]
-
- verllvh %v0, %v0, %v0
- verllvh %v0, %v0, %v31
- verllvh %v0, %v31, %v0
- verllvh %v31, %v0, %v0
- verllvh %v18, %v3, %v20
-
#CHECK: verll %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x33]
#CHECK: verll %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x33]
#CHECK: verll %v0, %v0, 4095, 0 # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x33]
@@ -1276,67 +1494,67 @@
verllh %v31, %v0, 0
verllh %v14, %v17, 1074(%r5)
-#CHECK: veslv %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x70]
-#CHECK: veslv %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x70]
-#CHECK: veslv %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x70]
-#CHECK: veslv %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x70]
-#CHECK: veslv %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x70]
-#CHECK: veslv %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x70]
+#CHECK: verllv %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x73]
+#CHECK: verllv %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x73]
+#CHECK: verllv %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x73]
+#CHECK: verllv %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x73]
+#CHECK: verllv %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x73]
+#CHECK: verllv %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x73]
- veslv %v0, %v0, %v0, 0
- veslv %v0, %v0, %v0, 15
- veslv %v0, %v0, %v31, 0
- veslv %v0, %v31, %v0, 0
- veslv %v31, %v0, %v0, 0
- veslv %v18, %v3, %v20, 11
+ verllv %v0, %v0, %v0, 0
+ verllv %v0, %v0, %v0, 15
+ verllv %v0, %v0, %v31, 0
+ verllv %v0, %v31, %v0, 0
+ verllv %v31, %v0, %v0, 0
+ verllv %v18, %v3, %v20, 11
-#CHECK: veslvb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x70]
-#CHECK: veslvb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x70]
-#CHECK: veslvb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x70]
-#CHECK: veslvb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x70]
-#CHECK: veslvb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x70]
+#CHECK: verllvb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x73]
+#CHECK: verllvb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x73]
+#CHECK: verllvb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x73]
+#CHECK: verllvb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x73]
+#CHECK: verllvb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x73]
- veslvb %v0, %v0, %v0
- veslvb %v0, %v0, %v31
- veslvb %v0, %v31, %v0
- veslvb %v31, %v0, %v0
- veslvb %v18, %v3, %v20
+ verllvb %v0, %v0, %v0
+ verllvb %v0, %v0, %v31
+ verllvb %v0, %v31, %v0
+ verllvb %v31, %v0, %v0
+ verllvb %v18, %v3, %v20
-#CHECK: veslvf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x70]
-#CHECK: veslvf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x70]
-#CHECK: veslvf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x70]
-#CHECK: veslvf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x70]
-#CHECK: veslvf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x70]
+#CHECK: verllvf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x73]
+#CHECK: verllvf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x73]
+#CHECK: verllvf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x73]
+#CHECK: verllvf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x73]
+#CHECK: verllvf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x73]
- veslvf %v0, %v0, %v0
- veslvf %v0, %v0, %v31
- veslvf %v0, %v31, %v0
- veslvf %v31, %v0, %v0
- veslvf %v18, %v3, %v20
+ verllvf %v0, %v0, %v0
+ verllvf %v0, %v0, %v31
+ verllvf %v0, %v31, %v0
+ verllvf %v31, %v0, %v0
+ verllvf %v18, %v3, %v20
-#CHECK: veslvg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x70]
-#CHECK: veslvg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x70]
-#CHECK: veslvg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x70]
-#CHECK: veslvg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x70]
-#CHECK: veslvg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x70]
+#CHECK: verllvg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x73]
+#CHECK: verllvg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x73]
+#CHECK: verllvg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x73]
+#CHECK: verllvg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x73]
+#CHECK: verllvg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x73]
- veslvg %v0, %v0, %v0
- veslvg %v0, %v0, %v31
- veslvg %v0, %v31, %v0
- veslvg %v31, %v0, %v0
- veslvg %v18, %v3, %v20
+ verllvg %v0, %v0, %v0
+ verllvg %v0, %v0, %v31
+ verllvg %v0, %v31, %v0
+ verllvg %v31, %v0, %v0
+ verllvg %v18, %v3, %v20
-#CHECK: veslvh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x70]
-#CHECK: veslvh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x70]
-#CHECK: veslvh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x70]
-#CHECK: veslvh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x70]
-#CHECK: veslvh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x70]
+#CHECK: verllvh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x73]
+#CHECK: verllvh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x73]
+#CHECK: verllvh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x73]
+#CHECK: verllvh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x73]
+#CHECK: verllvh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x73]
- veslvh %v0, %v0, %v0
- veslvh %v0, %v0, %v31
- veslvh %v0, %v31, %v0
- veslvh %v31, %v0, %v0
- veslvh %v18, %v3, %v20
+ verllvh %v0, %v0, %v0
+ verllvh %v0, %v0, %v31
+ verllvh %v0, %v31, %v0
+ verllvh %v31, %v0, %v0
+ verllvh %v18, %v3, %v20
#CHECK: vesl %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x30]
#CHECK: vesl %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x30]
@@ -1410,67 +1628,67 @@
veslh %v31, %v0, 0
veslh %v14, %v17, 1074(%r5)
-#CHECK: vesrav %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x7a]
-#CHECK: vesrav %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x7a]
-#CHECK: vesrav %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x7a]
-#CHECK: vesrav %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x7a]
-#CHECK: vesrav %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x7a]
-#CHECK: vesrav %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x7a]
+#CHECK: veslv %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x70]
+#CHECK: veslv %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x70]
+#CHECK: veslv %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x70]
+#CHECK: veslv %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x70]
+#CHECK: veslv %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x70]
+#CHECK: veslv %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x70]
- vesrav %v0, %v0, %v0, 0
- vesrav %v0, %v0, %v0, 15
- vesrav %v0, %v0, %v31, 0
- vesrav %v0, %v31, %v0, 0
- vesrav %v31, %v0, %v0, 0
- vesrav %v18, %v3, %v20, 11
+ veslv %v0, %v0, %v0, 0
+ veslv %v0, %v0, %v0, 15
+ veslv %v0, %v0, %v31, 0
+ veslv %v0, %v31, %v0, 0
+ veslv %v31, %v0, %v0, 0
+ veslv %v18, %v3, %v20, 11
-#CHECK: vesravb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x7a]
-#CHECK: vesravb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x7a]
-#CHECK: vesravb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x7a]
-#CHECK: vesravb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x7a]
-#CHECK: vesravb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x7a]
+#CHECK: veslvb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x70]
+#CHECK: veslvb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x70]
+#CHECK: veslvb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x70]
+#CHECK: veslvb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x70]
+#CHECK: veslvb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x70]
- vesravb %v0, %v0, %v0
- vesravb %v0, %v0, %v31
- vesravb %v0, %v31, %v0
- vesravb %v31, %v0, %v0
- vesravb %v18, %v3, %v20
+ veslvb %v0, %v0, %v0
+ veslvb %v0, %v0, %v31
+ veslvb %v0, %v31, %v0
+ veslvb %v31, %v0, %v0
+ veslvb %v18, %v3, %v20
-#CHECK: vesravf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x7a]
-#CHECK: vesravf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x7a]
-#CHECK: vesravf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x7a]
-#CHECK: vesravf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x7a]
-#CHECK: vesravf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x7a]
+#CHECK: veslvf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x70]
+#CHECK: veslvf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x70]
+#CHECK: veslvf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x70]
+#CHECK: veslvf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x70]
+#CHECK: veslvf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x70]
- vesravf %v0, %v0, %v0
- vesravf %v0, %v0, %v31
- vesravf %v0, %v31, %v0
- vesravf %v31, %v0, %v0
- vesravf %v18, %v3, %v20
+ veslvf %v0, %v0, %v0
+ veslvf %v0, %v0, %v31
+ veslvf %v0, %v31, %v0
+ veslvf %v31, %v0, %v0
+ veslvf %v18, %v3, %v20
-#CHECK: vesravg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x7a]
-#CHECK: vesravg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x7a]
-#CHECK: vesravg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x7a]
-#CHECK: vesravg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x7a]
-#CHECK: vesravg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x7a]
+#CHECK: veslvg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x70]
+#CHECK: veslvg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x70]
+#CHECK: veslvg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x70]
+#CHECK: veslvg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x70]
+#CHECK: veslvg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x70]
- vesravg %v0, %v0, %v0
- vesravg %v0, %v0, %v31
- vesravg %v0, %v31, %v0
- vesravg %v31, %v0, %v0
- vesravg %v18, %v3, %v20
+ veslvg %v0, %v0, %v0
+ veslvg %v0, %v0, %v31
+ veslvg %v0, %v31, %v0
+ veslvg %v31, %v0, %v0
+ veslvg %v18, %v3, %v20
-#CHECK: vesravh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x7a]
-#CHECK: vesravh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x7a]
-#CHECK: vesravh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x7a]
-#CHECK: vesravh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x7a]
-#CHECK: vesravh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x7a]
+#CHECK: veslvh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x70]
+#CHECK: veslvh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x70]
+#CHECK: veslvh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x70]
+#CHECK: veslvh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x70]
+#CHECK: veslvh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x70]
- vesravh %v0, %v0, %v0
- vesravh %v0, %v0, %v31
- vesravh %v0, %v31, %v0
- vesravh %v31, %v0, %v0
- vesravh %v18, %v3, %v20
+ veslvh %v0, %v0, %v0
+ veslvh %v0, %v0, %v31
+ veslvh %v0, %v31, %v0
+ veslvh %v31, %v0, %v0
+ veslvh %v18, %v3, %v20
#CHECK: vesra %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x3a]
#CHECK: vesra %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x3a]
@@ -1544,67 +1762,67 @@
vesrah %v31, %v0, 0
vesrah %v14, %v17, 1074(%r5)
-#CHECK: vesrlv %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x78]
-#CHECK: vesrlv %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x78]
-#CHECK: vesrlv %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x78]
-#CHECK: vesrlv %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x78]
-#CHECK: vesrlv %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x78]
-#CHECK: vesrlv %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x78]
+#CHECK: vesrav %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x7a]
+#CHECK: vesrav %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x7a]
+#CHECK: vesrav %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x7a]
+#CHECK: vesrav %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x7a]
+#CHECK: vesrav %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x7a]
+#CHECK: vesrav %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x7a]
- vesrlv %v0, %v0, %v0, 0
- vesrlv %v0, %v0, %v0, 15
- vesrlv %v0, %v0, %v31, 0
- vesrlv %v0, %v31, %v0, 0
- vesrlv %v31, %v0, %v0, 0
- vesrlv %v18, %v3, %v20, 11
+ vesrav %v0, %v0, %v0, 0
+ vesrav %v0, %v0, %v0, 15
+ vesrav %v0, %v0, %v31, 0
+ vesrav %v0, %v31, %v0, 0
+ vesrav %v31, %v0, %v0, 0
+ vesrav %v18, %v3, %v20, 11
-#CHECK: vesrlvb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x78]
-#CHECK: vesrlvb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x78]
-#CHECK: vesrlvb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x78]
-#CHECK: vesrlvb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x78]
-#CHECK: vesrlvb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x78]
+#CHECK: vesravb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x7a]
+#CHECK: vesravb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x7a]
+#CHECK: vesravb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x7a]
+#CHECK: vesravb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x7a]
+#CHECK: vesravb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x7a]
- vesrlvb %v0, %v0, %v0
- vesrlvb %v0, %v0, %v31
- vesrlvb %v0, %v31, %v0
- vesrlvb %v31, %v0, %v0
- vesrlvb %v18, %v3, %v20
+ vesravb %v0, %v0, %v0
+ vesravb %v0, %v0, %v31
+ vesravb %v0, %v31, %v0
+ vesravb %v31, %v0, %v0
+ vesravb %v18, %v3, %v20
-#CHECK: vesrlvf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x78]
-#CHECK: vesrlvf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x78]
-#CHECK: vesrlvf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x78]
-#CHECK: vesrlvf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x78]
-#CHECK: vesrlvf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x78]
+#CHECK: vesravf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x7a]
+#CHECK: vesravf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x7a]
+#CHECK: vesravf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x7a]
+#CHECK: vesravf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x7a]
+#CHECK: vesravf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x7a]
- vesrlvf %v0, %v0, %v0
- vesrlvf %v0, %v0, %v31
- vesrlvf %v0, %v31, %v0
- vesrlvf %v31, %v0, %v0
- vesrlvf %v18, %v3, %v20
+ vesravf %v0, %v0, %v0
+ vesravf %v0, %v0, %v31
+ vesravf %v0, %v31, %v0
+ vesravf %v31, %v0, %v0
+ vesravf %v18, %v3, %v20
-#CHECK: vesrlvg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x78]
-#CHECK: vesrlvg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x78]
-#CHECK: vesrlvg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x78]
-#CHECK: vesrlvg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x78]
-#CHECK: vesrlvg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x78]
+#CHECK: vesravg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x7a]
+#CHECK: vesravg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x7a]
+#CHECK: vesravg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x7a]
+#CHECK: vesravg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x7a]
+#CHECK: vesravg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x7a]
- vesrlvg %v0, %v0, %v0
- vesrlvg %v0, %v0, %v31
- vesrlvg %v0, %v31, %v0
- vesrlvg %v31, %v0, %v0
- vesrlvg %v18, %v3, %v20
+ vesravg %v0, %v0, %v0
+ vesravg %v0, %v0, %v31
+ vesravg %v0, %v31, %v0
+ vesravg %v31, %v0, %v0
+ vesravg %v18, %v3, %v20
-#CHECK: vesrlvh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x78]
-#CHECK: vesrlvh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x78]
-#CHECK: vesrlvh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x78]
-#CHECK: vesrlvh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x78]
-#CHECK: vesrlvh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x78]
+#CHECK: vesravh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x7a]
+#CHECK: vesravh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x7a]
+#CHECK: vesravh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x7a]
+#CHECK: vesravh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x7a]
+#CHECK: vesravh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x7a]
- vesrlvh %v0, %v0, %v0
- vesrlvh %v0, %v0, %v31
- vesrlvh %v0, %v31, %v0
- vesrlvh %v31, %v0, %v0
- vesrlvh %v18, %v3, %v20
+ vesravh %v0, %v0, %v0
+ vesravh %v0, %v0, %v31
+ vesravh %v0, %v31, %v0
+ vesravh %v31, %v0, %v0
+ vesravh %v18, %v3, %v20
#CHECK: vesrl %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x38]
#CHECK: vesrl %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x38]
@@ -1678,6 +1896,68 @@
vesrlh %v31, %v0, 0
vesrlh %v14, %v17, 1074(%r5)
+#CHECK: vesrlv %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x78]
+#CHECK: vesrlv %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x78]
+#CHECK: vesrlv %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x78]
+#CHECK: vesrlv %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x78]
+#CHECK: vesrlv %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x78]
+#CHECK: vesrlv %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0x78]
+
+ vesrlv %v0, %v0, %v0, 0
+ vesrlv %v0, %v0, %v0, 15
+ vesrlv %v0, %v0, %v31, 0
+ vesrlv %v0, %v31, %v0, 0
+ vesrlv %v31, %v0, %v0, 0
+ vesrlv %v18, %v3, %v20, 11
+
+#CHECK: vesrlvb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x78]
+#CHECK: vesrlvb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x78]
+#CHECK: vesrlvb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x78]
+#CHECK: vesrlvb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x78]
+#CHECK: vesrlvb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x78]
+
+ vesrlvb %v0, %v0, %v0
+ vesrlvb %v0, %v0, %v31
+ vesrlvb %v0, %v31, %v0
+ vesrlvb %v31, %v0, %v0
+ vesrlvb %v18, %v3, %v20
+
+#CHECK: vesrlvf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x78]
+#CHECK: vesrlvf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x78]
+#CHECK: vesrlvf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x78]
+#CHECK: vesrlvf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x78]
+#CHECK: vesrlvf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x78]
+
+ vesrlvf %v0, %v0, %v0
+ vesrlvf %v0, %v0, %v31
+ vesrlvf %v0, %v31, %v0
+ vesrlvf %v31, %v0, %v0
+ vesrlvf %v18, %v3, %v20
+
+#CHECK: vesrlvg %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x78]
+#CHECK: vesrlvg %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0x78]
+#CHECK: vesrlvg %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x78]
+#CHECK: vesrlvg %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x78]
+#CHECK: vesrlvg %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x3a,0x78]
+
+ vesrlvg %v0, %v0, %v0
+ vesrlvg %v0, %v0, %v31
+ vesrlvg %v0, %v31, %v0
+ vesrlvg %v31, %v0, %v0
+ vesrlvg %v18, %v3, %v20
+
+#CHECK: vesrlvh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x78]
+#CHECK: vesrlvh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x78]
+#CHECK: vesrlvh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x78]
+#CHECK: vesrlvh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x78]
+#CHECK: vesrlvh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x78]
+
+ vesrlvh %v0, %v0, %v0
+ vesrlvh %v0, %v0, %v31
+ vesrlvh %v0, %v31, %v0
+ vesrlvh %v31, %v0, %v0
+ vesrlvh %v18, %v3, %v20
+
#CHECK: vfa %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xe3]
#CHECK: vfa %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xe3]
#CHECK: vfa %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xe3]
@@ -2040,19 +2320,19 @@
vfeezb %v18, %v3, %v20
vfeezbs %v5, %v22, %v7
-#CFECK: vfeef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x80]
-#CFECK: vfeef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x80]
-#CFECK: vfeef %v0, %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x20,0x80]
-#CFECK: vfeef %v0, %v0, %v15, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x20,0x80]
-#CFECK: vfeef %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x80]
-#CFECK: vfeef %v0, %v15, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x80]
-#CFECK: vfeef %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x80]
-#CFECK: vfeef %v15, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x80]
-#CFECK: vfeef %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x80]
-#CFECK: vfeef %v18, %v3, %v20, 0 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x80]
-#CFECK: vfeefs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x10,0x24,0x80]
-#CFECK: vfeezf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x20,0x2a,0x80]
-#CFECK: vfeezfs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x30,0x24,0x80]
+#CHECK: vfeef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x80]
+#CHECK: vfeef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x80]
+#CHECK: vfeef %v0, %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x20,0x80]
+#CHECK: vfeef %v0, %v0, %v15, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x20,0x80]
+#CHECK: vfeef %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x80]
+#CHECK: vfeef %v0, %v15, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x80]
+#CHECK: vfeef %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x80]
+#CHECK: vfeef %v15, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x80]
+#CHECK: vfeef %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x80]
+#CHECK: vfeef %v18, %v3, %v20, 0 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x80]
+#CHECK: vfeefs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x10,0x24,0x80]
+#CHECK: vfeezf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x20,0x2a,0x80]
+#CHECK: vfeezfs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x30,0x24,0x80]
vfeef %v0, %v0, %v0
vfeef %v0, %v0, %v0, 0
@@ -2152,19 +2432,19 @@
vfenezb %v18, %v3, %v20
vfenezbs %v5, %v22, %v7
-#CFECK: vfenef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x81]
-#CFECK: vfenef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x81]
-#CFECK: vfenef %v0, %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x20,0x81]
-#CFECK: vfenef %v0, %v0, %v15, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x20,0x81]
-#CFECK: vfenef %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x81]
-#CFECK: vfenef %v0, %v15, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x81]
-#CFECK: vfenef %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x81]
-#CFECK: vfenef %v15, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x81]
-#CFECK: vfenef %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x81]
-#CFECK: vfenef %v18, %v3, %v20, 0 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x81]
-#CFECK: vfenefs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x10,0x24,0x81]
-#CFECK: vfenezf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x20,0x2a,0x81]
-#CFECK: vfenezfs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x30,0x24,0x81]
+#CHECK: vfenef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x81]
+#CHECK: vfenef %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x81]
+#CHECK: vfenef %v0, %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x20,0x81]
+#CHECK: vfenef %v0, %v0, %v15, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x20,0x81]
+#CHECK: vfenef %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x81]
+#CHECK: vfenef %v0, %v15, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x81]
+#CHECK: vfenef %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x81]
+#CHECK: vfenef %v15, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x81]
+#CHECK: vfenef %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x81]
+#CHECK: vfenef %v18, %v3, %v20, 0 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x81]
+#CHECK: vfenefs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x10,0x24,0x81]
+#CHECK: vfenezf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x20,0x2a,0x81]
+#CHECK: vfenezfs %v5, %v22, %v7 # encoding: [0xe7,0x56,0x70,0x30,0x24,0x81]
vfenef %v0, %v0, %v0
vfenef %v0, %v0, %v0, 0
@@ -2242,123 +2522,6 @@
vfidb %v31, %v0, 0, 0
vfidb %v14, %v17, 4, 10
-#CHECK: vistr %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
-#CHECK: vistr %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x5c]
-#CHECK: vistr %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
-#CHECK: vistr %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x5c]
-#CHECK: vistr %v0, %v0, 0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x00,0x5c]
-#CHECK: vistr %v0, %v15, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x5c]
-#CHECK: vistr %v0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x5c]
-#CHECK: vistr %v15, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x5c]
-#CHECK: vistr %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x5c]
-#CHECK: vistr %v18, %v3, 11, 9 # encoding: [0xe7,0x23,0x00,0x90,0xb8,0x5c]
-
- vistr %v0, %v0, 0
- vistr %v0, %v0, 15
- vistr %v0, %v0, 0, 0
- vistr %v0, %v0, 15, 0
- vistr %v0, %v0, 0, 12
- vistr %v0, %v15, 0
- vistr %v0, %v31, 0
- vistr %v15, %v0, 0
- vistr %v31, %v0, 0
- vistr %v18, %v3, 11, 9
-
-#CHECK: vistrb %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
-#CHECK: vistrb %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
-#CHECK: vistrb %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x00,0x5c]
-#CHECK: vistrb %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x5c]
-#CHECK: vistrb %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x5c]
-#CHECK: vistrb %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x5c]
-#CHECK: vistrb %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x5c]
-#CHECK: vistrb %v18, %v3, 0 # encoding: [0xe7,0x23,0x00,0x00,0x08,0x5c]
-#CHECK: vistrbs %v5, %v22 # encoding: [0xe7,0x56,0x00,0x10,0x04,0x5c]
-
- vistrb %v0, %v0
- vistrb %v0, %v0, 0
- vistrb %v0, %v0, 12
- vistrb %v0, %v15
- vistrb %v0, %v31
- vistrb %v15, %v0
- vistrb %v31, %v0
- vistrb %v18, %v3
- vistrbs %v5, %v22
-
-#CBECK: vistrf %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x5c]
-#CBECK: vistrf %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x5c]
-#CBECK: vistrf %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x20,0x5c]
-#CBECK: vistrf %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x5c]
-#CBECK: vistrf %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x5c]
-#CBECK: vistrf %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x5c]
-#CBECK: vistrf %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x5c]
-#CBECK: vistrf %v18, %v3, 0 # encoding: [0xe7,0x23,0x00,0x00,0x28,0x5c]
-#CBECK: vistrfs %v5, %v22 # encoding: [0xe7,0x56,0x00,0x10,0x24,0x5c]
-
- vistrf %v0, %v0
- vistrf %v0, %v0, 0
- vistrf %v0, %v0, 12
- vistrf %v0, %v15
- vistrf %v0, %v31
- vistrf %v15, %v0
- vistrf %v31, %v0
- vistrf %v18, %v3
- vistrfs %v5, %v22
-
-#CHECK: vistrh %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x5c]
-#CHECK: vistrh %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x5c]
-#CHECK: vistrh %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x10,0x5c]
-#CHECK: vistrh %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x10,0x5c]
-#CHECK: vistrh %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x5c]
-#CHECK: vistrh %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x10,0x5c]
-#CHECK: vistrh %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x5c]
-#CHECK: vistrh %v18, %v3, 0 # encoding: [0xe7,0x23,0x00,0x00,0x18,0x5c]
-#CHECK: vistrhs %v5, %v22 # encoding: [0xe7,0x56,0x00,0x10,0x14,0x5c]
-
- vistrh %v0, %v0
- vistrh %v0, %v0, 0
- vistrh %v0, %v0, 12
- vistrh %v0, %v15
- vistrh %v0, %v31
- vistrh %v15, %v0
- vistrh %v31, %v0
- vistrh %v18, %v3
- vistrhs %v5, %v22
-
-#CHECK: vfpso %v0, %v0, 0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xcc]
-#CHECK: vfpso %v0, %v0, 15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xcc]
-#CHECK: vfpso %v0, %v0, 0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xcc]
-#CHECK: vfpso %v0, %v0, 0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x00,0xcc]
-#CHECK: vfpso %v0, %v15, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xcc]
-#CHECK: vfpso %v0, %v31, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xcc]
-#CHECK: vfpso %v15, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xcc]
-#CHECK: vfpso %v31, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xcc]
-#CHECK: vfpso %v14, %v17, 11, 9, 7 # encoding: [0xe7,0xe1,0x00,0x79,0xb4,0xcc]
-
- vfpso %v0, %v0, 0, 0, 0
- vfpso %v0, %v0, 15, 0, 0
- vfpso %v0, %v0, 0, 15, 0
- vfpso %v0, %v0, 0, 0, 15
- vfpso %v0, %v15, 0, 0, 0
- vfpso %v0, %v31, 0, 0, 0
- vfpso %v15, %v0, 0, 0, 0
- vfpso %v31, %v0, 0, 0, 0
- vfpso %v14, %v17, 11, 9, 7
-
-#CHECK: vfpsodb %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x30,0xcc]
-#CHECK: vfpsodb %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x30,0xcc]
-#CHECK: vfpsodb %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xcc]
-#CHECK: vfpsodb %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x30,0xcc]
-#CHECK: vfpsodb %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0xcc]
-#CHECK: vfpsodb %v14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x70,0x34,0xcc]
-
- vfpsodb %v0, %v0, 0
- vfpsodb %v0, %v0, 15
- vfpsodb %v0, %v15, 0
- vfpsodb %v0, %v31, 0
- vfpsodb %v15, %v0, 0
- vfpsodb %v31, %v0, 0
- vfpsodb %v14, %v17, 7
-
#CHECK: vflcdb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0xcc]
#CHECK: vflcdb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x30,0xcc]
#CHECK: vflcdb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xcc]
@@ -2401,6 +2564,22 @@
vflpdb %v31, %v0
vflpdb %v14, %v17
+#CHECK: vfm %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xe7]
+#CHECK: vfm %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xe7]
+#CHECK: vfm %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xe7]
+#CHECK: vfm %v0, %v0, %v31, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xe7]
+#CHECK: vfm %v0, %v31, %v0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xe7]
+#CHECK: vfm %v31, %v0, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xe7]
+#CHECK: vfm %v18, %v3, %v20, 11, 9 # encoding: [0xe7,0x23,0x40,0x09,0xba,0xe7]
+
+ vfm %v0, %v0, %v0, 0, 0
+ vfm %v0, %v0, %v0, 15, 0
+ vfm %v0, %v0, %v0, 0, 15
+ vfm %v0, %v0, %v31, 0, 0
+ vfm %v0, %v31, %v0, 0, 0
+ vfm %v31, %v0, %v0, 0, 0
+ vfm %v18, %v3, %v20, 11, 9
+
#CHECK: vfma %v0, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x8f]
#CHECK: vfma %v0, %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x0f,0x00,0x00,0x8f]
#CHECK: vfma %v0, %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0x8f]
@@ -2433,22 +2612,6 @@
vfmadb %v31, %v0, %v0, %v0
vfmadb %v13, %v17, %v21, %v25
-#CHECK: vfm %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xe7]
-#CHECK: vfm %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xe7]
-#CHECK: vfm %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xe7]
-#CHECK: vfm %v0, %v0, %v31, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xe7]
-#CHECK: vfm %v0, %v31, %v0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xe7]
-#CHECK: vfm %v31, %v0, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xe7]
-#CHECK: vfm %v18, %v3, %v20, 11, 9 # encoding: [0xe7,0x23,0x40,0x09,0xba,0xe7]
-
- vfm %v0, %v0, %v0, 0, 0
- vfm %v0, %v0, %v0, 15, 0
- vfm %v0, %v0, %v0, 0, 15
- vfm %v0, %v0, %v31, 0, 0
- vfm %v0, %v31, %v0, 0, 0
- vfm %v31, %v0, %v0, 0, 0
- vfm %v18, %v3, %v20, 11, 9
-
#CHECK: vfmdb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0xe7]
#CHECK: vfmdb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0xe7]
#CHECK: vfmdb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xe7]
@@ -2493,6 +2656,41 @@
vfmsdb %v31, %v0, %v0, %v0
vfmsdb %v13, %v17, %v21, %v25
+#CHECK: vfpso %v0, %v0, 0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xcc]
+#CHECK: vfpso %v0, %v0, 15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xcc]
+#CHECK: vfpso %v0, %v0, 0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xcc]
+#CHECK: vfpso %v0, %v0, 0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x00,0xcc]
+#CHECK: vfpso %v0, %v15, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xcc]
+#CHECK: vfpso %v0, %v31, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xcc]
+#CHECK: vfpso %v15, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xcc]
+#CHECK: vfpso %v31, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xcc]
+#CHECK: vfpso %v14, %v17, 11, 9, 7 # encoding: [0xe7,0xe1,0x00,0x79,0xb4,0xcc]
+
+ vfpso %v0, %v0, 0, 0, 0
+ vfpso %v0, %v0, 15, 0, 0
+ vfpso %v0, %v0, 0, 15, 0
+ vfpso %v0, %v0, 0, 0, 15
+ vfpso %v0, %v15, 0, 0, 0
+ vfpso %v0, %v31, 0, 0, 0
+ vfpso %v15, %v0, 0, 0, 0
+ vfpso %v31, %v0, 0, 0, 0
+ vfpso %v14, %v17, 11, 9, 7
+
+#CHECK: vfpsodb %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x30,0xcc]
+#CHECK: vfpsodb %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x30,0xcc]
+#CHECK: vfpsodb %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xcc]
+#CHECK: vfpsodb %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x30,0xcc]
+#CHECK: vfpsodb %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0xcc]
+#CHECK: vfpsodb %v14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x70,0x34,0xcc]
+
+ vfpsodb %v0, %v0, 0
+ vfpsodb %v0, %v0, 15
+ vfpsodb %v0, %v15, 0
+ vfpsodb %v0, %v31, 0
+ vfpsodb %v15, %v0, 0
+ vfpsodb %v31, %v0, 0
+ vfpsodb %v14, %v17, 7
+
#CHECK: vfs %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xe2]
#CHECK: vfs %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xe2]
#CHECK: vfs %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xe2]
@@ -2645,6 +2843,20 @@
vgeg %v31, 0(%v0,%r1), 0
vgeg %v10, 1000(%v19,%r7), 1
+#CHECK: vgfm %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xb4]
+#CHECK: vgfm %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xb4]
+#CHECK: vgfm %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xb4]
+#CHECK: vgfm %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xb4]
+#CHECK: vgfm %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xb4]
+#CHECK: vgfm %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0xb4]
+
+ vgfm %v0, %v0, %v0, 0
+ vgfm %v0, %v0, %v0, 15
+ vgfm %v0, %v0, %v31, 0
+ vgfm %v0, %v31, %v0, 0
+ vgfm %v31, %v0, %v0, 0
+ vgfm %v18, %v3, %v20, 11
+
#CHECK: vgfma %v0, %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xbc]
#CHECK: vgfma %v0, %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x0f,0x00,0x00,0xbc]
#CHECK: vgfma %v0, %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf1,0xbc]
@@ -2717,20 +2929,6 @@
vgfmah %v31, %v0, %v0, %v0
vgfmah %v13, %v17, %v21, %v25
-#CHECK: vgfm %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xb4]
-#CHECK: vgfm %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xb4]
-#CHECK: vgfm %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xb4]
-#CHECK: vgfm %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xb4]
-#CHECK: vgfm %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xb4]
-#CHECK: vgfm %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0x00,0xba,0xb4]
-
- vgfm %v0, %v0, %v0, 0
- vgfm %v0, %v0, %v0, 15
- vgfm %v0, %v0, %v31, 0
- vgfm %v0, %v31, %v0, 0
- vgfm %v31, %v0, %v0, 0
- vgfm %v18, %v3, %v20, 11
-
#CHECK: vgfmb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xb4]
#CHECK: vgfmb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xb4]
#CHECK: vgfmb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xb4]
@@ -2851,6 +3049,88 @@
vgmh %v31, 0, 0
vgmh %v21, 2, 3
+#CHECK: vistr %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
+#CHECK: vistr %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x5c]
+#CHECK: vistr %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
+#CHECK: vistr %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x5c]
+#CHECK: vistr %v0, %v0, 0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x00,0x5c]
+#CHECK: vistr %v0, %v15, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x5c]
+#CHECK: vistr %v0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x5c]
+#CHECK: vistr %v15, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x5c]
+#CHECK: vistr %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x5c]
+#CHECK: vistr %v18, %v3, 11, 9 # encoding: [0xe7,0x23,0x00,0x90,0xb8,0x5c]
+
+ vistr %v0, %v0, 0
+ vistr %v0, %v0, 15
+ vistr %v0, %v0, 0, 0
+ vistr %v0, %v0, 15, 0
+ vistr %v0, %v0, 0, 12
+ vistr %v0, %v15, 0
+ vistr %v0, %v31, 0
+ vistr %v15, %v0, 0
+ vistr %v31, %v0, 0
+ vistr %v18, %v3, 11, 9
+
+#CHECK: vistrb %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
+#CHECK: vistrb %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5c]
+#CHECK: vistrb %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x00,0x5c]
+#CHECK: vistrb %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x5c]
+#CHECK: vistrb %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x5c]
+#CHECK: vistrb %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x5c]
+#CHECK: vistrb %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x5c]
+#CHECK: vistrb %v18, %v3, 0 # encoding: [0xe7,0x23,0x00,0x00,0x08,0x5c]
+#CHECK: vistrbs %v5, %v22 # encoding: [0xe7,0x56,0x00,0x10,0x04,0x5c]
+
+ vistrb %v0, %v0
+ vistrb %v0, %v0, 0
+ vistrb %v0, %v0, 12
+ vistrb %v0, %v15
+ vistrb %v0, %v31
+ vistrb %v15, %v0
+ vistrb %v31, %v0
+ vistrb %v18, %v3
+ vistrbs %v5, %v22
+
+#CHECK: vistrf %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x5c]
+#CHECK: vistrf %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x5c]
+#CHECK: vistrf %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x20,0x5c]
+#CHECK: vistrf %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x5c]
+#CHECK: vistrf %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x5c]
+#CHECK: vistrf %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x5c]
+#CHECK: vistrf %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x5c]
+#CHECK: vistrf %v18, %v3, 0 # encoding: [0xe7,0x23,0x00,0x00,0x28,0x5c]
+#CHECK: vistrfs %v5, %v22 # encoding: [0xe7,0x56,0x00,0x10,0x24,0x5c]
+
+ vistrf %v0, %v0
+ vistrf %v0, %v0, 0
+ vistrf %v0, %v0, 12
+ vistrf %v0, %v15
+ vistrf %v0, %v31
+ vistrf %v15, %v0
+ vistrf %v31, %v0
+ vistrf %v18, %v3
+ vistrfs %v5, %v22
+
+#CHECK: vistrh %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x5c]
+#CHECK: vistrh %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x5c]
+#CHECK: vistrh %v0, %v0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x10,0x5c]
+#CHECK: vistrh %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x10,0x5c]
+#CHECK: vistrh %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x5c]
+#CHECK: vistrh %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x10,0x5c]
+#CHECK: vistrh %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x5c]
+#CHECK: vistrh %v18, %v3, 0 # encoding: [0xe7,0x23,0x00,0x00,0x18,0x5c]
+#CHECK: vistrhs %v5, %v22 # encoding: [0xe7,0x56,0x00,0x10,0x14,0x5c]
+
+ vistrh %v0, %v0
+ vistrh %v0, %v0, 0
+ vistrh %v0, %v0, 12
+ vistrh %v0, %v15
+ vistrh %v0, %v31
+ vistrh %v15, %v0
+ vistrh %v31, %v0
+ vistrh %v18, %v3
+ vistrhs %v5, %v22
+
#CHECK: vl %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x06]
#CHECK: vl %v0, 4095 # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x06]
#CHECK: vl %v0, 0(%r15) # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x06]
@@ -5511,20 +5791,6 @@
vsceg %v31, 0(%v0,%r1), 0
vsceg %v10, 1000(%v19,%r7), 1
-#CHECK: vsel %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x8d]
-#CHECK: vsel %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x00,0x00,0xf1,0x8d]
-#CHECK: vsel %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x8d]
-#CHECK: vsel %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x8d]
-#CHECK: vsel %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x8d]
-#CHECK: vsel %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x50,0x00,0x97,0x8d]
-
- vsel %v0, %v0, %v0, %v0
- vsel %v0, %v0, %v0, %v31
- vsel %v0, %v0, %v31, %v0
- vsel %v0, %v31, %v0, %v0
- vsel %v31, %v0, %v0, %v0
- vsel %v13, %v17, %v21, %v25
-
#CHECK: vseg %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x5f]
#CHECK: vseg %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x5f]
#CHECK: vseg %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x5f]
@@ -5583,6 +5849,20 @@
vsegh %v31, %v0
vsegh %v14, %v17
+#CHECK: vsel %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x8d]
+#CHECK: vsel %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x00,0x00,0xf1,0x8d]
+#CHECK: vsel %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x8d]
+#CHECK: vsel %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x8d]
+#CHECK: vsel %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x8d]
+#CHECK: vsel %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x50,0x00,0x97,0x8d]
+
+ vsel %v0, %v0, %v0, %v0
+ vsel %v0, %v0, %v0, %v31
+ vsel %v0, %v0, %v31, %v0
+ vsel %v0, %v31, %v0, %v0
+ vsel %v31, %v0, %v0, %v0
+ vsel %v13, %v17, %v21, %v25
+
#CHECK: vsf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xf7]
#CHECK: vsf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xf7]
#CHECK: vsf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xf7]
@@ -5989,6 +6269,18 @@
vsum %v31, %v0, %v0, 0
vsum %v18, %v3, %v20, 11
+#CHECK: vsumb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x64]
+#CHECK: vsumb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x64]
+#CHECK: vsumb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x64]
+#CHECK: vsumb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x64]
+#CHECK: vsumb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x64]
+
+ vsumb %v0, %v0, %v0
+ vsumb %v0, %v0, %v31
+ vsumb %v0, %v31, %v0
+ vsumb %v31, %v0, %v0
+ vsumb %v18, %v3, %v20
+
#CHECK: vsumg %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x65]
#CHECK: vsumg %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x65]
#CHECK: vsumg %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x65]
@@ -6003,6 +6295,18 @@
vsumg %v31, %v0, %v0, 0
vsumg %v18, %v3, %v20, 11
+#CHECK: vsumgf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x65]
+#CHECK: vsumgf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x65]
+#CHECK: vsumgf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x65]
+#CHECK: vsumgf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x65]
+#CHECK: vsumgf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x65]
+
+ vsumgf %v0, %v0, %v0
+ vsumgf %v0, %v0, %v31
+ vsumgf %v0, %v31, %v0
+ vsumgf %v31, %v0, %v0
+ vsumgf %v18, %v3, %v20
+
#CHECK: vsumgh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x65]
#CHECK: vsumgh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x65]
#CHECK: vsumgh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x65]
@@ -6015,17 +6319,17 @@
vsumgh %v31, %v0, %v0
vsumgh %v18, %v3, %v20
-#CHECK: vsumgf %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x65]
-#CHECK: vsumgf %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0x65]
-#CHECK: vsumgf %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x65]
-#CHECK: vsumgf %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x65]
-#CHECK: vsumgf %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0x65]
+#CHECK: vsumh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x64]
+#CHECK: vsumh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x64]
+#CHECK: vsumh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x64]
+#CHECK: vsumh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x64]
+#CHECK: vsumh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x64]
- vsumgf %v0, %v0, %v0
- vsumgf %v0, %v0, %v31
- vsumgf %v0, %v31, %v0
- vsumgf %v31, %v0, %v0
- vsumgf %v18, %v3, %v20
+ vsumh %v0, %v0, %v0
+ vsumh %v0, %v0, %v31
+ vsumh %v0, %v31, %v0
+ vsumh %v31, %v0, %v0
+ vsumh %v18, %v3, %v20
#CHECK: vsumq %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x67]
#CHECK: vsumq %v0, %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x67]
@@ -6065,30 +6369,6 @@
vsumqg %v31, %v0, %v0
vsumqg %v18, %v3, %v20
-#CHECK: vsumb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x64]
-#CHECK: vsumb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x64]
-#CHECK: vsumb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x64]
-#CHECK: vsumb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x64]
-#CHECK: vsumb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x64]
-
- vsumb %v0, %v0, %v0
- vsumb %v0, %v0, %v31
- vsumb %v0, %v31, %v0
- vsumb %v31, %v0, %v0
- vsumb %v18, %v3, %v20
-
-#CHECK: vsumh %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x64]
-#CHECK: vsumh %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x12,0x64]
-#CHECK: vsumh %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x64]
-#CHECK: vsumh %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x64]
-#CHECK: vsumh %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x1a,0x64]
-
- vsumh %v0, %v0, %v0
- vsumh %v0, %v0, %v31
- vsumh %v0, %v31, %v0
- vsumh %v31, %v0, %v0
- vsumh %v18, %v3, %v20
-
#CHECK: vtm %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xd8]
#CHECK: vtm %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xd8]
#CHECK: vtm %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xd8]
@@ -6161,6 +6441,50 @@
vuphh %v31, %v0
vuphh %v14, %v17
+#CHECK: vupl %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xd6]
+#CHECK: vupl %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xd6]
+#CHECK: vupl %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xd6]
+#CHECK: vupl %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xd6]
+#CHECK: vupl %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xd6]
+#CHECK: vupl %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xd6]
+#CHECK: vupl %v14, %v17, 11 # encoding: [0xe7,0xe1,0x00,0x00,0xb4,0xd6]
+
+ vupl %v0, %v0, 0
+ vupl %v0, %v0, 15
+ vupl %v0, %v15, 0
+ vupl %v0, %v31, 0
+ vupl %v15, %v0, 0
+ vupl %v31, %v0, 0
+ vupl %v14, %v17, 11
+
+#CHECK: vuplb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xd6]
+#CHECK: vuplb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xd6]
+#CHECK: vuplb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xd6]
+#CHECK: vuplb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xd6]
+#CHECK: vuplb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xd6]
+#CHECK: vuplb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x04,0xd6]
+
+ vuplb %v0, %v0
+ vuplb %v0, %v15
+ vuplb %v0, %v31
+ vuplb %v15, %v0
+ vuplb %v31, %v0
+ vuplb %v14, %v17
+
+#CHECK: vuplf %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xd6]
+#CHECK: vuplf %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xd6]
+#CHECK: vuplf %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xd6]
+#CHECK: vuplf %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xd6]
+#CHECK: vuplf %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xd6]
+#CHECK: vuplf %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xd6]
+
+ vuplf %v0, %v0
+ vuplf %v0, %v15
+ vuplf %v0, %v31
+ vuplf %v15, %v0
+ vuplf %v31, %v0
+ vuplf %v14, %v17
+
#CHECK: vuplh %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xd5]
#CHECK: vuplh %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xd5]
#CHECK: vuplh %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xd5]
@@ -6219,50 +6543,6 @@
vuplhh %v31, %v0
vuplhh %v14, %v17
-#CHECK: vupl %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xd6]
-#CHECK: vupl %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xd6]
-#CHECK: vupl %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xd6]
-#CHECK: vupl %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xd6]
-#CHECK: vupl %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xd6]
-#CHECK: vupl %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xd6]
-#CHECK: vupl %v14, %v17, 11 # encoding: [0xe7,0xe1,0x00,0x00,0xb4,0xd6]
-
- vupl %v0, %v0, 0
- vupl %v0, %v0, 15
- vupl %v0, %v15, 0
- vupl %v0, %v31, 0
- vupl %v15, %v0, 0
- vupl %v31, %v0, 0
- vupl %v14, %v17, 11
-
-#CHECK: vuplb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xd6]
-#CHECK: vuplb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xd6]
-#CHECK: vuplb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xd6]
-#CHECK: vuplb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xd6]
-#CHECK: vuplb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xd6]
-#CHECK: vuplb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x04,0xd6]
-
- vuplb %v0, %v0
- vuplb %v0, %v15
- vuplb %v0, %v31
- vuplb %v15, %v0
- vuplb %v31, %v0
- vuplb %v14, %v17
-
-#CHECK: vuplf %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xd6]
-#CHECK: vuplf %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xd6]
-#CHECK: vuplf %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xd6]
-#CHECK: vuplf %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xd6]
-#CHECK: vuplf %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xd6]
-#CHECK: vuplf %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xd6]
-
- vuplf %v0, %v0
- vuplf %v0, %v15
- vuplf %v0, %v31
- vuplf %v15, %v0
- vuplf %v31, %v0
- vuplf %v14, %v17
-
#CHECK: vuplhw %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0xd6]
#CHECK: vuplhw %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x10,0xd6]
#CHECK: vuplhw %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0xd6]
@@ -6358,7 +6638,7 @@
vzero %v31
#CHECK: wcdgb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc3]
-#CHECK: wcdgb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc3]
+#CHECK: wcdgb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc3]
#CHECK: wcdgb %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xc3]
#CHECK: wcdgb %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc3]
#CHECK: wcdgb %f0, %f0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc3]
@@ -6376,7 +6656,7 @@
wcdgb %v14, %v17, 4, 10
#CHECK: wcdlgb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc1]
-#CHECK: wcdlgb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc1]
+#CHECK: wcdlgb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc1]
#CHECK: wcdlgb %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xc1]
#CHECK: wcdlgb %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc1]
#CHECK: wcdlgb %f0, %f0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc1]
@@ -6394,7 +6674,7 @@
wcdlgb %v14, %v17, 4, 10
#CHECK: wcgdb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc2]
-#CHECK: wcgdb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc2]
+#CHECK: wcgdb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc2]
#CHECK: wcgdb %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xc2]
#CHECK: wcgdb %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc2]
#CHECK: wcgdb %f0, %f0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc2]
@@ -6412,7 +6692,7 @@
wcgdb %v14, %v17, 4, 10
#CHECK: wclgdb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc0]
-#CHECK: wclgdb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc0]
+#CHECK: wclgdb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc0]
#CHECK: wclgdb %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xc0]
#CHECK: wclgdb %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc0]
#CHECK: wclgdb %f0, %f0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc0]
@@ -6470,7 +6750,7 @@
#CHECK: wfcdb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x00,0x30,0xcb]
#CHECK: wfcdb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0xcb]
#CHECK: wfcdb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x34,0xcb]
-
+
wfcdb %v0, %v0
wfcdb %f0, %f0
wfcdb %v0, %v15
@@ -6527,7 +6807,7 @@
#CHECK: wfchdbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x18,0x34,0xeb]
#CHECK: wfchdbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x18,0x38,0xeb]
#CHECK: wfchdbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x3a,0xeb]
-
+
wfchdbs %v0, %v0, %v0
wfchdbs %f0, %f0, %f0
wfchdbs %v0, %v0, %v31
@@ -6578,7 +6858,7 @@
wfddb %v18, %v3, %v20
#CHECK: wfidb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc7]
-#CHECK: wfidb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc7]
+#CHECK: wfidb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc7]
#CHECK: wfidb %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xc7]
#CHECK: wfidb %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc7]
#CHECK: wfidb %f0, %f0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc7]
@@ -6631,24 +6911,6 @@
wfkdb %v31, %v0
wfkdb %v14, %v17
-#CHECK: wfpsodb %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xcc]
-#CHECK: wfpsodb %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xcc]
-#CHECK: wfpsodb %f0, %f0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xcc]
-#CHECK: wfpsodb %f0, %f15, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x30,0xcc]
-#CHECK: wfpsodb %f0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xcc]
-#CHECK: wfpsodb %f15, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x30,0xcc]
-#CHECK: wfpsodb %v31, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xcc]
-#CHECK: wfpsodb %f14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x78,0x34,0xcc]
-
- wfpsodb %v0, %v0, 0
- wfpsodb %f0, %f0, 0
- wfpsodb %v0, %v0, 15
- wfpsodb %v0, %v15, 0
- wfpsodb %v0, %v31, 0
- wfpsodb %v15, %v0, 0
- wfpsodb %v31, %v0, 0
- wfpsodb %v14, %v17, 7
-
#CHECK: wflcdb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xcc]
#CHECK: wflcdb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xcc]
#CHECK: wflcdb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x08,0x30,0xcc]
@@ -6743,20 +7005,38 @@
wfmsdb %v31, %v0, %v0, %v0
wfmsdb %v13, %v17, %v21, %v25
+#CHECK: wfpsodb %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xcc]
+#CHECK: wfpsodb %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xcc]
+#CHECK: wfpsodb %f0, %f0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xcc]
+#CHECK: wfpsodb %f0, %f15, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x30,0xcc]
+#CHECK: wfpsodb %f0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xcc]
+#CHECK: wfpsodb %f15, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x30,0xcc]
+#CHECK: wfpsodb %v31, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xcc]
+#CHECK: wfpsodb %f14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x78,0x34,0xcc]
+
+ wfpsodb %v0, %v0, 0
+ wfpsodb %f0, %f0, 0
+ wfpsodb %v0, %v0, 15
+ wfpsodb %v0, %v15, 0
+ wfpsodb %v0, %v31, 0
+ wfpsodb %v15, %v0, 0
+ wfpsodb %v31, %v0, 0
+ wfpsodb %v14, %v17, 7
+
#CHECK: wfsdb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xe2]
#CHECK: wfsdb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xe2]
#CHECK: wfsdb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x32,0xe2]
#CHECK: wfsdb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xe2]
#CHECK: wfsdb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xe2]
#CHECK: wfsdb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x3a,0xe2]
-
+
wfsdb %v0, %v0, %v0
wfsdb %f0, %f0, %f0
wfsdb %v0, %v0, %v31
wfsdb %v0, %v31, %v0
wfsdb %v31, %v0, %v0
wfsdb %v18, %v3, %v20
-
+
#CHECK: wfsqdb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xce]
#CHECK: wfsqdb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xce]
#CHECK: wfsqdb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x08,0x30,0xce]
@@ -6764,7 +7044,7 @@
#CHECK: wfsqdb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x30,0xce]
#CHECK: wfsqdb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xce]
#CHECK: wfsqdb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x34,0xce]
-
+
wfsqdb %v0, %v0
wfsqdb %f0, %f0
wfsqdb %v0, %v15
@@ -6781,7 +7061,7 @@
#CHECK: wftcidb %f15, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x30,0x4a]
#CHECK: wftcidb %v31, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0x4a]
#CHECK: wftcidb %f4, %v21, 1656 # encoding: [0xe7,0x45,0x67,0x88,0x34,0x4a]
-
+
wftcidb %v0, %v0, 0
wftcidb %f0, %f0, 0
wftcidb %v0, %v0, 4095
@@ -6818,280 +7098,10 @@
wledb %v0, %v0, 0, 0
wledb %f0, %f0, 0, 0
- wledb %v0, %v0, 0, 15
+ wledb %v0, %v0, 0, 15
wledb %v0, %v0, 4, 0
wledb %v0, %v0, 12, 0
wledb %v0, %v31, 0, 0
wledb %v31, %v0, 0, 0
wledb %v14, %v17, 4, 10
-#CHECK: lochi %r11, 42, 0 # encoding: [0xec,0xb0,0x00,0x2a,0x00,0x42]
-#CHECK: lochio %r11, 42 # encoding: [0xec,0xb1,0x00,0x2a,0x00,0x42]
-#CHECK: lochih %r11, 42 # encoding: [0xec,0xb2,0x00,0x2a,0x00,0x42]
-#CHECK: lochinle %r11, 42 # encoding: [0xec,0xb3,0x00,0x2a,0x00,0x42]
-#CHECK: lochil %r11, -1 # encoding: [0xec,0xb4,0xff,0xff,0x00,0x42]
-#CHECK: lochinhe %r11, 42 # encoding: [0xec,0xb5,0x00,0x2a,0x00,0x42]
-#CHECK: lochilh %r11, -1 # encoding: [0xec,0xb6,0xff,0xff,0x00,0x42]
-#CHECK: lochine %r11, 0 # encoding: [0xec,0xb7,0x00,0x00,0x00,0x42]
-#CHECK: lochie %r11, 0 # encoding: [0xec,0xb8,0x00,0x00,0x00,0x42]
-#CHECK: lochinlh %r11, 42 # encoding: [0xec,0xb9,0x00,0x2a,0x00,0x42]
-#CHECK: lochihe %r11, 255 # encoding: [0xec,0xba,0x00,0xff,0x00,0x42]
-#CHECK: lochinl %r11, 255 # encoding: [0xec,0xbb,0x00,0xff,0x00,0x42]
-#CHECK: lochile %r11, 32767 # encoding: [0xec,0xbc,0x7f,0xff,0x00,0x42]
-#CHECK: lochinh %r11, 32767 # encoding: [0xec,0xbd,0x7f,0xff,0x00,0x42]
-#CHECK: lochino %r11, 32512 # encoding: [0xec,0xbe,0x7f,0x00,0x00,0x42]
-#CHECK: lochi %r11, 32512, 15 # encoding: [0xec,0xbf,0x7f,0x00,0x00,0x42]
-
- lochi %r11, 42, 0
- lochio %r11, 42
- lochih %r11, 42
- lochinle %r11, 42
- lochil %r11, -1
- lochinhe %r11, 42
- lochilh %r11, -1
- lochine %r11, 0
- lochie %r11, 0
- lochinlh %r11, 42
- lochihe %r11, 255
- lochinl %r11, 255
- lochile %r11, 32767
- lochinh %r11, 32767
- lochino %r11, 32512
- lochi %r11, 32512, 15
-
-#CHECK: locghi %r11, 42, 0 # encoding: [0xec,0xb0,0x00,0x2a,0x00,0x46]
-#CHECK: locghio %r11, 42 # encoding: [0xec,0xb1,0x00,0x2a,0x00,0x46]
-#CHECK: locghih %r11, 42 # encoding: [0xec,0xb2,0x00,0x2a,0x00,0x46]
-#CHECK: locghinle %r11, 42 # encoding: [0xec,0xb3,0x00,0x2a,0x00,0x46]
-#CHECK: locghil %r11, -1 # encoding: [0xec,0xb4,0xff,0xff,0x00,0x46]
-#CHECK: locghinhe %r11, 42 # encoding: [0xec,0xb5,0x00,0x2a,0x00,0x46]
-#CHECK: locghilh %r11, -1 # encoding: [0xec,0xb6,0xff,0xff,0x00,0x46]
-#CHECK: locghine %r11, 0 # encoding: [0xec,0xb7,0x00,0x00,0x00,0x46]
-#CHECK: locghie %r11, 0 # encoding: [0xec,0xb8,0x00,0x00,0x00,0x46]
-#CHECK: locghinlh %r11, 42 # encoding: [0xec,0xb9,0x00,0x2a,0x00,0x46]
-#CHECK: locghihe %r11, 255 # encoding: [0xec,0xba,0x00,0xff,0x00,0x46]
-#CHECK: locghinl %r11, 255 # encoding: [0xec,0xbb,0x00,0xff,0x00,0x46]
-#CHECK: locghile %r11, 32767 # encoding: [0xec,0xbc,0x7f,0xff,0x00,0x46]
-#CHECK: locghinh %r11, 32767 # encoding: [0xec,0xbd,0x7f,0xff,0x00,0x46]
-#CHECK: locghino %r11, 32512 # encoding: [0xec,0xbe,0x7f,0x00,0x00,0x46]
-#CHECK: locghi %r11, 32512, 15 # encoding: [0xec,0xbf,0x7f,0x00,0x00,0x46]
-
- locghi %r11, 42, 0
- locghio %r11, 42
- locghih %r11, 42
- locghinle %r11, 42
- locghil %r11, -1
- locghinhe %r11, 42
- locghilh %r11, -1
- locghine %r11, 0
- locghie %r11, 0
- locghinlh %r11, 42
- locghihe %r11, 255
- locghinl %r11, 255
- locghile %r11, 32767
- locghinh %r11, 32767
- locghino %r11, 32512
- locghi %r11, 32512, 15
-
-#CHECK: lochhi %r11, 42, 0 # encoding: [0xec,0xb0,0x00,0x2a,0x00,0x4e]
-#CHECK: lochhio %r11, 42 # encoding: [0xec,0xb1,0x00,0x2a,0x00,0x4e]
-#CHECK: lochhih %r11, 42 # encoding: [0xec,0xb2,0x00,0x2a,0x00,0x4e]
-#CHECK: lochhinle %r11, 42 # encoding: [0xec,0xb3,0x00,0x2a,0x00,0x4e]
-#CHECK: lochhil %r11, -1 # encoding: [0xec,0xb4,0xff,0xff,0x00,0x4e]
-#CHECK: lochhinhe %r11, 42 # encoding: [0xec,0xb5,0x00,0x2a,0x00,0x4e]
-#CHECK: lochhilh %r11, -1 # encoding: [0xec,0xb6,0xff,0xff,0x00,0x4e]
-#CHECK: lochhine %r11, 0 # encoding: [0xec,0xb7,0x00,0x00,0x00,0x4e]
-#CHECK: lochhie %r11, 0 # encoding: [0xec,0xb8,0x00,0x00,0x00,0x4e]
-#CHECK: lochhinlh %r11, 42 # encoding: [0xec,0xb9,0x00,0x2a,0x00,0x4e]
-#CHECK: lochhihe %r11, 255 # encoding: [0xec,0xba,0x00,0xff,0x00,0x4e]
-#CHECK: lochhinl %r11, 255 # encoding: [0xec,0xbb,0x00,0xff,0x00,0x4e]
-#CHECK: lochhile %r11, 32767 # encoding: [0xec,0xbc,0x7f,0xff,0x00,0x4e]
-#CHECK: lochhinh %r11, 32767 # encoding: [0xec,0xbd,0x7f,0xff,0x00,0x4e]
-#CHECK: lochhino %r11, 32512 # encoding: [0xec,0xbe,0x7f,0x00,0x00,0x4e]
-#CHECK: lochhi %r11, 32512, 15 # encoding: [0xec,0xbf,0x7f,0x00,0x00,0x4e]
-
- lochhi %r11, 42, 0
- lochhio %r11, 42
- lochhih %r11, 42
- lochhinle %r11, 42
- lochhil %r11, -1
- lochhinhe %r11, 42
- lochhilh %r11, -1
- lochhine %r11, 0
- lochhie %r11, 0
- lochhinlh %r11, 42
- lochhihe %r11, 255
- lochhinl %r11, 255
- lochhile %r11, 32767
- lochhinh %r11, 32767
- lochhino %r11, 32512
- lochhi %r11, 32512, 15
-
-#CHECK: locfh %r0, 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe0]
-#CHECK: locfh %r0, 0, 15 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe0]
-#CHECK: locfh %r0, -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe0]
-#CHECK: locfh %r0, 524287, 0 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe0]
-#CHECK: locfh %r0, 0(%r1), 0 # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe0]
-#CHECK: locfh %r0, 0(%r15), 0 # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe0]
-#CHECK: locfh %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe0]
-#CHECK: locfh %r1, 4095(%r2), 3 # encoding: [0xeb,0x13,0x2f,0xff,0x00,0xe0]
-
- locfh %r0,0,0
- locfh %r0,0,15
- locfh %r0,-524288,0
- locfh %r0,524287,0
- locfh %r0,0(%r1),0
- locfh %r0,0(%r15),0
- locfh %r15,0,0
- locfh %r1,4095(%r2),3
-
-#CHECK: locfho %r1, 2(%r3) # encoding: [0xeb,0x11,0x30,0x02,0x00,0xe0]
-#CHECK: locfhh %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe0]
-#CHECK: locfhp %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnle %r1, 2(%r3) # encoding: [0xeb,0x13,0x30,0x02,0x00,0xe0]
-#CHECK: locfhl %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe0]
-#CHECK: locfhm %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnhe %r1, 2(%r3) # encoding: [0xeb,0x15,0x30,0x02,0x00,0xe0]
-#CHECK: locfhlh %r1, 2(%r3) # encoding: [0xeb,0x16,0x30,0x02,0x00,0xe0]
-#CHECK: locfhne %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnz %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe0]
-#CHECK: locfhe %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe0]
-#CHECK: locfhz %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnlh %r1, 2(%r3) # encoding: [0xeb,0x19,0x30,0x02,0x00,0xe0]
-#CHECK: locfhhe %r1, 2(%r3) # encoding: [0xeb,0x1a,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnl %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnm %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe0]
-#CHECK: locfhle %r1, 2(%r3) # encoding: [0xeb,0x1c,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnh %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe0]
-#CHECK: locfhnp %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe0]
-#CHECK: locfhno %r1, 2(%r3) # encoding: [0xeb,0x1e,0x30,0x02,0x00,0xe0]
-
- locfho %r1,2(%r3)
- locfhh %r1,2(%r3)
- locfhp %r1,2(%r3)
- locfhnle %r1,2(%r3)
- locfhl %r1,2(%r3)
- locfhm %r1,2(%r3)
- locfhnhe %r1,2(%r3)
- locfhlh %r1,2(%r3)
- locfhne %r1,2(%r3)
- locfhnz %r1,2(%r3)
- locfhe %r1,2(%r3)
- locfhz %r1,2(%r3)
- locfhnlh %r1,2(%r3)
- locfhhe %r1,2(%r3)
- locfhnl %r1,2(%r3)
- locfhnm %r1,2(%r3)
- locfhle %r1,2(%r3)
- locfhnh %r1,2(%r3)
- locfhnp %r1,2(%r3)
- locfhno %r1,2(%r3)
-
-#CHECK: locfhr %r1, %r2, 0 # encoding: [0xb9,0xe0,0x00,0x12]
-#CHECK: locfhr %r1, %r2, 15 # encoding: [0xb9,0xe0,0xf0,0x12]
-
- locfhr %r1,%r2,0
- locfhr %r1,%r2,15
-
-#CHECK: locfhro %r1, %r3 # encoding: [0xb9,0xe0,0x10,0x13]
-#CHECK: locfhrh %r1, %r3 # encoding: [0xb9,0xe0,0x20,0x13]
-#CHECK: locfhrp %r1, %r3 # encoding: [0xb9,0xe0,0x20,0x13]
-#CHECK: locfhrnle %r1, %r3 # encoding: [0xb9,0xe0,0x30,0x13]
-#CHECK: locfhrl %r1, %r3 # encoding: [0xb9,0xe0,0x40,0x13]
-#CHECK: locfhrm %r1, %r3 # encoding: [0xb9,0xe0,0x40,0x13]
-#CHECK: locfhrnhe %r1, %r3 # encoding: [0xb9,0xe0,0x50,0x13]
-#CHECK: locfhrlh %r1, %r3 # encoding: [0xb9,0xe0,0x60,0x13]
-#CHECK: locfhrne %r1, %r3 # encoding: [0xb9,0xe0,0x70,0x13]
-#CHECK: locfhrnz %r1, %r3 # encoding: [0xb9,0xe0,0x70,0x13]
-#CHECK: locfhre %r1, %r3 # encoding: [0xb9,0xe0,0x80,0x13]
-#CHECK: locfhrz %r1, %r3 # encoding: [0xb9,0xe0,0x80,0x13]
-#CHECK: locfhrnlh %r1, %r3 # encoding: [0xb9,0xe0,0x90,0x13]
-#CHECK: locfhrhe %r1, %r3 # encoding: [0xb9,0xe0,0xa0,0x13]
-#CHECK: locfhrnl %r1, %r3 # encoding: [0xb9,0xe0,0xb0,0x13]
-#CHECK: locfhrnm %r1, %r3 # encoding: [0xb9,0xe0,0xb0,0x13]
-#CHECK: locfhrle %r1, %r3 # encoding: [0xb9,0xe0,0xc0,0x13]
-#CHECK: locfhrnh %r1, %r3 # encoding: [0xb9,0xe0,0xd0,0x13]
-#CHECK: locfhrnp %r1, %r3 # encoding: [0xb9,0xe0,0xd0,0x13]
-#CHECK: locfhrno %r1, %r3 # encoding: [0xb9,0xe0,0xe0,0x13]
-
- locfhro %r1,%r3
- locfhrh %r1,%r3
- locfhrp %r1,%r3
- locfhrnle %r1,%r3
- locfhrl %r1,%r3
- locfhrm %r1,%r3
- locfhrnhe %r1,%r3
- locfhrlh %r1,%r3
- locfhrne %r1,%r3
- locfhrnz %r1,%r3
- locfhre %r1,%r3
- locfhrz %r1,%r3
- locfhrnlh %r1,%r3
- locfhrhe %r1,%r3
- locfhrnl %r1,%r3
- locfhrnm %r1,%r3
- locfhrle %r1,%r3
- locfhrnh %r1,%r3
- locfhrnp %r1,%r3
- locfhrno %r1,%r3
-
-#CHECK: stocfh %r0, 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe1]
-#CHECK: stocfh %r0, 0, 15 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe1]
-#CHECK: stocfh %r0, -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe1]
-#CHECK: stocfh %r0, 524287, 0 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe1]
-#CHECK: stocfh %r0, 0(%r1), 0 # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe1]
-#CHECK: stocfh %r0, 0(%r15), 0 # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe1]
-#CHECK: stocfh %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe1]
-#CHECK: stocfh %r1, 4095(%r2), 3 # encoding: [0xeb,0x13,0x2f,0xff,0x00,0xe1]
-
- stocfh %r0,0,0
- stocfh %r0,0,15
- stocfh %r0,-524288,0
- stocfh %r0,524287,0
- stocfh %r0,0(%r1),0
- stocfh %r0,0(%r15),0
- stocfh %r15,0,0
- stocfh %r1,4095(%r2),3
-
-#CHECK: stocfho %r1, 2(%r3) # encoding: [0xeb,0x11,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhh %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhp %r1, 2(%r3) # encoding: [0xeb,0x12,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnle %r1, 2(%r3) # encoding: [0xeb,0x13,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhl %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhm %r1, 2(%r3) # encoding: [0xeb,0x14,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnhe %r1, 2(%r3) # encoding: [0xeb,0x15,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhlh %r1, 2(%r3) # encoding: [0xeb,0x16,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhne %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnz %r1, 2(%r3) # encoding: [0xeb,0x17,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhe %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhz %r1, 2(%r3) # encoding: [0xeb,0x18,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnlh %r1, 2(%r3) # encoding: [0xeb,0x19,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhhe %r1, 2(%r3) # encoding: [0xeb,0x1a,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnl %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnm %r1, 2(%r3) # encoding: [0xeb,0x1b,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhle %r1, 2(%r3) # encoding: [0xeb,0x1c,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnh %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhnp %r1, 2(%r3) # encoding: [0xeb,0x1d,0x30,0x02,0x00,0xe1]
-#CHECK: stocfhno %r1, 2(%r3) # encoding: [0xeb,0x1e,0x30,0x02,0x00,0xe1]
-
- stocfho %r1,2(%r3)
- stocfhh %r1,2(%r3)
- stocfhp %r1,2(%r3)
- stocfhnle %r1,2(%r3)
- stocfhl %r1,2(%r3)
- stocfhm %r1,2(%r3)
- stocfhnhe %r1,2(%r3)
- stocfhlh %r1,2(%r3)
- stocfhne %r1,2(%r3)
- stocfhnz %r1,2(%r3)
- stocfhe %r1,2(%r3)
- stocfhz %r1,2(%r3)
- stocfhnlh %r1,2(%r3)
- stocfhhe %r1,2(%r3)
- stocfhnl %r1,2(%r3)
- stocfhnm %r1,2(%r3)
- stocfhle %r1,2(%r3)
- stocfhnh %r1,2(%r3)
- stocfhnp %r1,2(%r3)
- stocfhno %r1,2(%r3)
-
diff --git a/test/MC/SystemZ/insn-good-z196.s b/test/MC/SystemZ/insn-good-z196.s
index b24cc7d18e1f..02c473c11a4a 100644
--- a/test/MC/SystemZ/insn-good-z196.s
+++ b/test/MC/SystemZ/insn-good-z196.s
@@ -136,34 +136,6 @@
ark %r15,%r0,%r0
ark %r7,%r8,%r9
-#CHECK: cdfbra %f0, 0, %r0, 0 # encoding: [0xb3,0x95,0x00,0x00]
-#CHECK: cdfbra %f0, 0, %r0, 15 # encoding: [0xb3,0x95,0x0f,0x00]
-#CHECK: cdfbra %f0, 0, %r15, 0 # encoding: [0xb3,0x95,0x00,0x0f]
-#CHECK: cdfbra %f0, 15, %r0, 0 # encoding: [0xb3,0x95,0xf0,0x00]
-#CHECK: cdfbra %f4, 5, %r6, 7 # encoding: [0xb3,0x95,0x57,0x46]
-#CHECK: cdfbra %f15, 0, %r0, 0 # encoding: [0xb3,0x95,0x00,0xf0]
-
- cdfbra %f0, 0, %r0, 0
- cdfbra %f0, 0, %r0, 15
- cdfbra %f0, 0, %r15, 0
- cdfbra %f0, 15, %r0, 0
- cdfbra %f4, 5, %r6, 7
- cdfbra %f15, 0, %r0, 0
-
-#CHECK: cdgbra %f0, 0, %r0, 0 # encoding: [0xb3,0xa5,0x00,0x00]
-#CHECK: cdgbra %f0, 0, %r0, 15 # encoding: [0xb3,0xa5,0x0f,0x00]
-#CHECK: cdgbra %f0, 0, %r15, 0 # encoding: [0xb3,0xa5,0x00,0x0f]
-#CHECK: cdgbra %f0, 15, %r0, 0 # encoding: [0xb3,0xa5,0xf0,0x00]
-#CHECK: cdgbra %f4, 5, %r6, 7 # encoding: [0xb3,0xa5,0x57,0x46]
-#CHECK: cdgbra %f15, 0, %r0, 0 # encoding: [0xb3,0xa5,0x00,0xf0]
-
- cdgbra %f0, 0, %r0, 0
- cdgbra %f0, 0, %r0, 15
- cdgbra %f0, 0, %r15, 0
- cdgbra %f0, 15, %r0, 0
- cdgbra %f4, 5, %r6, 7
- cdgbra %f15, 0, %r0, 0
-
#CHECK: brcth %r0, .[[LAB:L.*]]-4294967296 # encoding: [0xcc,0x06,A,A,A,A]
#CHECK: fixup A - offset: 2, value: (.[[LAB]]-4294967296)+2, kind: FK_390_PC32DBL
brcth %r0, -0x100000000
@@ -201,6 +173,34 @@
brcth %r7,frob@PLT
brcth %r8,frob@PLT
+#CHECK: cdfbra %f0, 0, %r0, 0 # encoding: [0xb3,0x95,0x00,0x00]
+#CHECK: cdfbra %f0, 0, %r0, 15 # encoding: [0xb3,0x95,0x0f,0x00]
+#CHECK: cdfbra %f0, 0, %r15, 0 # encoding: [0xb3,0x95,0x00,0x0f]
+#CHECK: cdfbra %f0, 15, %r0, 0 # encoding: [0xb3,0x95,0xf0,0x00]
+#CHECK: cdfbra %f4, 5, %r6, 7 # encoding: [0xb3,0x95,0x57,0x46]
+#CHECK: cdfbra %f15, 0, %r0, 0 # encoding: [0xb3,0x95,0x00,0xf0]
+
+ cdfbra %f0, 0, %r0, 0
+ cdfbra %f0, 0, %r0, 15
+ cdfbra %f0, 0, %r15, 0
+ cdfbra %f0, 15, %r0, 0
+ cdfbra %f4, 5, %r6, 7
+ cdfbra %f15, 0, %r0, 0
+
+#CHECK: cdgbra %f0, 0, %r0, 0 # encoding: [0xb3,0xa5,0x00,0x00]
+#CHECK: cdgbra %f0, 0, %r0, 15 # encoding: [0xb3,0xa5,0x0f,0x00]
+#CHECK: cdgbra %f0, 0, %r15, 0 # encoding: [0xb3,0xa5,0x00,0x0f]
+#CHECK: cdgbra %f0, 15, %r0, 0 # encoding: [0xb3,0xa5,0xf0,0x00]
+#CHECK: cdgbra %f4, 5, %r6, 7 # encoding: [0xb3,0xa5,0x57,0x46]
+#CHECK: cdgbra %f15, 0, %r0, 0 # encoding: [0xb3,0xa5,0x00,0xf0]
+
+ cdgbra %f0, 0, %r0, 0
+ cdgbra %f0, 0, %r0, 15
+ cdgbra %f0, 0, %r15, 0
+ cdgbra %f0, 15, %r0, 0
+ cdgbra %f4, 5, %r6, 7
+ cdgbra %f15, 0, %r0, 0
+
#CHECK: cdlfbr %f0, 0, %r0, 0 # encoding: [0xb3,0x91,0x00,0x00]
#CHECK: cdlfbr %f0, 0, %r0, 15 # encoding: [0xb3,0x91,0x0f,0x00]
#CHECK: cdlfbr %f0, 0, %r15, 0 # encoding: [0xb3,0x91,0x00,0x0f]
@@ -619,6 +619,36 @@
fixbra %f4, 5, %f8, 9
fixbra %f13, 0, %f0, 0
+#CHECK: kmctr %r2, %r2, %r2 # encoding: [0xb9,0x2d,0x20,0x22]
+#CHECK: kmctr %r2, %r8, %r14 # encoding: [0xb9,0x2d,0x80,0x2e]
+#CHECK: kmctr %r14, %r8, %r2 # encoding: [0xb9,0x2d,0x80,0xe2]
+#CHECK: kmctr %r6, %r8, %r10 # encoding: [0xb9,0x2d,0x80,0x6a]
+
+ kmctr %r2, %r2, %r2
+ kmctr %r2, %r8, %r14
+ kmctr %r14, %r8, %r2
+ kmctr %r6, %r8, %r10
+
+#CHECK: kmf %r2, %r2 # encoding: [0xb9,0x2a,0x00,0x22]
+#CHECK: kmf %r2, %r14 # encoding: [0xb9,0x2a,0x00,0x2e]
+#CHECK: kmf %r14, %r2 # encoding: [0xb9,0x2a,0x00,0xe2]
+#CHECK: kmf %r6, %r10 # encoding: [0xb9,0x2a,0x00,0x6a]
+
+ kmf %r2, %r2
+ kmf %r2, %r14
+ kmf %r14, %r2
+ kmf %r6, %r10
+
+#CHECK: kmo %r2, %r2 # encoding: [0xb9,0x2b,0x00,0x22]
+#CHECK: kmo %r2, %r14 # encoding: [0xb9,0x2b,0x00,0x2e]
+#CHECK: kmo %r14, %r2 # encoding: [0xb9,0x2b,0x00,0xe2]
+#CHECK: kmo %r6, %r10 # encoding: [0xb9,0x2b,0x00,0x6a]
+
+ kmo %r2, %r2
+ kmo %r2, %r14
+ kmo %r14, %r2
+ kmo %r6, %r10
+
#CHECK: laa %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xf8]
#CHECK: laa %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xf8]
#CHECK: laa %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xf8]
@@ -1303,6 +1333,10 @@
ork %r15,%r0,%r0
ork %r7,%r8,%r9
+#CHECK: pcc # encoding: [0xb9,0x2c,0x00,0x00]
+
+ pcc
+
#CHECK: popcnt %r0, %r0 # encoding: [0xb9,0xe1,0x00,0x00]
#CHECK: popcnt %r0, %r15 # encoding: [0xb9,0xe1,0x00,0x0f]
#CHECK: popcnt %r15, %r0 # encoding: [0xb9,0xe1,0x00,0xf0]
@@ -1395,18 +1429,6 @@
slgrk %r15,%r0,%r0
slgrk %r7,%r8,%r9
-#CHECK: slrk %r0, %r0, %r0 # encoding: [0xb9,0xfb,0x00,0x00]
-#CHECK: slrk %r0, %r0, %r15 # encoding: [0xb9,0xfb,0xf0,0x00]
-#CHECK: slrk %r0, %r15, %r0 # encoding: [0xb9,0xfb,0x00,0x0f]
-#CHECK: slrk %r15, %r0, %r0 # encoding: [0xb9,0xfb,0x00,0xf0]
-#CHECK: slrk %r7, %r8, %r9 # encoding: [0xb9,0xfb,0x90,0x78]
-
- slrk %r0,%r0,%r0
- slrk %r0,%r0,%r15
- slrk %r0,%r15,%r0
- slrk %r15,%r0,%r0
- slrk %r7,%r8,%r9
-
#CHECK: sllk %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xdf]
#CHECK: sllk %r15, %r1, 0 # encoding: [0xeb,0xf1,0x00,0x00,0x00,0xdf]
#CHECK: sllk %r1, %r15, 0 # encoding: [0xeb,0x1f,0x00,0x00,0x00,0xdf]
@@ -1433,6 +1455,18 @@
sllk %r0,%r0,524287(%r1)
sllk %r0,%r0,524287(%r15)
+#CHECK: slrk %r0, %r0, %r0 # encoding: [0xb9,0xfb,0x00,0x00]
+#CHECK: slrk %r0, %r0, %r15 # encoding: [0xb9,0xfb,0xf0,0x00]
+#CHECK: slrk %r0, %r15, %r0 # encoding: [0xb9,0xfb,0x00,0x0f]
+#CHECK: slrk %r15, %r0, %r0 # encoding: [0xb9,0xfb,0x00,0xf0]
+#CHECK: slrk %r7, %r8, %r9 # encoding: [0xb9,0xfb,0x90,0x78]
+
+ slrk %r0,%r0,%r0
+ slrk %r0,%r0,%r15
+ slrk %r0,%r15,%r0
+ slrk %r15,%r0,%r0
+ slrk %r7,%r8,%r9
+
#CHECK: srak %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xdc]
#CHECK: srak %r15, %r1, 0 # encoding: [0xeb,0xf1,0x00,0x00,0x00,0xdc]
#CHECK: srak %r1, %r15, 0 # encoding: [0xeb,0x1f,0x00,0x00,0x00,0xdc]
@@ -1533,28 +1567,6 @@
stch %r0, 524287(%r15,%r1)
stch %r15, 0
-#CHECK: sthh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xc7]
-#CHECK: sthh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xc7]
-#CHECK: sthh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xc7]
-#CHECK: sthh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0xc7]
-#CHECK: sthh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0xc7]
-#CHECK: sthh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0xc7]
-#CHECK: sthh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0xc7]
-#CHECK: sthh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0xc7]
-#CHECK: sthh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0xc7]
-#CHECK: sthh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0xc7]
-
- sthh %r0, -524288
- sthh %r0, -1
- sthh %r0, 0
- sthh %r0, 1
- sthh %r0, 524287
- sthh %r0, 0(%r1)
- sthh %r0, 0(%r15)
- sthh %r0, 524287(%r1,%r15)
- sthh %r0, 524287(%r15,%r1)
- sthh %r15, 0
-
#CHECK: stfh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xcb]
#CHECK: stfh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xcb]
#CHECK: stfh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xcb]
@@ -1577,6 +1589,28 @@
stfh %r0, 524287(%r15,%r1)
stfh %r15, 0
+#CHECK: sthh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xc7]
+#CHECK: sthh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xc7]
+#CHECK: sthh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xc7]
+#CHECK: sthh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0xc7]
+#CHECK: sthh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0xc7]
+#CHECK: sthh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0xc7]
+#CHECK: sthh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0xc7]
+#CHECK: sthh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0xc7]
+#CHECK: sthh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0xc7]
+#CHECK: sthh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0xc7]
+
+ sthh %r0, -524288
+ sthh %r0, -1
+ sthh %r0, 0
+ sthh %r0, 1
+ sthh %r0, 524287
+ sthh %r0, 0(%r1)
+ sthh %r0, 0(%r15)
+ sthh %r0, 524287(%r1,%r15)
+ sthh %r0, 524287(%r15,%r1)
+ sthh %r15, 0
+
#CHECK: stoc %r0, 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xf3]
#CHECK: stoc %r0, 0, 15 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xf3]
#CHECK: stoc %r0, -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xf3]
diff --git a/test/MC/SystemZ/insn-good-zEC12.s b/test/MC/SystemZ/insn-good-zEC12.s
index bdaeef95eef1..275d68d8a619 100644
--- a/test/MC/SystemZ/insn-good-zEC12.s
+++ b/test/MC/SystemZ/insn-good-zEC12.s
@@ -178,6 +178,14 @@
clgtnl %r0, 0(%r15)
clgtnh %r0, 0(%r15)
+#CHECK: etnd %r0 # encoding: [0xb2,0xec,0x00,0x00]
+#CHECK: etnd %r15 # encoding: [0xb2,0xec,0x00,0xf0]
+#CHECK: etnd %r7 # encoding: [0xb2,0xec,0x00,0x70]
+
+ etnd %r0
+ etnd %r15
+ etnd %r7
+
#CHECK: lat %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x9f]
#CHECK: lat %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x9f]
#CHECK: lat %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x9f]
@@ -288,14 +296,6 @@
llgtat %r0, 524287(%r15,%r1)
llgtat %r15, 0
-#CHECK: etnd %r0 # encoding: [0xb2,0xec,0x00,0x00]
-#CHECK: etnd %r15 # encoding: [0xb2,0xec,0x00,0xf0]
-#CHECK: etnd %r7 # encoding: [0xb2,0xec,0x00,0x70]
-
- etnd %r0
- etnd %r15
- etnd %r7
-
#CHECK: niai 0, 0 # encoding: [0xb2,0xfa,0x00,0x00]
#CHECK: niai 15, 0 # encoding: [0xb2,0xfa,0x00,0xf0]
#CHECK: niai 0, 15 # encoding: [0xb2,0xfa,0x00,0x0f]
diff --git a/test/MC/SystemZ/insn-good.s b/test/MC/SystemZ/insn-good.s
index f4dddc4712d5..a6228f23c8f8 100644
--- a/test/MC/SystemZ/insn-good.s
+++ b/test/MC/SystemZ/insn-good.s
@@ -415,6 +415,34 @@
algr %r15,%r0
algr %r7,%r8
+#CHECK: algsi -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x7e]
+#CHECK: algsi -1, 0 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x7e]
+#CHECK: algsi 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x7e]
+#CHECK: algsi 1, 0 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x7e]
+#CHECK: algsi 524287, 0 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x7e]
+#CHECK: algsi 0, -128 # encoding: [0xeb,0x80,0x00,0x00,0x00,0x7e]
+#CHECK: algsi 0, -1 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x7e]
+#CHECK: algsi 0, 1 # encoding: [0xeb,0x01,0x00,0x00,0x00,0x7e]
+#CHECK: algsi 0, 127 # encoding: [0xeb,0x7f,0x00,0x00,0x00,0x7e]
+#CHECK: algsi 0(%r1), 42 # encoding: [0xeb,0x2a,0x10,0x00,0x00,0x7e]
+#CHECK: algsi 0(%r15), 42 # encoding: [0xeb,0x2a,0xf0,0x00,0x00,0x7e]
+#CHECK: algsi 524287(%r1), 42 # encoding: [0xeb,0x2a,0x1f,0xff,0x7f,0x7e]
+#CHECK: algsi 524287(%r15), 42 # encoding: [0xeb,0x2a,0xff,0xff,0x7f,0x7e]
+
+ algsi -524288, 0
+ algsi -1, 0
+ algsi 0, 0
+ algsi 1, 0
+ algsi 524287, 0
+ algsi 0, -128
+ algsi 0, -1
+ algsi 0, 1
+ algsi 0, 127
+ algsi 0(%r1), 42
+ algsi 0(%r15), 42
+ algsi 524287(%r1), 42
+ algsi 524287(%r15), 42
+
#CHECK: alr %r0, %r0 # encoding: [0x1e,0x00]
#CHECK: alr %r0, %r15 # encoding: [0x1e,0x0f]
#CHECK: alr %r15, %r0 # encoding: [0x1e,0xf0]
@@ -425,6 +453,34 @@
alr %r15,%r0
alr %r7,%r8
+#CHECK: alsi -524288, 0 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x6e]
+#CHECK: alsi -1, 0 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x6e]
+#CHECK: alsi 0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x6e]
+#CHECK: alsi 1, 0 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x6e]
+#CHECK: alsi 524287, 0 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x6e]
+#CHECK: alsi 0, -128 # encoding: [0xeb,0x80,0x00,0x00,0x00,0x6e]
+#CHECK: alsi 0, -1 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x6e]
+#CHECK: alsi 0, 1 # encoding: [0xeb,0x01,0x00,0x00,0x00,0x6e]
+#CHECK: alsi 0, 127 # encoding: [0xeb,0x7f,0x00,0x00,0x00,0x6e]
+#CHECK: alsi 0(%r1), 42 # encoding: [0xeb,0x2a,0x10,0x00,0x00,0x6e]
+#CHECK: alsi 0(%r15), 42 # encoding: [0xeb,0x2a,0xf0,0x00,0x00,0x6e]
+#CHECK: alsi 524287(%r1), 42 # encoding: [0xeb,0x2a,0x1f,0xff,0x7f,0x6e]
+#CHECK: alsi 524287(%r15), 42 # encoding: [0xeb,0x2a,0xff,0xff,0x7f,0x6e]
+
+ alsi -524288, 0
+ alsi -1, 0
+ alsi 0, 0
+ alsi 1, 0
+ alsi 524287, 0
+ alsi 0, -128
+ alsi 0, -1
+ alsi 0, 1
+ alsi 0, 127
+ alsi 0(%r1), 42
+ alsi 0(%r15), 42
+ alsi 524287(%r1), 42
+ alsi 524287(%r15), 42
+
#CHECK: aly %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x5e]
#CHECK: aly %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x5e]
#CHECK: aly %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x5e]
@@ -447,6 +503,36 @@
aly %r0, 524287(%r15,%r1)
aly %r15, 0
+#CHECK: ap 0(1), 0(1) # encoding: [0xfa,0x00,0x00,0x00,0x00,0x00]
+#CHECK: ap 0(1), 0(1,%r1) # encoding: [0xfa,0x00,0x00,0x00,0x10,0x00]
+#CHECK: ap 0(1), 0(1,%r15) # encoding: [0xfa,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: ap 0(1), 4095(1) # encoding: [0xfa,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: ap 0(1), 4095(1,%r1) # encoding: [0xfa,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: ap 0(1), 4095(1,%r15) # encoding: [0xfa,0x00,0x00,0x00,0xff,0xff]
+#CHECK: ap 0(1,%r1), 0(1) # encoding: [0xfa,0x00,0x10,0x00,0x00,0x00]
+#CHECK: ap 0(1,%r15), 0(1) # encoding: [0xfa,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: ap 4095(1,%r1), 0(1) # encoding: [0xfa,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: ap 4095(1,%r15), 0(1) # encoding: [0xfa,0x00,0xff,0xff,0x00,0x00]
+#CHECK: ap 0(16,%r1), 0(1) # encoding: [0xfa,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: ap 0(16,%r15), 0(1) # encoding: [0xfa,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: ap 0(1), 0(16,%r1) # encoding: [0xfa,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: ap 0(1), 0(16,%r15) # encoding: [0xfa,0x0f,0x00,0x00,0xf0,0x00]
+
+ ap 0(1), 0(1)
+ ap 0(1), 0(1,%r1)
+ ap 0(1), 0(1,%r15)
+ ap 0(1), 4095(1)
+ ap 0(1), 4095(1,%r1)
+ ap 0(1), 4095(1,%r15)
+ ap 0(1,%r1), 0(1)
+ ap 0(1,%r15), 0(1)
+ ap 4095(1,%r1), 0(1)
+ ap 4095(1,%r15), 0(1)
+ ap 0(16,%r1), 0(1)
+ ap 0(16,%r15), 0(1)
+ ap 0(1), 0(16,%r1)
+ ap 0(1), 0(16,%r15)
+
#CHECK: ar %r0, %r0 # encoding: [0x1a,0x00]
#CHECK: ar %r0, %r15 # encoding: [0x1a,0x0f]
#CHECK: ar %r15, %r0 # encoding: [0x1a,0xf0]
@@ -1965,6 +2051,20 @@
cegbr %f7, %r8
cegbr %f15, %r15
+#CHECK: cfc 0 # encoding: [0xb2,0x1a,0x00,0x00]
+#CHECK: cfc 0(%r1) # encoding: [0xb2,0x1a,0x10,0x00]
+#CHECK: cfc 0(%r15) # encoding: [0xb2,0x1a,0xf0,0x00]
+#CHECK: cfc 4095 # encoding: [0xb2,0x1a,0x0f,0xff]
+#CHECK: cfc 4095(%r1) # encoding: [0xb2,0x1a,0x1f,0xff]
+#CHECK: cfc 4095(%r15) # encoding: [0xb2,0x1a,0xff,0xff]
+
+ cfc 0
+ cfc 0(%r1)
+ cfc 0(%r15)
+ cfc 4095
+ cfc 4095(%r1)
+ cfc 4095(%r15)
+
#CHECK: cfdbr %r0, 0, %f0 # encoding: [0xb3,0x99,0x00,0x00]
#CHECK: cfdbr %r0, 0, %f15 # encoding: [0xb3,0x99,0x00,0x0f]
#CHECK: cfdbr %r0, 15, %f0 # encoding: [0xb3,0x99,0xf0,0x00]
@@ -3435,6 +3535,16 @@
citnl %r15, 1
citnh %r15, 1
+#CHECK: cksm %r0, %r8 # encoding: [0xb2,0x41,0x00,0x08]
+#CHECK: cksm %r0, %r14 # encoding: [0xb2,0x41,0x00,0x0e]
+#CHECK: cksm %r15, %r0 # encoding: [0xb2,0x41,0x00,0xf0]
+#CHECK: cksm %r15, %r8 # encoding: [0xb2,0x41,0x00,0xf8]
+
+ cksm %r0, %r8
+ cksm %r0, %r14
+ cksm %r15, %r0
+ cksm %r15, %r8
+
#CHECK: cl %r0, 0 # encoding: [0x55,0x00,0x00,0x00]
#CHECK: cl %r0, 4095 # encoding: [0x55,0x00,0x0f,0xff]
#CHECK: cl %r0, 0(%r1) # encoding: [0x55,0x00,0x10,0x00]
@@ -3477,6 +3587,54 @@
clc 0(256,%r1), 0
clc 0(256,%r15), 0
+#CHECK: clcl %r0, %r8 # encoding: [0x0f,0x08]
+#CHECK: clcl %r0, %r14 # encoding: [0x0f,0x0e]
+#CHECK: clcl %r14, %r0 # encoding: [0x0f,0xe0]
+#CHECK: clcl %r14, %r8 # encoding: [0x0f,0xe8]
+
+ clcl %r0, %r8
+ clcl %r0, %r14
+ clcl %r14, %r0
+ clcl %r14, %r8
+
+#CHECK: clcle %r0, %r0, 0 # encoding: [0xa9,0x00,0x00,0x00]
+#CHECK: clcle %r0, %r14, 4095 # encoding: [0xa9,0x0e,0x0f,0xff]
+#CHECK: clcle %r0, %r0, 0(%r1) # encoding: [0xa9,0x00,0x10,0x00]
+#CHECK: clcle %r0, %r0, 0(%r15) # encoding: [0xa9,0x00,0xf0,0x00]
+#CHECK: clcle %r14, %r14, 4095(%r1) # encoding: [0xa9,0xee,0x1f,0xff]
+#CHECK: clcle %r0, %r0, 4095(%r15) # encoding: [0xa9,0x00,0xff,0xff]
+#CHECK: clcle %r14, %r0, 0 # encoding: [0xa9,0xe0,0x00,0x00]
+
+ clcle %r0, %r0, 0
+ clcle %r0, %r14, 4095
+ clcle %r0, %r0, 0(%r1)
+ clcle %r0, %r0, 0(%r15)
+ clcle %r14, %r14, 4095(%r1)
+ clcle %r0, %r0, 4095(%r15)
+ clcle %r14, %r0, 0
+
+#CHECK: clclu %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x8f]
+#CHECK: clclu %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x8f]
+#CHECK: clclu %r0, %r14, 0 # encoding: [0xeb,0x0e,0x00,0x00,0x00,0x8f]
+#CHECK: clclu %r0, %r14, 1 # encoding: [0xeb,0x0e,0x00,0x01,0x00,0x8f]
+#CHECK: clclu %r0, %r8, 524287 # encoding: [0xeb,0x08,0x0f,0xff,0x7f,0x8f]
+#CHECK: clclu %r0, %r8, 0(%r1) # encoding: [0xeb,0x08,0x10,0x00,0x00,0x8f]
+#CHECK: clclu %r0, %r4, 0(%r15) # encoding: [0xeb,0x04,0xf0,0x00,0x00,0x8f]
+#CHECK: clclu %r0, %r4, 524287(%r15) # encoding: [0xeb,0x04,0xff,0xff,0x7f,0x8f]
+#CHECK: clclu %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x8f]
+#CHECK: clclu %r14, %r0, 0 # encoding: [0xeb,0xe0,0x00,0x00,0x00,0x8f]
+
+ clclu %r0, %r0, -524288
+ clclu %r0, %r0, -1
+ clclu %r0, %r14, 0
+ clclu %r0, %r14, 1
+ clclu %r0, %r8, 524287
+ clclu %r0, %r8, 0(%r1)
+ clclu %r0, %r4, 0(%r15)
+ clclu %r0, %r4, 524287(%r15)
+ clclu %r0, %r0, 524287(%r1)
+ clclu %r14, %r0, 0
+
#CHECK: clfhsi 0, 0 # encoding: [0xe5,0x5d,0x00,0x00,0x00,0x00]
#CHECK: clfhsi 4095, 0 # encoding: [0xe5,0x5d,0x0f,0xff,0x00,0x00]
#CHECK: clfhsi 0, 65535 # encoding: [0xe5,0x5d,0x00,0x00,0xff,0xff]
@@ -4751,6 +4909,66 @@
cliy 524287(%r1), 42
cliy 524287(%r15), 42
+#CHECK: clm %r0, 0, 0 # encoding: [0xbd,0x00,0x00,0x00]
+#CHECK: clm %r0, 15, 4095 # encoding: [0xbd,0x0f,0x0f,0xff]
+#CHECK: clm %r0, 0, 0(%r1) # encoding: [0xbd,0x00,0x10,0x00]
+#CHECK: clm %r0, 0, 0(%r15) # encoding: [0xbd,0x00,0xf0,0x00]
+#CHECK: clm %r15, 15, 4095(%r1) # encoding: [0xbd,0xff,0x1f,0xff]
+#CHECK: clm %r0, 0, 4095(%r15) # encoding: [0xbd,0x00,0xff,0xff]
+#CHECK: clm %r15, 0, 0 # encoding: [0xbd,0xf0,0x00,0x00]
+
+ clm %r0, 0, 0
+ clm %r0, 15, 4095
+ clm %r0, 0, 0(%r1)
+ clm %r0, 0, 0(%r15)
+ clm %r15, 15, 4095(%r1)
+ clm %r0, 0, 4095(%r15)
+ clm %r15, 0, 0
+
+#CHECK: clmh %r0, 0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x20]
+#CHECK: clmh %r0, 0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x20]
+#CHECK: clmh %r0, 15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x20]
+#CHECK: clmh %r0, 15, 1 # encoding: [0xeb,0x0f,0x00,0x01,0x00,0x20]
+#CHECK: clmh %r0, 8, 524287 # encoding: [0xeb,0x08,0x0f,0xff,0x7f,0x20]
+#CHECK: clmh %r0, 8, 0(%r1) # encoding: [0xeb,0x08,0x10,0x00,0x00,0x20]
+#CHECK: clmh %r0, 4, 0(%r15) # encoding: [0xeb,0x04,0xf0,0x00,0x00,0x20]
+#CHECK: clmh %r0, 4, 524287(%r15) # encoding: [0xeb,0x04,0xff,0xff,0x7f,0x20]
+#CHECK: clmh %r0, 0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x20]
+#CHECK: clmh %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0x20]
+
+ clmh %r0, 0, -524288
+ clmh %r0, 0, -1
+ clmh %r0, 15, 0
+ clmh %r0, 15, 1
+ clmh %r0, 8, 524287
+ clmh %r0, 8, 0(%r1)
+ clmh %r0, 4, 0(%r15)
+ clmh %r0, 4, 524287(%r15)
+ clmh %r0, 0, 524287(%r1)
+ clmh %r15, 0, 0
+
+#CHECK: clmy %r0, 0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x21]
+#CHECK: clmy %r0, 0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x21]
+#CHECK: clmy %r0, 15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x21]
+#CHECK: clmy %r0, 15, 1 # encoding: [0xeb,0x0f,0x00,0x01,0x00,0x21]
+#CHECK: clmy %r0, 8, 524287 # encoding: [0xeb,0x08,0x0f,0xff,0x7f,0x21]
+#CHECK: clmy %r0, 8, 0(%r1) # encoding: [0xeb,0x08,0x10,0x00,0x00,0x21]
+#CHECK: clmy %r0, 4, 0(%r15) # encoding: [0xeb,0x04,0xf0,0x00,0x00,0x21]
+#CHECK: clmy %r0, 4, 524287(%r15) # encoding: [0xeb,0x04,0xff,0xff,0x7f,0x21]
+#CHECK: clmy %r0, 0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x21]
+#CHECK: clmy %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0x21]
+
+ clmy %r0, 0, -524288
+ clmy %r0, 0, -1
+ clmy %r0, 15, 0
+ clmy %r0, 15, 1
+ clmy %r0, 8, 524287
+ clmy %r0, 8, 0(%r1)
+ clmy %r0, 4, 0(%r15)
+ clmy %r0, 4, 524287(%r15)
+ clmy %r0, 0, 524287(%r1)
+ clmy %r15, 0, 0
+
#CHECK: clr %r0, %r0 # encoding: [0x15,0x00]
#CHECK: clr %r0, %r15 # encoding: [0x15,0x0f]
#CHECK: clr %r15, %r0 # encoding: [0x15,0xf0]
@@ -5180,6 +5398,46 @@
cly %r0, 524287(%r15,%r1)
cly %r15, 0
+#CHECK: cmpsc %r0, %r8 # encoding: [0xb2,0x63,0x00,0x08]
+#CHECK: cmpsc %r0, %r14 # encoding: [0xb2,0x63,0x00,0x0e]
+#CHECK: cmpsc %r14, %r0 # encoding: [0xb2,0x63,0x00,0xe0]
+#CHECK: cmpsc %r14, %r8 # encoding: [0xb2,0x63,0x00,0xe8]
+
+ cmpsc %r0, %r8
+ cmpsc %r0, %r14
+ cmpsc %r14, %r0
+ cmpsc %r14, %r8
+
+#CHECK: cp 0(1), 0(1) # encoding: [0xf9,0x00,0x00,0x00,0x00,0x00]
+#CHECK: cp 0(1), 0(1,%r1) # encoding: [0xf9,0x00,0x00,0x00,0x10,0x00]
+#CHECK: cp 0(1), 0(1,%r15) # encoding: [0xf9,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: cp 0(1), 4095(1) # encoding: [0xf9,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: cp 0(1), 4095(1,%r1) # encoding: [0xf9,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: cp 0(1), 4095(1,%r15) # encoding: [0xf9,0x00,0x00,0x00,0xff,0xff]
+#CHECK: cp 0(1,%r1), 0(1) # encoding: [0xf9,0x00,0x10,0x00,0x00,0x00]
+#CHECK: cp 0(1,%r15), 0(1) # encoding: [0xf9,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: cp 4095(1,%r1), 0(1) # encoding: [0xf9,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: cp 4095(1,%r15), 0(1) # encoding: [0xf9,0x00,0xff,0xff,0x00,0x00]
+#CHECK: cp 0(16,%r1), 0(1) # encoding: [0xf9,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: cp 0(16,%r15), 0(1) # encoding: [0xf9,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: cp 0(1), 0(16,%r1) # encoding: [0xf9,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: cp 0(1), 0(16,%r15) # encoding: [0xf9,0x0f,0x00,0x00,0xf0,0x00]
+
+ cp 0(1), 0(1)
+ cp 0(1), 0(1,%r1)
+ cp 0(1), 0(1,%r15)
+ cp 0(1), 4095(1)
+ cp 0(1), 4095(1,%r1)
+ cp 0(1), 4095(1,%r15)
+ cp 0(1,%r1), 0(1)
+ cp 0(1,%r15), 0(1)
+ cp 4095(1,%r1), 0(1)
+ cp 4095(1,%r15), 0(1)
+ cp 0(16,%r1), 0(1)
+ cp 0(16,%r15), 0(1)
+ cp 0(1), 0(16,%r1)
+ cp 0(1), 0(16,%r15)
+
#CHECK: cpsdr %f0, %f0, %f0 # encoding: [0xb3,0x72,0x00,0x00]
#CHECK: cpsdr %f0, %f0, %f15 # encoding: [0xb3,0x72,0x00,0x0f]
#CHECK: cpsdr %f0, %f15, %f0 # encoding: [0xb3,0x72,0xf0,0x00]
@@ -5623,6 +5881,20 @@
csg %r0, %r15, 0
csg %r15, %r0, 0
+#CHECK: csst 0, 0, %r0 # encoding: [0xc8,0x02,0x00,0x00,0x00,0x00]
+#CHECK: csst 0(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xf0,0x00]
+#CHECK: csst 1(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x01,0xf0,0x00]
+#CHECK: csst 4095(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x1f,0xff,0xf0,0x00]
+#CHECK: csst 0(%r1), 1(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xf0,0x01]
+#CHECK: csst 0(%r1), 4095(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xff,0xff]
+
+ csst 0, 0, %r0
+ csst 0(%r1), 0(%r15), %r2
+ csst 1(%r1), 0(%r15), %r2
+ csst 4095(%r1), 0(%r15), %r2
+ csst 0(%r1), 1(%r15), %r2
+ csst 0(%r1), 4095(%r15), %r2
+
#CHECK: csy %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x14]
#CHECK: csy %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x14]
#CHECK: csy %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x14]
@@ -5647,19 +5919,239 @@
csy %r0, %r15, 0
csy %r15, %r0, 0
-#CHECK: csst 0, 0, %r0 # encoding: [0xc8,0x02,0x00,0x00,0x00,0x00]
-#CHECK: csst 0(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xf0,0x00]
-#CHECK: csst 1(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x01,0xf0,0x00]
-#CHECK: csst 4095(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x1f,0xff,0xf0,0x00]
-#CHECK: csst 0(%r1), 1(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xf0,0x01]
-#CHECK: csst 0(%r1), 4095(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xff,0xff]
-
- csst 0, 0, %r0
- csst 0(%r1), 0(%r15), %r2
- csst 1(%r1), 0(%r15), %r2
- csst 4095(%r1), 0(%r15), %r2
- csst 0(%r1), 1(%r15), %r2
- csst 0(%r1), 4095(%r15), %r2
+#CHECK: cu12 %r0, %r0 # encoding: [0xb2,0xa7,0x00,0x00]
+#CHECK: cu12 %r0, %r14 # encoding: [0xb2,0xa7,0x00,0x0e]
+#CHECK: cu12 %r14, %r0 # encoding: [0xb2,0xa7,0x00,0xe0]
+#CHECK: cu12 %r6, %r8 # encoding: [0xb2,0xa7,0x00,0x68]
+#CHECK: cu12 %r4, %r12, 0 # encoding: [0xb2,0xa7,0x00,0x4c]
+#CHECK: cu12 %r4, %r12, 15 # encoding: [0xb2,0xa7,0xf0,0x4c]
+
+ cu12 %r0, %r0
+ cu12 %r0, %r14
+ cu12 %r14, %r0
+ cu12 %r6, %r8
+ cu12 %r4, %r12, 0
+ cu12 %r4, %r12, 15
+
+#CHECK: cu14 %r0, %r0 # encoding: [0xb9,0xb0,0x00,0x00]
+#CHECK: cu14 %r0, %r14 # encoding: [0xb9,0xb0,0x00,0x0e]
+#CHECK: cu14 %r14, %r0 # encoding: [0xb9,0xb0,0x00,0xe0]
+#CHECK: cu14 %r6, %r8 # encoding: [0xb9,0xb0,0x00,0x68]
+#CHECK: cu14 %r4, %r12, 0 # encoding: [0xb9,0xb0,0x00,0x4c]
+#CHECK: cu14 %r4, %r12, 15 # encoding: [0xb9,0xb0,0xf0,0x4c]
+
+ cu14 %r0, %r0
+ cu14 %r0, %r14
+ cu14 %r14, %r0
+ cu14 %r6, %r8
+ cu14 %r4, %r12, 0
+ cu14 %r4, %r12, 15
+
+#CHECK: cu21 %r0, %r0 # encoding: [0xb2,0xa6,0x00,0x00]
+#CHECK: cu21 %r0, %r14 # encoding: [0xb2,0xa6,0x00,0x0e]
+#CHECK: cu21 %r14, %r0 # encoding: [0xb2,0xa6,0x00,0xe0]
+#CHECK: cu21 %r6, %r8 # encoding: [0xb2,0xa6,0x00,0x68]
+#CHECK: cu21 %r4, %r12, 0 # encoding: [0xb2,0xa6,0x00,0x4c]
+#CHECK: cu21 %r4, %r12, 15 # encoding: [0xb2,0xa6,0xf0,0x4c]
+
+ cu21 %r0, %r0
+ cu21 %r0, %r14
+ cu21 %r14, %r0
+ cu21 %r6, %r8
+ cu21 %r4, %r12, 0
+ cu21 %r4, %r12, 15
+
+#CHECK: cu24 %r0, %r0 # encoding: [0xb9,0xb1,0x00,0x00]
+#CHECK: cu24 %r0, %r14 # encoding: [0xb9,0xb1,0x00,0x0e]
+#CHECK: cu24 %r14, %r0 # encoding: [0xb9,0xb1,0x00,0xe0]
+#CHECK: cu24 %r6, %r8 # encoding: [0xb9,0xb1,0x00,0x68]
+#CHECK: cu24 %r4, %r12, 0 # encoding: [0xb9,0xb1,0x00,0x4c]
+#CHECK: cu24 %r4, %r12, 15 # encoding: [0xb9,0xb1,0xf0,0x4c]
+
+ cu24 %r0, %r0
+ cu24 %r0, %r14
+ cu24 %r14, %r0
+ cu24 %r6, %r8
+ cu24 %r4, %r12, 0
+ cu24 %r4, %r12, 15
+
+#CHECK: cu41 %r0, %r0 # encoding: [0xb9,0xb2,0x00,0x00]
+#CHECK: cu41 %r0, %r14 # encoding: [0xb9,0xb2,0x00,0x0e]
+#CHECK: cu41 %r14, %r0 # encoding: [0xb9,0xb2,0x00,0xe0]
+#CHECK: cu41 %r6, %r8 # encoding: [0xb9,0xb2,0x00,0x68]
+
+ cu41 %r0, %r0
+ cu41 %r0, %r14
+ cu41 %r14, %r0
+ cu41 %r6, %r8
+
+#CHECK: cu42 %r0, %r0 # encoding: [0xb9,0xb3,0x00,0x00]
+#CHECK: cu42 %r0, %r14 # encoding: [0xb9,0xb3,0x00,0x0e]
+#CHECK: cu42 %r14, %r0 # encoding: [0xb9,0xb3,0x00,0xe0]
+#CHECK: cu42 %r6, %r8 # encoding: [0xb9,0xb3,0x00,0x68]
+
+ cu42 %r0, %r0
+ cu42 %r0, %r14
+ cu42 %r14, %r0
+ cu42 %r6, %r8
+
+#CHECK: cuse %r0, %r8 # encoding: [0xb2,0x57,0x00,0x08]
+#CHECK: cuse %r0, %r14 # encoding: [0xb2,0x57,0x00,0x0e]
+#CHECK: cuse %r14, %r0 # encoding: [0xb2,0x57,0x00,0xe0]
+#CHECK: cuse %r14, %r8 # encoding: [0xb2,0x57,0x00,0xe8]
+
+ cuse %r0, %r8
+ cuse %r0, %r14
+ cuse %r14, %r0
+ cuse %r14, %r8
+
+#CHECK: cutfu %r0, %r0 # encoding: [0xb2,0xa7,0x00,0x00]
+#CHECK: cutfu %r0, %r14 # encoding: [0xb2,0xa7,0x00,0x0e]
+#CHECK: cutfu %r14, %r0 # encoding: [0xb2,0xa7,0x00,0xe0]
+#CHECK: cutfu %r6, %r8 # encoding: [0xb2,0xa7,0x00,0x68]
+#CHECK: cutfu %r4, %r12, 0 # encoding: [0xb2,0xa7,0x00,0x4c]
+#CHECK: cutfu %r4, %r12, 15 # encoding: [0xb2,0xa7,0xf0,0x4c]
+
+ cutfu %r0, %r0
+ cutfu %r0, %r14
+ cutfu %r14, %r0
+ cutfu %r6, %r8
+ cutfu %r4, %r12, 0
+ cutfu %r4, %r12, 15
+
+#CHECK: cuutf %r0, %r0 # encoding: [0xb2,0xa6,0x00,0x00]
+#CHECK: cuutf %r0, %r14 # encoding: [0xb2,0xa6,0x00,0x0e]
+#CHECK: cuutf %r14, %r0 # encoding: [0xb2,0xa6,0x00,0xe0]
+#CHECK: cuutf %r6, %r8 # encoding: [0xb2,0xa6,0x00,0x68]
+#CHECK: cuutf %r4, %r12, 0 # encoding: [0xb2,0xa6,0x00,0x4c]
+#CHECK: cuutf %r4, %r12, 15 # encoding: [0xb2,0xa6,0xf0,0x4c]
+
+ cuutf %r0, %r0
+ cuutf %r0, %r14
+ cuutf %r14, %r0
+ cuutf %r6, %r8
+ cuutf %r4, %r12, 0
+ cuutf %r4, %r12, 15
+
+#CHECK: cvb %r0, 0 # encoding: [0x4f,0x00,0x00,0x00]
+#CHECK: cvb %r0, 4095 # encoding: [0x4f,0x00,0x0f,0xff]
+#CHECK: cvb %r0, 0(%r1) # encoding: [0x4f,0x00,0x10,0x00]
+#CHECK: cvb %r0, 0(%r15) # encoding: [0x4f,0x00,0xf0,0x00]
+#CHECK: cvb %r0, 4095(%r1,%r15) # encoding: [0x4f,0x01,0xff,0xff]
+#CHECK: cvb %r0, 4095(%r15,%r1) # encoding: [0x4f,0x0f,0x1f,0xff]
+#CHECK: cvb %r15, 0 # encoding: [0x4f,0xf0,0x00,0x00]
+
+ cvb %r0, 0
+ cvb %r0, 4095
+ cvb %r0, 0(%r1)
+ cvb %r0, 0(%r15)
+ cvb %r0, 4095(%r1,%r15)
+ cvb %r0, 4095(%r15,%r1)
+ cvb %r15, 0
+
+#CHECK: cvbg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x0e]
+#CHECK: cvbg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x0e]
+#CHECK: cvbg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x0e]
+#CHECK: cvbg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x0e]
+#CHECK: cvbg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x0e]
+#CHECK: cvbg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x0e]
+#CHECK: cvbg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x0e]
+#CHECK: cvbg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x0e]
+#CHECK: cvbg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x0e]
+#CHECK: cvbg %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x0e]
+
+ cvbg %r0, -524288
+ cvbg %r0, -1
+ cvbg %r0, 0
+ cvbg %r0, 1
+ cvbg %r0, 524287
+ cvbg %r0, 0(%r1)
+ cvbg %r0, 0(%r15)
+ cvbg %r0, 524287(%r1,%r15)
+ cvbg %r0, 524287(%r15,%r1)
+ cvbg %r15, 0
+
+#CHECK: cvby %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x06]
+#CHECK: cvby %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x06]
+#CHECK: cvby %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x06]
+#CHECK: cvby %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x06]
+#CHECK: cvby %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x06]
+#CHECK: cvby %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x06]
+#CHECK: cvby %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x06]
+#CHECK: cvby %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x06]
+#CHECK: cvby %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x06]
+#CHECK: cvby %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x06]
+
+ cvby %r0, -524288
+ cvby %r0, -1
+ cvby %r0, 0
+ cvby %r0, 1
+ cvby %r0, 524287
+ cvby %r0, 0(%r1)
+ cvby %r0, 0(%r15)
+ cvby %r0, 524287(%r1,%r15)
+ cvby %r0, 524287(%r15,%r1)
+ cvby %r15, 0
+
+#CHECK: cvd %r0, 0 # encoding: [0x4e,0x00,0x00,0x00]
+#CHECK: cvd %r0, 4095 # encoding: [0x4e,0x00,0x0f,0xff]
+#CHECK: cvd %r0, 0(%r1) # encoding: [0x4e,0x00,0x10,0x00]
+#CHECK: cvd %r0, 0(%r15) # encoding: [0x4e,0x00,0xf0,0x00]
+#CHECK: cvd %r0, 4095(%r1,%r15) # encoding: [0x4e,0x01,0xff,0xff]
+#CHECK: cvd %r0, 4095(%r15,%r1) # encoding: [0x4e,0x0f,0x1f,0xff]
+#CHECK: cvd %r15, 0 # encoding: [0x4e,0xf0,0x00,0x00]
+
+ cvd %r0, 0
+ cvd %r0, 4095
+ cvd %r0, 0(%r1)
+ cvd %r0, 0(%r15)
+ cvd %r0, 4095(%r1,%r15)
+ cvd %r0, 4095(%r15,%r1)
+ cvd %r15, 0
+
+#CHECK: cvdg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x2e]
+#CHECK: cvdg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x2e]
+#CHECK: cvdg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x2e]
+#CHECK: cvdg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x2e]
+#CHECK: cvdg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x2e]
+#CHECK: cvdg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x2e]
+#CHECK: cvdg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x2e]
+#CHECK: cvdg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x2e]
+#CHECK: cvdg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x2e]
+#CHECK: cvdg %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x2e]
+
+ cvdg %r0, -524288
+ cvdg %r0, -1
+ cvdg %r0, 0
+ cvdg %r0, 1
+ cvdg %r0, 524287
+ cvdg %r0, 0(%r1)
+ cvdg %r0, 0(%r15)
+ cvdg %r0, 524287(%r1,%r15)
+ cvdg %r0, 524287(%r15,%r1)
+ cvdg %r15, 0
+
+#CHECK: cvdy %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x26]
+#CHECK: cvdy %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x26]
+#CHECK: cvdy %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x26]
+#CHECK: cvdy %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x26]
+#CHECK: cvdy %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x26]
+#CHECK: cvdy %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x26]
+#CHECK: cvdy %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x26]
+#CHECK: cvdy %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x26]
+#CHECK: cvdy %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x26]
+#CHECK: cvdy %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x26]
+
+ cvdy %r0, -524288
+ cvdy %r0, -1
+ cvdy %r0, 0
+ cvdy %r0, 1
+ cvdy %r0, 524287
+ cvdy %r0, 0(%r1)
+ cvdy %r0, 0(%r15)
+ cvdy %r0, 524287(%r1,%r15)
+ cvdy %r0, 524287(%r15,%r1)
+ cvdy %r15, 0
#CHECK: cxbr %f0, %f0 # encoding: [0xb3,0x49,0x00,0x00]
#CHECK: cxbr %f0, %f13 # encoding: [0xb3,0x49,0x00,0x0d]
@@ -5717,6 +6209,22 @@
cy %r0, 524287(%r15,%r1)
cy %r15, 0
+#CHECK: d %r0, 0 # encoding: [0x5d,0x00,0x00,0x00]
+#CHECK: d %r0, 4095 # encoding: [0x5d,0x00,0x0f,0xff]
+#CHECK: d %r0, 0(%r1) # encoding: [0x5d,0x00,0x10,0x00]
+#CHECK: d %r0, 0(%r15) # encoding: [0x5d,0x00,0xf0,0x00]
+#CHECK: d %r0, 4095(%r1,%r15) # encoding: [0x5d,0x01,0xff,0xff]
+#CHECK: d %r0, 4095(%r15,%r1) # encoding: [0x5d,0x0f,0x1f,0xff]
+#CHECK: d %r14, 0 # encoding: [0x5d,0xe0,0x00,0x00]
+
+ d %r0, 0
+ d %r0, 4095
+ d %r0, 0(%r1)
+ d %r0, 0(%r15)
+ d %r0, 4095(%r1,%r15)
+ d %r0, 4095(%r15,%r1)
+ d %r14, 0
+
#CHECK: ddb %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x1d]
#CHECK: ddb %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x1d]
#CHECK: ddb %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x1d]
@@ -5769,6 +6277,34 @@
debr %f7, %f8
debr %f15, %f0
+#CHECK: didbr %f0, %f0, %f0, 0 # encoding: [0xb3,0x5b,0x00,0x00]
+#CHECK: didbr %f0, %f0, %f0, 15 # encoding: [0xb3,0x5b,0x0f,0x00]
+#CHECK: didbr %f0, %f0, %f15, 0 # encoding: [0xb3,0x5b,0x00,0x0f]
+#CHECK: didbr %f0, %f15, %f0, 0 # encoding: [0xb3,0x5b,0xf0,0x00]
+#CHECK: didbr %f4, %f5, %f6, 7 # encoding: [0xb3,0x5b,0x57,0x46]
+#CHECK: didbr %f15, %f0, %f0, 0 # encoding: [0xb3,0x5b,0x00,0xf0]
+
+ didbr %f0, %f0, %f0, 0
+ didbr %f0, %f0, %f0, 15
+ didbr %f0, %f0, %f15, 0
+ didbr %f0, %f15, %f0, 0
+ didbr %f4, %f5, %f6, 7
+ didbr %f15, %f0, %f0, 0
+
+#CHECK: diebr %f0, %f0, %f0, 0 # encoding: [0xb3,0x53,0x00,0x00]
+#CHECK: diebr %f0, %f0, %f0, 15 # encoding: [0xb3,0x53,0x0f,0x00]
+#CHECK: diebr %f0, %f0, %f15, 0 # encoding: [0xb3,0x53,0x00,0x0f]
+#CHECK: diebr %f0, %f15, %f0, 0 # encoding: [0xb3,0x53,0xf0,0x00]
+#CHECK: diebr %f4, %f5, %f6, 7 # encoding: [0xb3,0x53,0x57,0x46]
+#CHECK: diebr %f15, %f0, %f0, 0 # encoding: [0xb3,0x53,0x00,0xf0]
+
+ diebr %f0, %f0, %f0, 0
+ diebr %f0, %f0, %f0, 15
+ diebr %f0, %f0, %f15, 0
+ diebr %f0, %f15, %f0, 0
+ diebr %f4, %f5, %f6, 7
+ diebr %f15, %f0, %f0, 0
+
#CHECK: dl %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x97]
#CHECK: dl %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x97]
#CHECK: dl %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x97]
@@ -5833,6 +6369,46 @@
dlr %r14,%r0
dlr %r6,%r9
+#CHECK: dp 0(1), 0(1) # encoding: [0xfd,0x00,0x00,0x00,0x00,0x00]
+#CHECK: dp 0(1), 0(1,%r1) # encoding: [0xfd,0x00,0x00,0x00,0x10,0x00]
+#CHECK: dp 0(1), 0(1,%r15) # encoding: [0xfd,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: dp 0(1), 4095(1) # encoding: [0xfd,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: dp 0(1), 4095(1,%r1) # encoding: [0xfd,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: dp 0(1), 4095(1,%r15) # encoding: [0xfd,0x00,0x00,0x00,0xff,0xff]
+#CHECK: dp 0(1,%r1), 0(1) # encoding: [0xfd,0x00,0x10,0x00,0x00,0x00]
+#CHECK: dp 0(1,%r15), 0(1) # encoding: [0xfd,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: dp 4095(1,%r1), 0(1) # encoding: [0xfd,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: dp 4095(1,%r15), 0(1) # encoding: [0xfd,0x00,0xff,0xff,0x00,0x00]
+#CHECK: dp 0(16,%r1), 0(1) # encoding: [0xfd,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: dp 0(16,%r15), 0(1) # encoding: [0xfd,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: dp 0(1), 0(16,%r1) # encoding: [0xfd,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: dp 0(1), 0(16,%r15) # encoding: [0xfd,0x0f,0x00,0x00,0xf0,0x00]
+
+ dp 0(1), 0(1)
+ dp 0(1), 0(1,%r1)
+ dp 0(1), 0(1,%r15)
+ dp 0(1), 4095(1)
+ dp 0(1), 4095(1,%r1)
+ dp 0(1), 4095(1,%r15)
+ dp 0(1,%r1), 0(1)
+ dp 0(1,%r15), 0(1)
+ dp 4095(1,%r1), 0(1)
+ dp 4095(1,%r15), 0(1)
+ dp 0(16,%r1), 0(1)
+ dp 0(16,%r15), 0(1)
+ dp 0(1), 0(16,%r1)
+ dp 0(1), 0(16,%r15)
+
+#CHECK: dr %r0, %r0 # encoding: [0x1d,0x00]
+#CHECK: dr %r0, %r15 # encoding: [0x1d,0x0f]
+#CHECK: dr %r14, %r0 # encoding: [0x1d,0xe0]
+#CHECK: dr %r6, %r9 # encoding: [0x1d,0x69]
+
+ dr %r0,%r0
+ dr %r0,%r15
+ dr %r14,%r0
+ dr %r6,%r9
+
#CHECK: dsg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x0d]
#CHECK: dsg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x0d]
#CHECK: dsg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x0d]
@@ -5919,6 +6495,34 @@
ear %r7, %a8
ear %r15, %a15
+#CHECK: ecag %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x4c]
+#CHECK: ecag %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x4c]
+#CHECK: ecag %r14, %r15, 0 # encoding: [0xeb,0xef,0x00,0x00,0x00,0x4c]
+#CHECK: ecag %r15, %r15, 0 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x4c]
+#CHECK: ecag %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x4c]
+#CHECK: ecag %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x4c]
+#CHECK: ecag %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x4c]
+#CHECK: ecag %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x4c]
+#CHECK: ecag %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x4c]
+#CHECK: ecag %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0x4c]
+#CHECK: ecag %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0x4c]
+#CHECK: ecag %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x4c]
+#CHECK: ecag %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0x4c]
+
+ ecag %r0,%r0,0
+ ecag %r0,%r15,0
+ ecag %r14,%r15,0
+ ecag %r15,%r15,0
+ ecag %r0,%r0,-524288
+ ecag %r0,%r0,-1
+ ecag %r0,%r0,0
+ ecag %r0,%r0,1
+ ecag %r0,%r0,524287
+ ecag %r0,%r0,0(%r1)
+ ecag %r0,%r0,0(%r15)
+ ecag %r0,%r0,524287(%r1)
+ ecag %r0,%r0,524287(%r15)
+
#CHECK: ectg 0, 0, %r0 # encoding: [0xc8,0x01,0x00,0x00,0x00,0x00]
#CHECK: ectg 0(%r1), 0(%r15), %r2 # encoding: [0xc8,0x21,0x10,0x00,0xf0,0x00]
#CHECK: ectg 1(%r1), 0(%r15), %r2 # encoding: [0xc8,0x21,0x10,0x01,0xf0,0x00]
@@ -5933,6 +6537,58 @@
ectg 0(%r1),1(%r15),%r2
ectg 0(%r1),4095(%r15),%r2
+#CHECK: ed 0(1), 0 # encoding: [0xde,0x00,0x00,0x00,0x00,0x00]
+#CHECK: ed 0(1), 0(%r1) # encoding: [0xde,0x00,0x00,0x00,0x10,0x00]
+#CHECK: ed 0(1), 0(%r15) # encoding: [0xde,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: ed 0(1), 4095 # encoding: [0xde,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: ed 0(1), 4095(%r1) # encoding: [0xde,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: ed 0(1), 4095(%r15) # encoding: [0xde,0x00,0x00,0x00,0xff,0xff]
+#CHECK: ed 0(1,%r1), 0 # encoding: [0xde,0x00,0x10,0x00,0x00,0x00]
+#CHECK: ed 0(1,%r15), 0 # encoding: [0xde,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: ed 4095(1,%r1), 0 # encoding: [0xde,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: ed 4095(1,%r15), 0 # encoding: [0xde,0x00,0xff,0xff,0x00,0x00]
+#CHECK: ed 0(256,%r1), 0 # encoding: [0xde,0xff,0x10,0x00,0x00,0x00]
+#CHECK: ed 0(256,%r15), 0 # encoding: [0xde,0xff,0xf0,0x00,0x00,0x00]
+
+ ed 0(1), 0
+ ed 0(1), 0(%r1)
+ ed 0(1), 0(%r15)
+ ed 0(1), 4095
+ ed 0(1), 4095(%r1)
+ ed 0(1), 4095(%r15)
+ ed 0(1,%r1), 0
+ ed 0(1,%r15), 0
+ ed 4095(1,%r1), 0
+ ed 4095(1,%r15), 0
+ ed 0(256,%r1), 0
+ ed 0(256,%r15), 0
+
+#CHECK: edmk 0(1), 0 # encoding: [0xdf,0x00,0x00,0x00,0x00,0x00]
+#CHECK: edmk 0(1), 0(%r1) # encoding: [0xdf,0x00,0x00,0x00,0x10,0x00]
+#CHECK: edmk 0(1), 0(%r15) # encoding: [0xdf,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: edmk 0(1), 4095 # encoding: [0xdf,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: edmk 0(1), 4095(%r1) # encoding: [0xdf,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: edmk 0(1), 4095(%r15) # encoding: [0xdf,0x00,0x00,0x00,0xff,0xff]
+#CHECK: edmk 0(1,%r1), 0 # encoding: [0xdf,0x00,0x10,0x00,0x00,0x00]
+#CHECK: edmk 0(1,%r15), 0 # encoding: [0xdf,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: edmk 4095(1,%r1), 0 # encoding: [0xdf,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: edmk 4095(1,%r15), 0 # encoding: [0xdf,0x00,0xff,0xff,0x00,0x00]
+#CHECK: edmk 0(256,%r1), 0 # encoding: [0xdf,0xff,0x10,0x00,0x00,0x00]
+#CHECK: edmk 0(256,%r15), 0 # encoding: [0xdf,0xff,0xf0,0x00,0x00,0x00]
+
+ edmk 0(1), 0
+ edmk 0(1), 0(%r1)
+ edmk 0(1), 0(%r15)
+ edmk 0(1), 4095
+ edmk 0(1), 4095(%r1)
+ edmk 0(1), 4095(%r15)
+ edmk 0(1,%r1), 0
+ edmk 0(1,%r15), 0
+ edmk 4095(1,%r1), 0
+ edmk 4095(1,%r15), 0
+ edmk 0(256,%r1), 0
+ edmk 0(256,%r15), 0
+
#CHECK: efpc %r0 # encoding: [0xb3,0x8c,0x00,0x00]
#CHECK: efpc %r1 # encoding: [0xb3,0x8c,0x00,0x10]
#CHECK: efpc %r15 # encoding: [0xb3,0x8c,0x00,0xf0]
@@ -5941,6 +6597,16 @@
efpc %r1
efpc %r15
+#CHECK: epsw %r0, %r8 # encoding: [0xb9,0x8d,0x00,0x08]
+#CHECK: epsw %r0, %r15 # encoding: [0xb9,0x8d,0x00,0x0f]
+#CHECK: epsw %r15, %r0 # encoding: [0xb9,0x8d,0x00,0xf0]
+#CHECK: epsw %r15, %r8 # encoding: [0xb9,0x8d,0x00,0xf8]
+
+ epsw %r0, %r8
+ epsw %r0, %r15
+ epsw %r15, %r0
+ epsw %r15, %r8
+
#CHECK: ex %r0, 0 # encoding: [0x44,0x00,0x00,0x00]
#CHECK: ex %r0, 4095 # encoding: [0x44,0x00,0x0f,0xff]
#CHECK: ex %r0, 0(%r1) # encoding: [0x44,0x00,0x10,0x00]
@@ -6202,6 +6868,118 @@
ipm %r1
ipm %r15
+#CHECK: kdb %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x18]
+#CHECK: kdb %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x18]
+#CHECK: kdb %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x18]
+#CHECK: kdb %f0, 0(%r15) # encoding: [0xed,0x00,0xf0,0x00,0x00,0x18]
+#CHECK: kdb %f0, 4095(%r1,%r15) # encoding: [0xed,0x01,0xff,0xff,0x00,0x18]
+#CHECK: kdb %f0, 4095(%r15,%r1) # encoding: [0xed,0x0f,0x1f,0xff,0x00,0x18]
+#CHECK: kdb %f15, 0 # encoding: [0xed,0xf0,0x00,0x00,0x00,0x18]
+
+ kdb %f0, 0
+ kdb %f0, 4095
+ kdb %f0, 0(%r1)
+ kdb %f0, 0(%r15)
+ kdb %f0, 4095(%r1,%r15)
+ kdb %f0, 4095(%r15,%r1)
+ kdb %f15, 0
+
+#CHECK: kdbr %f0, %f0 # encoding: [0xb3,0x18,0x00,0x00]
+#CHECK: kdbr %f0, %f15 # encoding: [0xb3,0x18,0x00,0x0f]
+#CHECK: kdbr %f7, %f8 # encoding: [0xb3,0x18,0x00,0x78]
+#CHECK: kdbr %f15, %f0 # encoding: [0xb3,0x18,0x00,0xf0]
+
+ kdbr %f0, %f0
+ kdbr %f0, %f15
+ kdbr %f7, %f8
+ kdbr %f15, %f0
+
+#CHECK: keb %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x08]
+#CHECK: keb %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x08]
+#CHECK: keb %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x08]
+#CHECK: keb %f0, 0(%r15) # encoding: [0xed,0x00,0xf0,0x00,0x00,0x08]
+#CHECK: keb %f0, 4095(%r1,%r15) # encoding: [0xed,0x01,0xff,0xff,0x00,0x08]
+#CHECK: keb %f0, 4095(%r15,%r1) # encoding: [0xed,0x0f,0x1f,0xff,0x00,0x08]
+#CHECK: keb %f15, 0 # encoding: [0xed,0xf0,0x00,0x00,0x00,0x08]
+
+ keb %f0, 0
+ keb %f0, 4095
+ keb %f0, 0(%r1)
+ keb %f0, 0(%r15)
+ keb %f0, 4095(%r1,%r15)
+ keb %f0, 4095(%r15,%r1)
+ keb %f15, 0
+
+#CHECK: kebr %f0, %f0 # encoding: [0xb3,0x08,0x00,0x00]
+#CHECK: kebr %f0, %f15 # encoding: [0xb3,0x08,0x00,0x0f]
+#CHECK: kebr %f7, %f8 # encoding: [0xb3,0x08,0x00,0x78]
+#CHECK: kebr %f15, %f0 # encoding: [0xb3,0x08,0x00,0xf0]
+
+ kebr %f0, %f0
+ kebr %f0, %f15
+ kebr %f7, %f8
+ kebr %f15, %f0
+
+#CHECK: kimd %r0, %r2 # encoding: [0xb9,0x3e,0x00,0x02]
+#CHECK: kimd %r0, %r14 # encoding: [0xb9,0x3e,0x00,0x0e]
+#CHECK: kimd %r15, %r2 # encoding: [0xb9,0x3e,0x00,0xf2]
+#CHECK: kimd %r7, %r10 # encoding: [0xb9,0x3e,0x00,0x7a]
+
+ kimd %r0, %r2
+ kimd %r0, %r14
+ kimd %r15, %r2
+ kimd %r7, %r10
+
+#CHECK: klmd %r0, %r2 # encoding: [0xb9,0x3f,0x00,0x02]
+#CHECK: klmd %r0, %r14 # encoding: [0xb9,0x3f,0x00,0x0e]
+#CHECK: klmd %r15, %r2 # encoding: [0xb9,0x3f,0x00,0xf2]
+#CHECK: klmd %r7, %r10 # encoding: [0xb9,0x3f,0x00,0x7a]
+
+ klmd %r0, %r2
+ klmd %r0, %r14
+ klmd %r15, %r2
+ klmd %r7, %r10
+
+#CHECK: km %r2, %r2 # encoding: [0xb9,0x2e,0x00,0x22]
+#CHECK: km %r2, %r14 # encoding: [0xb9,0x2e,0x00,0x2e]
+#CHECK: km %r14, %r2 # encoding: [0xb9,0x2e,0x00,0xe2]
+#CHECK: km %r6, %r10 # encoding: [0xb9,0x2e,0x00,0x6a]
+
+ km %r2, %r2
+ km %r2, %r14
+ km %r14, %r2
+ km %r6, %r10
+
+#CHECK: kmac %r0, %r2 # encoding: [0xb9,0x1e,0x00,0x02]
+#CHECK: kmac %r0, %r14 # encoding: [0xb9,0x1e,0x00,0x0e]
+#CHECK: kmac %r15, %r2 # encoding: [0xb9,0x1e,0x00,0xf2]
+#CHECK: kmac %r7, %r10 # encoding: [0xb9,0x1e,0x00,0x7a]
+
+ kmac %r0, %r2
+ kmac %r0, %r14
+ kmac %r15, %r2
+ kmac %r7, %r10
+
+#CHECK: kmc %r2, %r2 # encoding: [0xb9,0x2f,0x00,0x22]
+#CHECK: kmc %r2, %r14 # encoding: [0xb9,0x2f,0x00,0x2e]
+#CHECK: kmc %r14, %r2 # encoding: [0xb9,0x2f,0x00,0xe2]
+#CHECK: kmc %r6, %r10 # encoding: [0xb9,0x2f,0x00,0x6a]
+
+ kmc %r2, %r2
+ kmc %r2, %r14
+ kmc %r14, %r2
+ kmc %r6, %r10
+
+#CHECK: kxbr %f0, %f0 # encoding: [0xb3,0x48,0x00,0x00]
+#CHECK: kxbr %f0, %f13 # encoding: [0xb3,0x48,0x00,0x0d]
+#CHECK: kxbr %f8, %f8 # encoding: [0xb3,0x48,0x00,0x88]
+#CHECK: kxbr %f13, %f0 # encoding: [0xb3,0x48,0x00,0xd0]
+
+ kxbr %f0, %f0
+ kxbr %f0, %f13
+ kxbr %f8, %f8
+ kxbr %f13, %f0
+
#CHECK: l %r0, 0 # encoding: [0x58,0x00,0x00,0x00]
#CHECK: l %r0, 4095 # encoding: [0x58,0x00,0x0f,0xff]
#CHECK: l %r0, 0(%r1) # encoding: [0x58,0x00,0x10,0x00]
@@ -7101,36 +7879,6 @@
llgcr %r7, %r8
llgcr %r15, %r0
-#CHECK: llgt %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x17]
-#CHECK: llgt %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x17]
-#CHECK: llgt %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x17]
-#CHECK: llgt %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x17]
-#CHECK: llgt %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x17]
-#CHECK: llgt %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x17]
-#CHECK: llgt %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x17]
-#CHECK: llgt %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x17]
-#CHECK: llgt %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x17]
-#CHECK: llgt %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x17]
-
- llgt %r0, -524288
- llgt %r0, -1
- llgt %r0, 0
- llgt %r0, 1
- llgt %r0, 524287
- llgt %r0, 0(%r1)
- llgt %r0, 0(%r15)
- llgt %r0, 524287(%r1,%r15)
- llgt %r0, 524287(%r15,%r1)
- llgt %r15, 0
-
-#CHECK: llgtr %r0, %r15 # encoding: [0xb9,0x17,0x00,0x0f]
-#CHECK: llgtr %r7, %r8 # encoding: [0xb9,0x17,0x00,0x78]
-#CHECK: llgtr %r15, %r0 # encoding: [0xb9,0x17,0x00,0xf0]
-
- llgtr %r0, %r15
- llgtr %r7, %r8
- llgtr %r15, %r0
-
#CHECK: llgf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x16]
#CHECK: llgf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x16]
#CHECK: llgf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x16]
@@ -7221,7 +7969,6 @@
llgh %r0, 524287(%r15,%r1)
llgh %r15, 0
-
#CHECK: llghr %r0, %r15 # encoding: [0xb9,0x85,0x00,0x0f]
#CHECK: llghr %r7, %r8 # encoding: [0xb9,0x85,0x00,0x78]
#CHECK: llghr %r15, %r0 # encoding: [0xb9,0x85,0x00,0xf0]
@@ -7267,6 +8014,36 @@
llghrl %r7,frob@PLT
llghrl %r8,frob@PLT
+#CHECK: llgt %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x17]
+#CHECK: llgt %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x17]
+#CHECK: llgt %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x17]
+#CHECK: llgt %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x17]
+#CHECK: llgt %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x17]
+#CHECK: llgt %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x17]
+#CHECK: llgt %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x17]
+#CHECK: llgt %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x17]
+#CHECK: llgt %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x17]
+#CHECK: llgt %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x17]
+
+ llgt %r0, -524288
+ llgt %r0, -1
+ llgt %r0, 0
+ llgt %r0, 1
+ llgt %r0, 524287
+ llgt %r0, 0(%r1)
+ llgt %r0, 0(%r15)
+ llgt %r0, 524287(%r1,%r15)
+ llgt %r0, 524287(%r15,%r1)
+ llgt %r15, 0
+
+#CHECK: llgtr %r0, %r15 # encoding: [0xb9,0x17,0x00,0x0f]
+#CHECK: llgtr %r7, %r8 # encoding: [0xb9,0x17,0x00,0x78]
+#CHECK: llgtr %r15, %r0 # encoding: [0xb9,0x17,0x00,0xf0]
+
+ llgtr %r0, %r15
+ llgtr %r7, %r8
+ llgtr %r15, %r0
+
#CHECK: llh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x95]
#CHECK: llh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x95]
#CHECK: llh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x95]
@@ -7412,6 +8189,26 @@
lm %r0,%r0,4095(%r1)
lm %r0,%r0,4095(%r15)
+#CHECK: lmd %r0, %r0, 0, 0 # encoding: [0xef,0x00,0x00,0x00,0x00,0x00]
+#CHECK: lmd %r0, %r15, 0, 0 # encoding: [0xef,0x0f,0x00,0x00,0x00,0x00]
+#CHECK: lmd %r14, %r15, 0, 0 # encoding: [0xef,0xef,0x00,0x00,0x00,0x00]
+#CHECK: lmd %r15, %r15, 0, 0 # encoding: [0xef,0xff,0x00,0x00,0x00,0x00]
+#CHECK: lmd %r2, %r4, 0(%r1), 0(%r15) # encoding: [0xef,0x24,0x10,0x00,0xf0,0x00]
+#CHECK: lmd %r2, %r4, 1(%r1), 0(%r15) # encoding: [0xef,0x24,0x10,0x01,0xf0,0x00]
+#CHECK: lmd %r2, %r4, 4095(%r1), 0(%r15) # encoding: [0xef,0x24,0x1f,0xff,0xf0,0x00]
+#CHECK: lmd %r2, %r4, 0(%r1), 1(%r15) # encoding: [0xef,0x24,0x10,0x00,0xf0,0x01]
+#CHECK: lmd %r2, %r4, 0(%r1), 4095(%r15) # encoding: [0xef,0x24,0x10,0x00,0xff,0xff]
+
+ lmd %r0, %r0, 0, 0
+ lmd %r0, %r15, 0, 0
+ lmd %r14, %r15, 0, 0
+ lmd %r15, %r15, 0, 0
+ lmd %r2, %r4, 0(%r1), 0(%r15)
+ lmd %r2, %r4, 1(%r1), 0(%r15)
+ lmd %r2, %r4, 4095(%r1), 0(%r15)
+ lmd %r2, %r4, 0(%r1), 1(%r15)
+ lmd %r2, %r4, 0(%r1), 4095(%r15)
+
#CHECK: lmg %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x04]
#CHECK: lmg %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x04]
#CHECK: lmg %r14, %r15, 0 # encoding: [0xeb,0xef,0x00,0x00,0x00,0x04]
@@ -7685,28 +8482,6 @@
lrl %r7,frob@PLT
lrl %r8,frob@PLT
-#CHECK: lrvh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x1f]
-#CHECK: lrvh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x1f]
-#CHECK: lrvh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x1f]
-#CHECK: lrvh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x1f]
-#CHECK: lrvh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x1f]
-#CHECK: lrvh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x1f]
-#CHECK: lrvh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x1f]
-#CHECK: lrvh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x1f]
-#CHECK: lrvh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x1f]
-#CHECK: lrvh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x1f]
-
- lrvh %r0,-524288
- lrvh %r0,-1
- lrvh %r0,0
- lrvh %r0,1
- lrvh %r0,524287
- lrvh %r0,0(%r1)
- lrvh %r0,0(%r15)
- lrvh %r0,524287(%r1,%r15)
- lrvh %r0,524287(%r15,%r1)
- lrvh %r15,0
-
#CHECK: lrv %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x1e]
#CHECK: lrv %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x1e]
#CHECK: lrv %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x1e]
@@ -7763,6 +8538,28 @@
lrvgr %r7,%r8
lrvgr %r15,%r15
+#CHECK: lrvh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x1f]
+#CHECK: lrvh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x1f]
+#CHECK: lrvh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x1f]
+#CHECK: lrvh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x1f]
+#CHECK: lrvh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x1f]
+#CHECK: lrvh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x1f]
+#CHECK: lrvh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x1f]
+#CHECK: lrvh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x1f]
+#CHECK: lrvh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x1f]
+#CHECK: lrvh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x1f]
+
+ lrvh %r0,-524288
+ lrvh %r0,-1
+ lrvh %r0,0
+ lrvh %r0,1
+ lrvh %r0,524287
+ lrvh %r0,0(%r1)
+ lrvh %r0,0(%r15)
+ lrvh %r0,524287(%r1,%r15)
+ lrvh %r0,524287(%r15,%r1)
+ lrvh %r15,0
+
#CHECK: lrvr %r0, %r0 # encoding: [0xb9,0x1f,0x00,0x00]
#CHECK: lrvr %r0, %r15 # encoding: [0xb9,0x1f,0x00,0x0f]
#CHECK: lrvr %r15, %r0 # encoding: [0xb9,0x1f,0x00,0xf0]
@@ -7797,6 +8594,26 @@
lt %r0, 524287(%r15,%r1)
lt %r15, 0
+#CHECK: ltdbr %f0, %f9 # encoding: [0xb3,0x12,0x00,0x09]
+#CHECK: ltdbr %f0, %f15 # encoding: [0xb3,0x12,0x00,0x0f]
+#CHECK: ltdbr %f15, %f0 # encoding: [0xb3,0x12,0x00,0xf0]
+#CHECK: ltdbr %f15, %f9 # encoding: [0xb3,0x12,0x00,0xf9]
+
+ ltdbr %f0,%f9
+ ltdbr %f0,%f15
+ ltdbr %f15,%f0
+ ltdbr %f15,%f9
+
+#CHECK: ltebr %f0, %f9 # encoding: [0xb3,0x02,0x00,0x09]
+#CHECK: ltebr %f0, %f15 # encoding: [0xb3,0x02,0x00,0x0f]
+#CHECK: ltebr %f15, %f0 # encoding: [0xb3,0x02,0x00,0xf0]
+#CHECK: ltebr %f15, %f9 # encoding: [0xb3,0x02,0x00,0xf9]
+
+ ltebr %f0,%f9
+ ltebr %f0,%f15
+ ltebr %f15,%f0
+ ltebr %f15,%f9
+
#CHECK: ltg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x02]
#CHECK: ltg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x02]
#CHECK: ltg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x02]
@@ -7841,26 +8658,6 @@
ltgf %r0, 524287(%r15,%r1)
ltgf %r15, 0
-#CHECK: ltdbr %f0, %f9 # encoding: [0xb3,0x12,0x00,0x09]
-#CHECK: ltdbr %f0, %f15 # encoding: [0xb3,0x12,0x00,0x0f]
-#CHECK: ltdbr %f15, %f0 # encoding: [0xb3,0x12,0x00,0xf0]
-#CHECK: ltdbr %f15, %f9 # encoding: [0xb3,0x12,0x00,0xf9]
-
- ltdbr %f0,%f9
- ltdbr %f0,%f15
- ltdbr %f15,%f0
- ltdbr %f15,%f9
-
-#CHECK: ltebr %f0, %f9 # encoding: [0xb3,0x02,0x00,0x09]
-#CHECK: ltebr %f0, %f15 # encoding: [0xb3,0x02,0x00,0x0f]
-#CHECK: ltebr %f15, %f0 # encoding: [0xb3,0x02,0x00,0xf0]
-#CHECK: ltebr %f15, %f9 # encoding: [0xb3,0x02,0x00,0xf9]
-
- ltebr %f0,%f9
- ltebr %f0,%f15
- ltebr %f15,%f0
- ltebr %f15,%f9
-
#CHECK: ltgfr %r0, %r9 # encoding: [0xb9,0x12,0x00,0x09]
#CHECK: ltgfr %r0, %r15 # encoding: [0xb9,0x12,0x00,0x0f]
#CHECK: ltgfr %r15, %r0 # encoding: [0xb9,0x12,0x00,0xf0]
@@ -7957,6 +8754,22 @@
lzxr %f8
lzxr %f13
+#CHECK: m %r0, 0 # encoding: [0x5c,0x00,0x00,0x00]
+#CHECK: m %r0, 4095 # encoding: [0x5c,0x00,0x0f,0xff]
+#CHECK: m %r0, 0(%r1) # encoding: [0x5c,0x00,0x10,0x00]
+#CHECK: m %r0, 0(%r15) # encoding: [0x5c,0x00,0xf0,0x00]
+#CHECK: m %r0, 4095(%r1,%r15) # encoding: [0x5c,0x01,0xff,0xff]
+#CHECK: m %r0, 4095(%r15,%r1) # encoding: [0x5c,0x0f,0x1f,0xff]
+#CHECK: m %r14, 0 # encoding: [0x5c,0xe0,0x00,0x00]
+
+ m %r0, 0
+ m %r0, 4095
+ m %r0, 0(%r1)
+ m %r0, 0(%r15)
+ m %r0, 4095(%r1,%r15)
+ m %r0, 4095(%r15,%r1)
+ m %r14, 0
+
#CHECK: madb %f0, %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x1e]
#CHECK: madb %f0, %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x1e]
#CHECK: madb %f0, %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x1e]
@@ -8025,6 +8838,22 @@
maebr %f7, %f8, %f9
maebr %f15, %f15, %f15
+#CHECK: mc 0, 0 # encoding: [0xaf,0x00,0x00,0x00]
+#CHECK: mc 4095, 0 # encoding: [0xaf,0x00,0x0f,0xff]
+#CHECK: mc 0, 255 # encoding: [0xaf,0xff,0x00,0x00]
+#CHECK: mc 0(%r1), 42 # encoding: [0xaf,0x2a,0x10,0x00]
+#CHECK: mc 0(%r15), 42 # encoding: [0xaf,0x2a,0xf0,0x00]
+#CHECK: mc 4095(%r1), 42 # encoding: [0xaf,0x2a,0x1f,0xff]
+#CHECK: mc 4095(%r15), 42 # encoding: [0xaf,0x2a,0xff,0xff]
+
+ mc 0, 0
+ mc 4095, 0
+ mc 0, 255
+ mc 0(%r1), 42
+ mc 0(%r15), 42
+ mc 4095(%r1), 42
+ mc 4095(%r15), 42
+
#CHECK: mdb %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x1c]
#CHECK: mdb %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x1c]
#CHECK: mdb %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x1c]
@@ -8103,6 +8932,28 @@
meebr %f7, %f8
meebr %f15, %f0
+#CHECK: mfy %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x5c]
+#CHECK: mfy %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x5c]
+#CHECK: mfy %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x5c]
+#CHECK: mfy %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x5c]
+#CHECK: mfy %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x5c]
+#CHECK: mfy %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x5c]
+#CHECK: mfy %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x5c]
+#CHECK: mfy %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x5c]
+#CHECK: mfy %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x5c]
+#CHECK: mfy %r14, 0 # encoding: [0xe3,0xe0,0x00,0x00,0x00,0x5c]
+
+ mfy %r0, -524288
+ mfy %r0, -1
+ mfy %r0, 0
+ mfy %r0, 1
+ mfy %r0, 524287
+ mfy %r0, 0(%r1)
+ mfy %r0, 0(%r15)
+ mfy %r0, 524287(%r1,%r15)
+ mfy %r0, 524287(%r15,%r1)
+ mfy %r14, 0
+
#CHECK: mghi %r0, -32768 # encoding: [0xa7,0x0d,0x80,0x00]
#CHECK: mghi %r0, -1 # encoding: [0xa7,0x0d,0xff,0xff]
#CHECK: mghi %r0, 0 # encoding: [0xa7,0x0d,0x00,0x00]
@@ -8169,6 +9020,28 @@
mhy %r0, 524287(%r15,%r1)
mhy %r15, 0
+#CHECK: ml %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x96]
+#CHECK: ml %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x96]
+#CHECK: ml %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x96]
+#CHECK: ml %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x96]
+#CHECK: ml %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x96]
+#CHECK: ml %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x96]
+#CHECK: ml %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x96]
+#CHECK: ml %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x96]
+#CHECK: ml %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x96]
+#CHECK: ml %r14, 0 # encoding: [0xe3,0xe0,0x00,0x00,0x00,0x96]
+
+ ml %r0, -524288
+ ml %r0, -1
+ ml %r0, 0
+ ml %r0, 1
+ ml %r0, 524287
+ ml %r0, 0(%r1)
+ ml %r0, 0(%r15)
+ ml %r0, 524287(%r1,%r15)
+ ml %r0, 524287(%r15,%r1)
+ ml %r14, 0
+
#CHECK: mlg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x86]
#CHECK: mlg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x86]
#CHECK: mlg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x86]
@@ -8201,6 +9074,56 @@
mlgr %r14,%r0
mlgr %r6,%r9
+#CHECK: mlr %r0, %r0 # encoding: [0xb9,0x96,0x00,0x00]
+#CHECK: mlr %r0, %r15 # encoding: [0xb9,0x96,0x00,0x0f]
+#CHECK: mlr %r14, %r0 # encoding: [0xb9,0x96,0x00,0xe0]
+#CHECK: mlr %r6, %r9 # encoding: [0xb9,0x96,0x00,0x69]
+
+ mlr %r0,%r0
+ mlr %r0,%r15
+ mlr %r14,%r0
+ mlr %r6,%r9
+
+#CHECK: mp 0(1), 0(1) # encoding: [0xfc,0x00,0x00,0x00,0x00,0x00]
+#CHECK: mp 0(1), 0(1,%r1) # encoding: [0xfc,0x00,0x00,0x00,0x10,0x00]
+#CHECK: mp 0(1), 0(1,%r15) # encoding: [0xfc,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: mp 0(1), 4095(1) # encoding: [0xfc,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: mp 0(1), 4095(1,%r1) # encoding: [0xfc,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: mp 0(1), 4095(1,%r15) # encoding: [0xfc,0x00,0x00,0x00,0xff,0xff]
+#CHECK: mp 0(1,%r1), 0(1) # encoding: [0xfc,0x00,0x10,0x00,0x00,0x00]
+#CHECK: mp 0(1,%r15), 0(1) # encoding: [0xfc,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: mp 4095(1,%r1), 0(1) # encoding: [0xfc,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: mp 4095(1,%r15), 0(1) # encoding: [0xfc,0x00,0xff,0xff,0x00,0x00]
+#CHECK: mp 0(16,%r1), 0(1) # encoding: [0xfc,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: mp 0(16,%r15), 0(1) # encoding: [0xfc,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: mp 0(1), 0(16,%r1) # encoding: [0xfc,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: mp 0(1), 0(16,%r15) # encoding: [0xfc,0x0f,0x00,0x00,0xf0,0x00]
+
+ mp 0(1), 0(1)
+ mp 0(1), 0(1,%r1)
+ mp 0(1), 0(1,%r15)
+ mp 0(1), 4095(1)
+ mp 0(1), 4095(1,%r1)
+ mp 0(1), 4095(1,%r15)
+ mp 0(1,%r1), 0(1)
+ mp 0(1,%r15), 0(1)
+ mp 4095(1,%r1), 0(1)
+ mp 4095(1,%r15), 0(1)
+ mp 0(16,%r1), 0(1)
+ mp 0(16,%r15), 0(1)
+ mp 0(1), 0(16,%r1)
+ mp 0(1), 0(16,%r15)
+
+#CHECK: mr %r0, %r0 # encoding: [0x1c,0x00]
+#CHECK: mr %r0, %r15 # encoding: [0x1c,0x0f]
+#CHECK: mr %r14, %r0 # encoding: [0x1c,0xe0]
+#CHECK: mr %r6, %r9 # encoding: [0x1c,0x69]
+
+ mr %r0,%r0
+ mr %r0,%r15
+ mr %r14,%r0
+ mr %r6,%r9
+
#CHECK: ms %r0, 0 # encoding: [0x71,0x00,0x00,0x00]
#CHECK: ms %r0, 4095 # encoding: [0x71,0x00,0x0f,0xff]
#CHECK: ms %r0, 0(%r1) # encoding: [0x71,0x00,0x10,0x00]
@@ -8435,6 +9358,32 @@
mvc 0(256,%r1), 0
mvc 0(256,%r15), 0
+#CHECK: mvcin 0(1), 0 # encoding: [0xe8,0x00,0x00,0x00,0x00,0x00]
+#CHECK: mvcin 0(1), 0(%r1) # encoding: [0xe8,0x00,0x00,0x00,0x10,0x00]
+#CHECK: mvcin 0(1), 0(%r15) # encoding: [0xe8,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: mvcin 0(1), 4095 # encoding: [0xe8,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: mvcin 0(1), 4095(%r1) # encoding: [0xe8,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: mvcin 0(1), 4095(%r15) # encoding: [0xe8,0x00,0x00,0x00,0xff,0xff]
+#CHECK: mvcin 0(1,%r1), 0 # encoding: [0xe8,0x00,0x10,0x00,0x00,0x00]
+#CHECK: mvcin 0(1,%r15), 0 # encoding: [0xe8,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: mvcin 4095(1,%r1), 0 # encoding: [0xe8,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: mvcin 4095(1,%r15), 0 # encoding: [0xe8,0x00,0xff,0xff,0x00,0x00]
+#CHECK: mvcin 0(256,%r1), 0 # encoding: [0xe8,0xff,0x10,0x00,0x00,0x00]
+#CHECK: mvcin 0(256,%r15), 0 # encoding: [0xe8,0xff,0xf0,0x00,0x00,0x00]
+
+ mvcin 0(1), 0
+ mvcin 0(1), 0(%r1)
+ mvcin 0(1), 0(%r15)
+ mvcin 0(1), 4095
+ mvcin 0(1), 4095(%r1)
+ mvcin 0(1), 4095(%r15)
+ mvcin 0(1,%r1), 0
+ mvcin 0(1,%r15), 0
+ mvcin 4095(1,%r1), 0
+ mvcin 4095(1,%r15), 0
+ mvcin 0(256,%r1), 0
+ mvcin 0(256,%r15), 0
+
#CHECK: mvck 0(%r0), 0, %r3 # encoding: [0xd9,0x03,0x00,0x00,0x00,0x00]
#CHECK: mvck 0(%r1), 0, %r3 # encoding: [0xd9,0x13,0x00,0x00,0x00,0x00]
#CHECK: mvck 0(%r1), 0(%r1), %r3 # encoding: [0xd9,0x13,0x00,0x00,0x10,0x00]
@@ -8463,6 +9412,54 @@
mvck 0(%r2,%r1), 0, %r3
mvck 0(%r2,%r15), 0, %r3
+#CHECK: mvcl %r0, %r8 # encoding: [0x0e,0x08]
+#CHECK: mvcl %r0, %r14 # encoding: [0x0e,0x0e]
+#CHECK: mvcl %r14, %r0 # encoding: [0x0e,0xe0]
+#CHECK: mvcl %r14, %r8 # encoding: [0x0e,0xe8]
+
+ mvcl %r0, %r8
+ mvcl %r0, %r14
+ mvcl %r14, %r0
+ mvcl %r14, %r8
+
+#CHECK: mvcle %r0, %r0, 0 # encoding: [0xa8,0x00,0x00,0x00]
+#CHECK: mvcle %r0, %r14, 4095 # encoding: [0xa8,0x0e,0x0f,0xff]
+#CHECK: mvcle %r0, %r0, 0(%r1) # encoding: [0xa8,0x00,0x10,0x00]
+#CHECK: mvcle %r0, %r0, 0(%r15) # encoding: [0xa8,0x00,0xf0,0x00]
+#CHECK: mvcle %r14, %r14, 4095(%r1) # encoding: [0xa8,0xee,0x1f,0xff]
+#CHECK: mvcle %r0, %r0, 4095(%r15) # encoding: [0xa8,0x00,0xff,0xff]
+#CHECK: mvcle %r14, %r0, 0 # encoding: [0xa8,0xe0,0x00,0x00]
+
+ mvcle %r0, %r0, 0
+ mvcle %r0, %r14, 4095
+ mvcle %r0, %r0, 0(%r1)
+ mvcle %r0, %r0, 0(%r15)
+ mvcle %r14, %r14, 4095(%r1)
+ mvcle %r0, %r0, 4095(%r15)
+ mvcle %r14, %r0, 0
+
+#CHECK: mvclu %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x8e]
+#CHECK: mvclu %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x8e]
+#CHECK: mvclu %r0, %r14, 0 # encoding: [0xeb,0x0e,0x00,0x00,0x00,0x8e]
+#CHECK: mvclu %r0, %r14, 1 # encoding: [0xeb,0x0e,0x00,0x01,0x00,0x8e]
+#CHECK: mvclu %r0, %r8, 524287 # encoding: [0xeb,0x08,0x0f,0xff,0x7f,0x8e]
+#CHECK: mvclu %r0, %r8, 0(%r1) # encoding: [0xeb,0x08,0x10,0x00,0x00,0x8e]
+#CHECK: mvclu %r0, %r4, 0(%r15) # encoding: [0xeb,0x04,0xf0,0x00,0x00,0x8e]
+#CHECK: mvclu %r0, %r4, 524287(%r15) # encoding: [0xeb,0x04,0xff,0xff,0x7f,0x8e]
+#CHECK: mvclu %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x8e]
+#CHECK: mvclu %r14, %r0, 0 # encoding: [0xeb,0xe0,0x00,0x00,0x00,0x8e]
+
+ mvclu %r0, %r0, -524288
+ mvclu %r0, %r0, -1
+ mvclu %r0, %r14, 0
+ mvclu %r0, %r14, 1
+ mvclu %r0, %r8, 524287
+ mvclu %r0, %r8, 0(%r1)
+ mvclu %r0, %r4, 0(%r15)
+ mvclu %r0, %r4, 524287(%r15)
+ mvclu %r0, %r0, 524287(%r1)
+ mvclu %r14, %r0, 0
+
#CHECK: mvghi 0, 0 # encoding: [0xe5,0x48,0x00,0x00,0x00,0x00]
#CHECK: mvghi 4095, 0 # encoding: [0xe5,0x48,0x0f,0xff,0x00,0x00]
#CHECK: mvghi 0, -32768 # encoding: [0xe5,0x48,0x00,0x00,0x80,0x00]
@@ -8573,6 +9570,62 @@
mviy 524287(%r1), 42
mviy 524287(%r15), 42
+#CHECK: mvn 0(1), 0 # encoding: [0xd1,0x00,0x00,0x00,0x00,0x00]
+#CHECK: mvn 0(1), 0(%r1) # encoding: [0xd1,0x00,0x00,0x00,0x10,0x00]
+#CHECK: mvn 0(1), 0(%r15) # encoding: [0xd1,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: mvn 0(1), 4095 # encoding: [0xd1,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: mvn 0(1), 4095(%r1) # encoding: [0xd1,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: mvn 0(1), 4095(%r15) # encoding: [0xd1,0x00,0x00,0x00,0xff,0xff]
+#CHECK: mvn 0(1,%r1), 0 # encoding: [0xd1,0x00,0x10,0x00,0x00,0x00]
+#CHECK: mvn 0(1,%r15), 0 # encoding: [0xd1,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: mvn 4095(1,%r1), 0 # encoding: [0xd1,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: mvn 4095(1,%r15), 0 # encoding: [0xd1,0x00,0xff,0xff,0x00,0x00]
+#CHECK: mvn 0(256,%r1), 0 # encoding: [0xd1,0xff,0x10,0x00,0x00,0x00]
+#CHECK: mvn 0(256,%r15), 0 # encoding: [0xd1,0xff,0xf0,0x00,0x00,0x00]
+
+ mvn 0(1), 0
+ mvn 0(1), 0(%r1)
+ mvn 0(1), 0(%r15)
+ mvn 0(1), 4095
+ mvn 0(1), 4095(%r1)
+ mvn 0(1), 4095(%r15)
+ mvn 0(1,%r1), 0
+ mvn 0(1,%r15), 0
+ mvn 4095(1,%r1), 0
+ mvn 4095(1,%r15), 0
+ mvn 0(256,%r1), 0
+ mvn 0(256,%r15), 0
+
+#CHECK: mvo 0(1), 0(1) # encoding: [0xf1,0x00,0x00,0x00,0x00,0x00]
+#CHECK: mvo 0(1), 0(1,%r1) # encoding: [0xf1,0x00,0x00,0x00,0x10,0x00]
+#CHECK: mvo 0(1), 0(1,%r15) # encoding: [0xf1,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: mvo 0(1), 4095(1) # encoding: [0xf1,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: mvo 0(1), 4095(1,%r1) # encoding: [0xf1,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: mvo 0(1), 4095(1,%r15) # encoding: [0xf1,0x00,0x00,0x00,0xff,0xff]
+#CHECK: mvo 0(1,%r1), 0(1) # encoding: [0xf1,0x00,0x10,0x00,0x00,0x00]
+#CHECK: mvo 0(1,%r15), 0(1) # encoding: [0xf1,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: mvo 4095(1,%r1), 0(1) # encoding: [0xf1,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: mvo 4095(1,%r15), 0(1) # encoding: [0xf1,0x00,0xff,0xff,0x00,0x00]
+#CHECK: mvo 0(16,%r1), 0(1) # encoding: [0xf1,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: mvo 0(16,%r15), 0(1) # encoding: [0xf1,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: mvo 0(1), 0(16,%r1) # encoding: [0xf1,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: mvo 0(1), 0(16,%r15) # encoding: [0xf1,0x0f,0x00,0x00,0xf0,0x00]
+
+ mvo 0(1), 0(1)
+ mvo 0(1), 0(1,%r1)
+ mvo 0(1), 0(1,%r15)
+ mvo 0(1), 4095(1)
+ mvo 0(1), 4095(1,%r1)
+ mvo 0(1), 4095(1,%r15)
+ mvo 0(1,%r1), 0(1)
+ mvo 0(1,%r15), 0(1)
+ mvo 4095(1,%r1), 0(1)
+ mvo 4095(1,%r15), 0(1)
+ mvo 0(16,%r1), 0(1)
+ mvo 0(16,%r15), 0(1)
+ mvo 0(1), 0(16,%r1)
+ mvo 0(1), 0(16,%r15)
+
#CHECK: mvst %r0, %r0 # encoding: [0xb2,0x55,0x00,0x00]
#CHECK: mvst %r0, %r15 # encoding: [0xb2,0x55,0x00,0x0f]
#CHECK: mvst %r15, %r0 # encoding: [0xb2,0x55,0x00,0xf0]
@@ -8583,6 +9636,32 @@
mvst %r15,%r0
mvst %r7,%r8
+#CHECK: mvz 0(1), 0 # encoding: [0xd3,0x00,0x00,0x00,0x00,0x00]
+#CHECK: mvz 0(1), 0(%r1) # encoding: [0xd3,0x00,0x00,0x00,0x10,0x00]
+#CHECK: mvz 0(1), 0(%r15) # encoding: [0xd3,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: mvz 0(1), 4095 # encoding: [0xd3,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: mvz 0(1), 4095(%r1) # encoding: [0xd3,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: mvz 0(1), 4095(%r15) # encoding: [0xd3,0x00,0x00,0x00,0xff,0xff]
+#CHECK: mvz 0(1,%r1), 0 # encoding: [0xd3,0x00,0x10,0x00,0x00,0x00]
+#CHECK: mvz 0(1,%r15), 0 # encoding: [0xd3,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: mvz 4095(1,%r1), 0 # encoding: [0xd3,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: mvz 4095(1,%r15), 0 # encoding: [0xd3,0x00,0xff,0xff,0x00,0x00]
+#CHECK: mvz 0(256,%r1), 0 # encoding: [0xd3,0xff,0x10,0x00,0x00,0x00]
+#CHECK: mvz 0(256,%r15), 0 # encoding: [0xd3,0xff,0xf0,0x00,0x00,0x00]
+
+ mvz 0(1), 0
+ mvz 0(1), 0(%r1)
+ mvz 0(1), 0(%r15)
+ mvz 0(1), 4095
+ mvz 0(1), 4095(%r1)
+ mvz 0(1), 4095(%r15)
+ mvz 0(1,%r1), 0
+ mvz 0(1,%r15), 0
+ mvz 4095(1,%r1), 0
+ mvz 4095(1,%r15), 0
+ mvz 0(256,%r1), 0
+ mvz 0(256,%r15), 0
+
#CHECK: mxbr %f0, %f0 # encoding: [0xb3,0x4c,0x00,0x00]
#CHECK: mxbr %f0, %f13 # encoding: [0xb3,0x4c,0x00,0x0d]
#CHECK: mxbr %f8, %f5 # encoding: [0xb3,0x4c,0x00,0x85]
@@ -9025,6 +10104,36 @@
oy %r0, 524287(%r15,%r1)
oy %r15, 0
+#CHECK: pack 0(1), 0(1) # encoding: [0xf2,0x00,0x00,0x00,0x00,0x00]
+#CHECK: pack 0(1), 0(1,%r1) # encoding: [0xf2,0x00,0x00,0x00,0x10,0x00]
+#CHECK: pack 0(1), 0(1,%r15) # encoding: [0xf2,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: pack 0(1), 4095(1) # encoding: [0xf2,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: pack 0(1), 4095(1,%r1) # encoding: [0xf2,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: pack 0(1), 4095(1,%r15) # encoding: [0xf2,0x00,0x00,0x00,0xff,0xff]
+#CHECK: pack 0(1,%r1), 0(1) # encoding: [0xf2,0x00,0x10,0x00,0x00,0x00]
+#CHECK: pack 0(1,%r15), 0(1) # encoding: [0xf2,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: pack 4095(1,%r1), 0(1) # encoding: [0xf2,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: pack 4095(1,%r15), 0(1) # encoding: [0xf2,0x00,0xff,0xff,0x00,0x00]
+#CHECK: pack 0(16,%r1), 0(1) # encoding: [0xf2,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: pack 0(16,%r15), 0(1) # encoding: [0xf2,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: pack 0(1), 0(16,%r1) # encoding: [0xf2,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: pack 0(1), 0(16,%r15) # encoding: [0xf2,0x0f,0x00,0x00,0xf0,0x00]
+
+ pack 0(1), 0(1)
+ pack 0(1), 0(1,%r1)
+ pack 0(1), 0(1,%r15)
+ pack 0(1), 4095(1)
+ pack 0(1), 4095(1,%r1)
+ pack 0(1), 4095(1,%r15)
+ pack 0(1,%r1), 0(1)
+ pack 0(1,%r15), 0(1)
+ pack 4095(1,%r1), 0(1)
+ pack 4095(1,%r15), 0(1)
+ pack 0(16,%r1), 0(1)
+ pack 0(16,%r15), 0(1)
+ pack 0(1), 0(16,%r1)
+ pack 0(1), 0(16,%r15)
+
#CHECK: pfd 0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x36]
#CHECK: pfd 0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x36]
#CHECK: pfd 0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x36]
@@ -9084,6 +10193,58 @@
pfdrl 7, frob@PLT
pfdrl 8, frob@PLT
+#CHECK: pka 0, 0(1) # encoding: [0xe9,0x00,0x00,0x00,0x00,0x00]
+#CHECK: pka 0, 0(1,%r1) # encoding: [0xe9,0x00,0x00,0x00,0x10,0x00]
+#CHECK: pka 0, 0(1,%r15) # encoding: [0xe9,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: pka 0, 4095(1) # encoding: [0xe9,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: pka 0, 4095(1,%r1) # encoding: [0xe9,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: pka 0, 4095(1,%r15) # encoding: [0xe9,0x00,0x00,0x00,0xff,0xff]
+#CHECK: pka 0(%r1), 0(1) # encoding: [0xe9,0x00,0x10,0x00,0x00,0x00]
+#CHECK: pka 0(%r15), 0(1) # encoding: [0xe9,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: pka 4095(%r1), 0(1) # encoding: [0xe9,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: pka 4095(%r15), 0(1) # encoding: [0xe9,0x00,0xff,0xff,0x00,0x00]
+#CHECK: pka 0, 0(256,%r1) # encoding: [0xe9,0xff,0x00,0x00,0x10,0x00]
+#CHECK: pka 0, 0(256,%r15) # encoding: [0xe9,0xff,0x00,0x00,0xf0,0x00]
+
+ pka 0, 0(1)
+ pka 0, 0(1,%r1)
+ pka 0, 0(1,%r15)
+ pka 0, 4095(1)
+ pka 0, 4095(1,%r1)
+ pka 0, 4095(1,%r15)
+ pka 0(%r1), 0(1)
+ pka 0(%r15), 0(1)
+ pka 4095(%r1), 0(1)
+ pka 4095(%r15), 0(1)
+ pka 0, 0(256,%r1)
+ pka 0, 0(256,%r15)
+
+#CHECK: pku 0, 0(1) # encoding: [0xe1,0x00,0x00,0x00,0x00,0x00]
+#CHECK: pku 0, 0(1,%r1) # encoding: [0xe1,0x00,0x00,0x00,0x10,0x00]
+#CHECK: pku 0, 0(1,%r15) # encoding: [0xe1,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: pku 0, 4095(1) # encoding: [0xe1,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: pku 0, 4095(1,%r1) # encoding: [0xe1,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: pku 0, 4095(1,%r15) # encoding: [0xe1,0x00,0x00,0x00,0xff,0xff]
+#CHECK: pku 0(%r1), 0(1) # encoding: [0xe1,0x00,0x10,0x00,0x00,0x00]
+#CHECK: pku 0(%r15), 0(1) # encoding: [0xe1,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: pku 4095(%r1), 0(1) # encoding: [0xe1,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: pku 4095(%r15), 0(1) # encoding: [0xe1,0x00,0xff,0xff,0x00,0x00]
+#CHECK: pku 0, 0(256,%r1) # encoding: [0xe1,0xff,0x00,0x00,0x10,0x00]
+#CHECK: pku 0, 0(256,%r15) # encoding: [0xe1,0xff,0x00,0x00,0xf0,0x00]
+
+ pku 0, 0(1)
+ pku 0, 0(1,%r1)
+ pku 0, 0(1,%r15)
+ pku 0, 4095(1)
+ pku 0, 4095(1,%r1)
+ pku 0, 4095(1,%r15)
+ pku 0(%r1), 0(1)
+ pku 0(%r15), 0(1)
+ pku 4095(%r1), 0(1)
+ pku 4095(%r15), 0(1)
+ pku 0, 0(256,%r1)
+ pku 0, 0(256,%r15)
+
#CHECK: plo %r0, 0, %r0, 0 # encoding: [0xee,0x00,0x00,0x00,0x00,0x00]
#CHECK: plo %r2, 0(%r1), %r4, 0(%r15) # encoding: [0xee,0x24,0x10,0x00,0xf0,0x00]
#CHECK: plo %r2, 1(%r1), %r4, 0(%r15) # encoding: [0xee,0x24,0x10,0x01,0xf0,0x00]
@@ -9117,54 +10278,6 @@
risbg %r15,%r0,0,0,0
risbg %r4,%r5,6,7,8
-#CHECK: rnsbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x54]
-#CHECK: rnsbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x54]
-#CHECK: rnsbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x54]
-#CHECK: rnsbg %r0, %r0, 255, 0, 0 # encoding: [0xec,0x00,0xff,0x00,0x00,0x54]
-#CHECK: rnsbg %r0, %r15, 0, 0, 0 # encoding: [0xec,0x0f,0x00,0x00,0x00,0x54]
-#CHECK: rnsbg %r15, %r0, 0, 0, 0 # encoding: [0xec,0xf0,0x00,0x00,0x00,0x54]
-#CHECK: rnsbg %r4, %r5, 6, 7, 8 # encoding: [0xec,0x45,0x06,0x07,0x08,0x54]
-
- rnsbg %r0,%r0,0,0,0
- rnsbg %r0,%r0,0,0,63
- rnsbg %r0,%r0,0,255,0
- rnsbg %r0,%r0,255,0,0
- rnsbg %r0,%r15,0,0,0
- rnsbg %r15,%r0,0,0,0
- rnsbg %r4,%r5,6,7,8
-
-#CHECK: rosbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x56]
-#CHECK: rosbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x56]
-#CHECK: rosbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x56]
-#CHECK: rosbg %r0, %r0, 255, 0, 0 # encoding: [0xec,0x00,0xff,0x00,0x00,0x56]
-#CHECK: rosbg %r0, %r15, 0, 0, 0 # encoding: [0xec,0x0f,0x00,0x00,0x00,0x56]
-#CHECK: rosbg %r15, %r0, 0, 0, 0 # encoding: [0xec,0xf0,0x00,0x00,0x00,0x56]
-#CHECK: rosbg %r4, %r5, 6, 7, 8 # encoding: [0xec,0x45,0x06,0x07,0x08,0x56]
-
- rosbg %r0,%r0,0,0,0
- rosbg %r0,%r0,0,0,63
- rosbg %r0,%r0,0,255,0
- rosbg %r0,%r0,255,0,0
- rosbg %r0,%r15,0,0,0
- rosbg %r15,%r0,0,0,0
- rosbg %r4,%r5,6,7,8
-
-#CHECK: rxsbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x57]
-#CHECK: rxsbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x57]
-#CHECK: rxsbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x57]
-#CHECK: rxsbg %r0, %r0, 255, 0, 0 # encoding: [0xec,0x00,0xff,0x00,0x00,0x57]
-#CHECK: rxsbg %r0, %r15, 0, 0, 0 # encoding: [0xec,0x0f,0x00,0x00,0x00,0x57]
-#CHECK: rxsbg %r15, %r0, 0, 0, 0 # encoding: [0xec,0xf0,0x00,0x00,0x00,0x57]
-#CHECK: rxsbg %r4, %r5, 6, 7, 8 # encoding: [0xec,0x45,0x06,0x07,0x08,0x57]
-
- rxsbg %r0,%r0,0,0,0
- rxsbg %r0,%r0,0,0,63
- rxsbg %r0,%r0,0,255,0
- rxsbg %r0,%r0,255,0,0
- rxsbg %r0,%r15,0,0,0
- rxsbg %r15,%r0,0,0,0
- rxsbg %r4,%r5,6,7,8
-
#CHECK: rll %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x1d]
#CHECK: rll %r15, %r1, 0 # encoding: [0xeb,0xf1,0x00,0x00,0x00,0x1d]
#CHECK: rll %r1, %r15, 0 # encoding: [0xeb,0x1f,0x00,0x00,0x00,0x1d]
@@ -9217,6 +10330,54 @@
rllg %r0,%r0,524287(%r1)
rllg %r0,%r0,524287(%r15)
+#CHECK: rnsbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x54]
+#CHECK: rnsbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x54]
+#CHECK: rnsbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x54]
+#CHECK: rnsbg %r0, %r0, 255, 0, 0 # encoding: [0xec,0x00,0xff,0x00,0x00,0x54]
+#CHECK: rnsbg %r0, %r15, 0, 0, 0 # encoding: [0xec,0x0f,0x00,0x00,0x00,0x54]
+#CHECK: rnsbg %r15, %r0, 0, 0, 0 # encoding: [0xec,0xf0,0x00,0x00,0x00,0x54]
+#CHECK: rnsbg %r4, %r5, 6, 7, 8 # encoding: [0xec,0x45,0x06,0x07,0x08,0x54]
+
+ rnsbg %r0,%r0,0,0,0
+ rnsbg %r0,%r0,0,0,63
+ rnsbg %r0,%r0,0,255,0
+ rnsbg %r0,%r0,255,0,0
+ rnsbg %r0,%r15,0,0,0
+ rnsbg %r15,%r0,0,0,0
+ rnsbg %r4,%r5,6,7,8
+
+#CHECK: rosbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x56]
+#CHECK: rosbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x56]
+#CHECK: rosbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x56]
+#CHECK: rosbg %r0, %r0, 255, 0, 0 # encoding: [0xec,0x00,0xff,0x00,0x00,0x56]
+#CHECK: rosbg %r0, %r15, 0, 0, 0 # encoding: [0xec,0x0f,0x00,0x00,0x00,0x56]
+#CHECK: rosbg %r15, %r0, 0, 0, 0 # encoding: [0xec,0xf0,0x00,0x00,0x00,0x56]
+#CHECK: rosbg %r4, %r5, 6, 7, 8 # encoding: [0xec,0x45,0x06,0x07,0x08,0x56]
+
+ rosbg %r0,%r0,0,0,0
+ rosbg %r0,%r0,0,0,63
+ rosbg %r0,%r0,0,255,0
+ rosbg %r0,%r0,255,0,0
+ rosbg %r0,%r15,0,0,0
+ rosbg %r15,%r0,0,0,0
+ rosbg %r4,%r5,6,7,8
+
+#CHECK: rxsbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x57]
+#CHECK: rxsbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x57]
+#CHECK: rxsbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x57]
+#CHECK: rxsbg %r0, %r0, 255, 0, 0 # encoding: [0xec,0x00,0xff,0x00,0x00,0x57]
+#CHECK: rxsbg %r0, %r15, 0, 0, 0 # encoding: [0xec,0x0f,0x00,0x00,0x00,0x57]
+#CHECK: rxsbg %r15, %r0, 0, 0, 0 # encoding: [0xec,0xf0,0x00,0x00,0x00,0x57]
+#CHECK: rxsbg %r4, %r5, 6, 7, 8 # encoding: [0xec,0x45,0x06,0x07,0x08,0x57]
+
+ rxsbg %r0,%r0,0,0,0
+ rxsbg %r0,%r0,0,0,63
+ rxsbg %r0,%r0,0,255,0
+ rxsbg %r0,%r0,255,0,0
+ rxsbg %r0,%r15,0,0,0
+ rxsbg %r15,%r0,0,0,0
+ rxsbg %r4,%r5,6,7,8
+
#CHECK: s %r0, 0 # encoding: [0x5b,0x00,0x00,0x00]
#CHECK: s %r0, 4095 # encoding: [0x5b,0x00,0x0f,0xff]
#CHECK: s %r0, 0(%r1) # encoding: [0x5b,0x00,0x10,0x00]
@@ -9233,6 +10394,14 @@
s %r0, 4095(%r15,%r1)
s %r15, 0
+#CHECK: sam24 # encoding: [0x01,0x0c]
+#CHECK: sam31 # encoding: [0x01,0x0d]
+#CHECK: sam64 # encoding: [0x01,0x0e]
+
+ sam24
+ sam31
+ sam64
+
#CHECK: sar %a0, %r0 # encoding: [0xb2,0x4e,0x00,0x00]
#CHECK: sar %a0, %r15 # encoding: [0xb2,0x4e,0x00,0x0f]
#CHECK: sar %a15, %r0 # encoding: [0xb2,0x4e,0x00,0xf0]
@@ -9245,14 +10414,6 @@
sar %a7, %r8
sar %a15, %r15
-#CHECK: sam24 # encoding: [0x01,0x0c]
-#CHECK: sam31 # encoding: [0x01,0x0d]
-#CHECK: sam64 # encoding: [0x01,0x0e]
-
- sam24
- sam31
- sam64
-
#CHECK: sdb %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x1b]
#CHECK: sdb %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x1b]
#CHECK: sdb %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x1b]
@@ -9439,6 +10600,50 @@
sl %r0, 4095(%r15,%r1)
sl %r15, 0
+#CHECK: sla %r0, 0 # encoding: [0x8b,0x00,0x00,0x00]
+#CHECK: sla %r7, 0 # encoding: [0x8b,0x70,0x00,0x00]
+#CHECK: sla %r15, 0 # encoding: [0x8b,0xf0,0x00,0x00]
+#CHECK: sla %r0, 4095 # encoding: [0x8b,0x00,0x0f,0xff]
+#CHECK: sla %r0, 0(%r1) # encoding: [0x8b,0x00,0x10,0x00]
+#CHECK: sla %r0, 0(%r15) # encoding: [0x8b,0x00,0xf0,0x00]
+#CHECK: sla %r0, 4095(%r1) # encoding: [0x8b,0x00,0x1f,0xff]
+#CHECK: sla %r0, 4095(%r15) # encoding: [0x8b,0x00,0xff,0xff]
+
+ sla %r0,0
+ sla %r7,0
+ sla %r15,0
+ sla %r0,4095
+ sla %r0,0(%r1)
+ sla %r0,0(%r15)
+ sla %r0,4095(%r1)
+ sla %r0,4095(%r15)
+
+#CHECK: slag %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x0b]
+#CHECK: slag %r15, %r1, 0 # encoding: [0xeb,0xf1,0x00,0x00,0x00,0x0b]
+#CHECK: slag %r1, %r15, 0 # encoding: [0xeb,0x1f,0x00,0x00,0x00,0x0b]
+#CHECK: slag %r15, %r15, 0 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x0b]
+#CHECK: slag %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x0b]
+#CHECK: slag %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x0b]
+#CHECK: slag %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x0b]
+#CHECK: slag %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x0b]
+#CHECK: slag %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0x0b]
+#CHECK: slag %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0x0b]
+#CHECK: slag %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x0b]
+#CHECK: slag %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0x0b]
+
+ slag %r0,%r0,0
+ slag %r15,%r1,0
+ slag %r1,%r15,0
+ slag %r15,%r15,0
+ slag %r0,%r0,-524288
+ slag %r0,%r0,-1
+ slag %r0,%r0,1
+ slag %r0,%r0,524287
+ slag %r0,%r0,0(%r1)
+ slag %r0,%r0,0(%r15)
+ slag %r0,%r0,524287(%r1)
+ slag %r0,%r0,524287(%r15)
+
#CHECK: slb %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x99]
#CHECK: slb %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x99]
#CHECK: slb %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x99]
@@ -9503,6 +10708,42 @@
slbr %r15,%r0
slbr %r7,%r8
+#CHECK: slda %r0, 0 # encoding: [0x8f,0x00,0x00,0x00]
+#CHECK: slda %r6, 0 # encoding: [0x8f,0x60,0x00,0x00]
+#CHECK: slda %r14, 0 # encoding: [0x8f,0xe0,0x00,0x00]
+#CHECK: slda %r0, 4095 # encoding: [0x8f,0x00,0x0f,0xff]
+#CHECK: slda %r0, 0(%r1) # encoding: [0x8f,0x00,0x10,0x00]
+#CHECK: slda %r0, 0(%r15) # encoding: [0x8f,0x00,0xf0,0x00]
+#CHECK: slda %r0, 4095(%r1) # encoding: [0x8f,0x00,0x1f,0xff]
+#CHECK: slda %r0, 4095(%r15) # encoding: [0x8f,0x00,0xff,0xff]
+
+ slda %r0,0
+ slda %r6,0
+ slda %r14,0
+ slda %r0,4095
+ slda %r0,0(%r1)
+ slda %r0,0(%r15)
+ slda %r0,4095(%r1)
+ slda %r0,4095(%r15)
+
+#CHECK: sldl %r0, 0 # encoding: [0x8d,0x00,0x00,0x00]
+#CHECK: sldl %r6, 0 # encoding: [0x8d,0x60,0x00,0x00]
+#CHECK: sldl %r14, 0 # encoding: [0x8d,0xe0,0x00,0x00]
+#CHECK: sldl %r0, 4095 # encoding: [0x8d,0x00,0x0f,0xff]
+#CHECK: sldl %r0, 0(%r1) # encoding: [0x8d,0x00,0x10,0x00]
+#CHECK: sldl %r0, 0(%r15) # encoding: [0x8d,0x00,0xf0,0x00]
+#CHECK: sldl %r0, 4095(%r1) # encoding: [0x8d,0x00,0x1f,0xff]
+#CHECK: sldl %r0, 4095(%r15) # encoding: [0x8d,0x00,0xff,0xff]
+
+ sldl %r0,0
+ sldl %r6,0
+ sldl %r14,0
+ sldl %r0,4095
+ sldl %r0,0(%r1)
+ sldl %r0,0(%r15)
+ sldl %r0,4095(%r1)
+ sldl %r0,4095(%r15)
+
#CHECK: slfi %r0, 0 # encoding: [0xc2,0x05,0x00,0x00,0x00,0x00]
#CHECK: slfi %r0, 4294967295 # encoding: [0xc2,0x05,0xff,0xff,0xff,0xff]
#CHECK: slfi %r15, 0 # encoding: [0xc2,0xf5,0x00,0x00,0x00,0x00]
@@ -9583,24 +10824,6 @@
slgr %r15,%r0
slgr %r7,%r8
-#CHECK: sla %r0, 0 # encoding: [0x8b,0x00,0x00,0x00]
-#CHECK: sla %r7, 0 # encoding: [0x8b,0x70,0x00,0x00]
-#CHECK: sla %r15, 0 # encoding: [0x8b,0xf0,0x00,0x00]
-#CHECK: sla %r0, 4095 # encoding: [0x8b,0x00,0x0f,0xff]
-#CHECK: sla %r0, 0(%r1) # encoding: [0x8b,0x00,0x10,0x00]
-#CHECK: sla %r0, 0(%r15) # encoding: [0x8b,0x00,0xf0,0x00]
-#CHECK: sla %r0, 4095(%r1) # encoding: [0x8b,0x00,0x1f,0xff]
-#CHECK: sla %r0, 4095(%r15) # encoding: [0x8b,0x00,0xff,0xff]
-
- sla %r0,0
- sla %r7,0
- sla %r15,0
- sla %r0,4095
- sla %r0,0(%r1)
- sla %r0,0(%r15)
- sla %r0,4095(%r1)
- sla %r0,4095(%r15)
-
#CHECK: sll %r0, 0 # encoding: [0x89,0x00,0x00,0x00]
#CHECK: sll %r7, 0 # encoding: [0x89,0x70,0x00,0x00]
#CHECK: sll %r15, 0 # encoding: [0x89,0xf0,0x00,0x00]
@@ -9677,6 +10900,36 @@
sly %r0, 524287(%r15,%r1)
sly %r15, 0
+#CHECK: sp 0(1), 0(1) # encoding: [0xfb,0x00,0x00,0x00,0x00,0x00]
+#CHECK: sp 0(1), 0(1,%r1) # encoding: [0xfb,0x00,0x00,0x00,0x10,0x00]
+#CHECK: sp 0(1), 0(1,%r15) # encoding: [0xfb,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: sp 0(1), 4095(1) # encoding: [0xfb,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: sp 0(1), 4095(1,%r1) # encoding: [0xfb,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: sp 0(1), 4095(1,%r15) # encoding: [0xfb,0x00,0x00,0x00,0xff,0xff]
+#CHECK: sp 0(1,%r1), 0(1) # encoding: [0xfb,0x00,0x10,0x00,0x00,0x00]
+#CHECK: sp 0(1,%r15), 0(1) # encoding: [0xfb,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: sp 4095(1,%r1), 0(1) # encoding: [0xfb,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: sp 4095(1,%r15), 0(1) # encoding: [0xfb,0x00,0xff,0xff,0x00,0x00]
+#CHECK: sp 0(16,%r1), 0(1) # encoding: [0xfb,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: sp 0(16,%r15), 0(1) # encoding: [0xfb,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: sp 0(1), 0(16,%r1) # encoding: [0xfb,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: sp 0(1), 0(16,%r15) # encoding: [0xfb,0x0f,0x00,0x00,0xf0,0x00]
+
+ sp 0(1), 0(1)
+ sp 0(1), 0(1,%r1)
+ sp 0(1), 0(1,%r15)
+ sp 0(1), 4095(1)
+ sp 0(1), 4095(1,%r1)
+ sp 0(1), 4095(1,%r15)
+ sp 0(1,%r1), 0(1)
+ sp 0(1,%r15), 0(1)
+ sp 4095(1,%r1), 0(1)
+ sp 4095(1,%r15), 0(1)
+ sp 0(16,%r1), 0(1)
+ sp 0(16,%r15), 0(1)
+ sp 0(1), 0(16,%r1)
+ sp 0(1), 0(16,%r15)
+
#CHECK: spm %r0 # encoding: [0x04,0x00]
#CHECK: spm %r1 # encoding: [0x04,0x10]
#CHECK: spm %r15 # encoding: [0x04,0xf0]
@@ -9801,6 +11054,42 @@
srag %r0,%r0,524287(%r1)
srag %r0,%r0,524287(%r15)
+#CHECK: srda %r0, 0 # encoding: [0x8e,0x00,0x00,0x00]
+#CHECK: srda %r6, 0 # encoding: [0x8e,0x60,0x00,0x00]
+#CHECK: srda %r14, 0 # encoding: [0x8e,0xe0,0x00,0x00]
+#CHECK: srda %r0, 4095 # encoding: [0x8e,0x00,0x0f,0xff]
+#CHECK: srda %r0, 0(%r1) # encoding: [0x8e,0x00,0x10,0x00]
+#CHECK: srda %r0, 0(%r15) # encoding: [0x8e,0x00,0xf0,0x00]
+#CHECK: srda %r0, 4095(%r1) # encoding: [0x8e,0x00,0x1f,0xff]
+#CHECK: srda %r0, 4095(%r15) # encoding: [0x8e,0x00,0xff,0xff]
+
+ srda %r0,0
+ srda %r6,0
+ srda %r14,0
+ srda %r0,4095
+ srda %r0,0(%r1)
+ srda %r0,0(%r15)
+ srda %r0,4095(%r1)
+ srda %r0,4095(%r15)
+
+#CHECK: srdl %r0, 0 # encoding: [0x8c,0x00,0x00,0x00]
+#CHECK: srdl %r6, 0 # encoding: [0x8c,0x60,0x00,0x00]
+#CHECK: srdl %r14, 0 # encoding: [0x8c,0xe0,0x00,0x00]
+#CHECK: srdl %r0, 4095 # encoding: [0x8c,0x00,0x0f,0xff]
+#CHECK: srdl %r0, 0(%r1) # encoding: [0x8c,0x00,0x10,0x00]
+#CHECK: srdl %r0, 0(%r15) # encoding: [0x8c,0x00,0xf0,0x00]
+#CHECK: srdl %r0, 4095(%r1) # encoding: [0x8c,0x00,0x1f,0xff]
+#CHECK: srdl %r0, 4095(%r15) # encoding: [0x8c,0x00,0xff,0xff]
+
+ srdl %r0,0
+ srdl %r6,0
+ srdl %r14,0
+ srdl %r0,4095
+ srdl %r0,0(%r1)
+ srdl %r0,0(%r15)
+ srdl %r0,4095(%r1)
+ srdl %r0,4095(%r15)
+
#CHECK: srl %r0, 0 # encoding: [0x88,0x00,0x00,0x00]
#CHECK: srl %r7, 0 # encoding: [0x88,0x70,0x00,0x00]
#CHECK: srl %r15, 0 # encoding: [0x88,0xf0,0x00,0x00]
@@ -9873,6 +11162,34 @@
srnmt 4095(%r1)
srnmt 4095(%r15)
+#CHECK: srp 0(1), 0, 0 # encoding: [0xf0,0x00,0x00,0x00,0x00,0x00]
+#CHECK: srp 0(1), 0, 15 # encoding: [0xf0,0x0f,0x00,0x00,0x00,0x00]
+#CHECK: srp 0(1), 0(%r1), 0 # encoding: [0xf0,0x00,0x00,0x00,0x10,0x00]
+#CHECK: srp 0(1), 0(%r15), 0 # encoding: [0xf0,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: srp 0(1), 4095, 0 # encoding: [0xf0,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: srp 0(1), 4095(%r1), 0 # encoding: [0xf0,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: srp 0(1), 4095(%r15), 0 # encoding: [0xf0,0x00,0x00,0x00,0xff,0xff]
+#CHECK: srp 0(1,%r1), 0, 0 # encoding: [0xf0,0x00,0x10,0x00,0x00,0x00]
+#CHECK: srp 0(1,%r15), 0, 0 # encoding: [0xf0,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: srp 4095(1,%r1), 0, 0 # encoding: [0xf0,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: srp 4095(1,%r15), 0, 0 # encoding: [0xf0,0x00,0xff,0xff,0x00,0x00]
+#CHECK: srp 0(16,%r1), 0, 0 # encoding: [0xf0,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: srp 0(16,%r15), 0, 0 # encoding: [0xf0,0xf0,0xf0,0x00,0x00,0x00]
+
+ srp 0(1), 0, 0
+ srp 0(1), 0, 15
+ srp 0(1), 0(%r1), 0
+ srp 0(1), 0(%r15), 0
+ srp 0(1), 4095, 0
+ srp 0(1), 4095(%r1), 0
+ srp 0(1), 4095(%r15), 0
+ srp 0(1,%r1), 0, 0
+ srp 0(1,%r15), 0, 0
+ srp 4095(1,%r1), 0, 0
+ srp 4095(1,%r15), 0, 0
+ srp 0(16,%r1), 0, 0
+ srp 0(16,%r15), 0, 0
+
#CHECK: srst %r0, %r0 # encoding: [0xb2,0x5e,0x00,0x00]
#CHECK: srst %r0, %r15 # encoding: [0xb2,0x5e,0x00,0x0f]
#CHECK: srst %r15, %r0 # encoding: [0xb2,0x5e,0x00,0xf0]
@@ -9883,6 +11200,16 @@
srst %r15,%r0
srst %r7,%r8
+#CHECK: srstu %r0, %r0 # encoding: [0xb9,0xbe,0x00,0x00]
+#CHECK: srstu %r0, %r15 # encoding: [0xb9,0xbe,0x00,0x0f]
+#CHECK: srstu %r15, %r0 # encoding: [0xb9,0xbe,0x00,0xf0]
+#CHECK: srstu %r7, %r8 # encoding: [0xb9,0xbe,0x00,0x78]
+
+ srstu %r0,%r0
+ srstu %r0,%r15
+ srstu %r15,%r0
+ srstu %r7,%r8
+
#CHECK: st %r0, 0 # encoding: [0x50,0x00,0x00,0x00]
#CHECK: st %r0, 4095 # encoding: [0x50,0x00,0x0f,0xff]
#CHECK: st %r0, 0(%r1) # encoding: [0x50,0x00,0x10,0x00]
@@ -9970,56 +11297,102 @@
#CHECK: stck 0(%r15) # encoding: [0xb2,0x05,0xf0,0x00]
#CHECK: stck 4095 # encoding: [0xb2,0x05,0x0f,0xff]
#CHECK: stck 4095(%r1) # encoding: [0xb2,0x05,0x1f,0xff]
-#CHECK: stck 4095(%r15) # encoding: [0xb2,0x05,0xff,0xff]
+#CHECK: stck 4095(%r15) # encoding: [0xb2,0x05,0xff,0xff]
stck 0
stck 0(%r1)
stck 0(%r15)
- stck 4095
+ stck 4095
stck 4095(%r1)
stck 4095(%r15)
-#CHECK: stckf 0 # encoding: [0xb2,0x7c,0x00,0x00]
-#CHECK: stckf 0(%r1) # encoding: [0xb2,0x7c,0x10,0x00]
-#CHECK: stckf 0(%r15) # encoding: [0xb2,0x7c,0xf0,0x00]
-#CHECK: stckf 4095 # encoding: [0xb2,0x7c,0x0f,0xff]
-#CHECK: stckf 4095(%r1) # encoding: [0xb2,0x7c,0x1f,0xff]
-#CHECK: stckf 4095(%r15) # encoding: [0xb2,0x7c,0xff,0xff]
-
- stckf 0
- stckf 0(%r1)
- stckf 0(%r15)
- stckf 4095
- stckf 4095(%r1)
- stckf 4095(%r15)
-
#CHECK: stcke 0 # encoding: [0xb2,0x78,0x00,0x00]
#CHECK: stcke 0(%r1) # encoding: [0xb2,0x78,0x10,0x00]
#CHECK: stcke 0(%r15) # encoding: [0xb2,0x78,0xf0,0x00]
#CHECK: stcke 4095 # encoding: [0xb2,0x78,0x0f,0xff]
#CHECK: stcke 4095(%r1) # encoding: [0xb2,0x78,0x1f,0xff]
-#CHECK: stcke 4095(%r15) # encoding: [0xb2,0x78,0xff,0xff]
+#CHECK: stcke 4095(%r15) # encoding: [0xb2,0x78,0xff,0xff]
stcke 0
stcke 0(%r1)
stcke 0(%r15)
- stcke 4095
+ stcke 4095
stcke 4095(%r1)
stcke 4095(%r15)
-#CHECK: stfle 0 # encoding: [0xb2,0xb0,0x00,0x00]
-#CHECK: stfle 0(%r1) # encoding: [0xb2,0xb0,0x10,0x00]
-#CHECK: stfle 0(%r15) # encoding: [0xb2,0xb0,0xf0,0x00]
-#CHECK: stfle 4095 # encoding: [0xb2,0xb0,0x0f,0xff]
-#CHECK: stfle 4095(%r1) # encoding: [0xb2,0xb0,0x1f,0xff]
-#CHECK: stfle 4095(%r15) # encoding: [0xb2,0xb0,0xff,0xff]
+#CHECK: stckf 0 # encoding: [0xb2,0x7c,0x00,0x00]
+#CHECK: stckf 0(%r1) # encoding: [0xb2,0x7c,0x10,0x00]
+#CHECK: stckf 0(%r15) # encoding: [0xb2,0x7c,0xf0,0x00]
+#CHECK: stckf 4095 # encoding: [0xb2,0x7c,0x0f,0xff]
+#CHECK: stckf 4095(%r1) # encoding: [0xb2,0x7c,0x1f,0xff]
+#CHECK: stckf 4095(%r15) # encoding: [0xb2,0x7c,0xff,0xff]
- stfle 0
- stfle 0(%r1)
- stfle 0(%r15)
- stfle 4095
- stfle 4095(%r1)
- stfle 4095(%r15)
+ stckf 0
+ stckf 0(%r1)
+ stckf 0(%r15)
+ stckf 4095
+ stckf 4095(%r1)
+ stckf 4095(%r15)
+
+#CHECK: stcm %r0, 0, 0 # encoding: [0xbe,0x00,0x00,0x00]
+#CHECK: stcm %r0, 15, 4095 # encoding: [0xbe,0x0f,0x0f,0xff]
+#CHECK: stcm %r0, 0, 0(%r1) # encoding: [0xbe,0x00,0x10,0x00]
+#CHECK: stcm %r0, 0, 0(%r15) # encoding: [0xbe,0x00,0xf0,0x00]
+#CHECK: stcm %r15, 15, 4095(%r1) # encoding: [0xbe,0xff,0x1f,0xff]
+#CHECK: stcm %r0, 0, 4095(%r15) # encoding: [0xbe,0x00,0xff,0xff]
+#CHECK: stcm %r15, 0, 0 # encoding: [0xbe,0xf0,0x00,0x00]
+
+ stcm %r0, 0, 0
+ stcm %r0, 15, 4095
+ stcm %r0, 0, 0(%r1)
+ stcm %r0, 0, 0(%r15)
+ stcm %r15, 15, 4095(%r1)
+ stcm %r0, 0, 4095(%r15)
+ stcm %r15, 0, 0
+
+#CHECK: stcmh %r0, 0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x2c]
+#CHECK: stcmh %r0, 0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x2c]
+#CHECK: stcmh %r0, 15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x2c]
+#CHECK: stcmh %r0, 15, 1 # encoding: [0xeb,0x0f,0x00,0x01,0x00,0x2c]
+#CHECK: stcmh %r0, 8, 524287 # encoding: [0xeb,0x08,0x0f,0xff,0x7f,0x2c]
+#CHECK: stcmh %r0, 8, 0(%r1) # encoding: [0xeb,0x08,0x10,0x00,0x00,0x2c]
+#CHECK: stcmh %r0, 4, 0(%r15) # encoding: [0xeb,0x04,0xf0,0x00,0x00,0x2c]
+#CHECK: stcmh %r0, 4, 524287(%r15) # encoding: [0xeb,0x04,0xff,0xff,0x7f,0x2c]
+#CHECK: stcmh %r0, 0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x2c]
+#CHECK: stcmh %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0x2c]
+
+ stcmh %r0, 0, -524288
+ stcmh %r0, 0, -1
+ stcmh %r0, 15, 0
+ stcmh %r0, 15, 1
+ stcmh %r0, 8, 524287
+ stcmh %r0, 8, 0(%r1)
+ stcmh %r0, 4, 0(%r15)
+ stcmh %r0, 4, 524287(%r15)
+ stcmh %r0, 0, 524287(%r1)
+ stcmh %r15, 0, 0
+
+#CHECK: stcmy %r0, 0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x2d]
+#CHECK: stcmy %r0, 0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x2d]
+#CHECK: stcmy %r0, 15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x2d]
+#CHECK: stcmy %r0, 15, 1 # encoding: [0xeb,0x0f,0x00,0x01,0x00,0x2d]
+#CHECK: stcmy %r0, 8, 524287 # encoding: [0xeb,0x08,0x0f,0xff,0x7f,0x2d]
+#CHECK: stcmy %r0, 8, 0(%r1) # encoding: [0xeb,0x08,0x10,0x00,0x00,0x2d]
+#CHECK: stcmy %r0, 4, 0(%r15) # encoding: [0xeb,0x04,0xf0,0x00,0x00,0x2d]
+#CHECK: stcmy %r0, 4, 524287(%r15) # encoding: [0xeb,0x04,0xff,0xff,0x7f,0x2d]
+#CHECK: stcmy %r0, 0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x2d]
+#CHECK: stcmy %r15, 0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0x2d]
+
+ stcmy %r0, 0, -524288
+ stcmy %r0, 0, -1
+ stcmy %r0, 15, 0
+ stcmy %r0, 15, 1
+ stcmy %r0, 8, 524287
+ stcmy %r0, 8, 0(%r1)
+ stcmy %r0, 4, 0(%r15)
+ stcmy %r0, 4, 524287(%r15)
+ stcmy %r0, 0, 524287(%r1)
+ stcmy %r15, 0, 0
#CHECK: stcy %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x72]
#CHECK: stcy %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x72]
@@ -10119,6 +11492,20 @@
stey %f0, 524287(%r15,%r1)
stey %f15, 0
+#CHECK: stfle 0 # encoding: [0xb2,0xb0,0x00,0x00]
+#CHECK: stfle 0(%r1) # encoding: [0xb2,0xb0,0x10,0x00]
+#CHECK: stfle 0(%r15) # encoding: [0xb2,0xb0,0xf0,0x00]
+#CHECK: stfle 4095 # encoding: [0xb2,0xb0,0x0f,0xff]
+#CHECK: stfle 4095(%r1) # encoding: [0xb2,0xb0,0x1f,0xff]
+#CHECK: stfle 4095(%r15) # encoding: [0xb2,0xb0,0xff,0xff]
+
+ stfle 0
+ stfle 0(%r1)
+ stfle 0(%r15)
+ stfle 4095
+ stfle 4095(%r1)
+ stfle 4095(%r15)
+
#CHECK: stfpc 0 # encoding: [0xb2,0x9c,0x00,0x00]
#CHECK: stfpc 0(%r1) # encoding: [0xb2,0x9c,0x10,0x00]
#CHECK: stfpc 0(%r15) # encoding: [0xb2,0x9c,0xf0,0x00]
@@ -10454,28 +11841,6 @@
strl %r7,frob@PLT
strl %r8,frob@PLT
-#CHECK: strvh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3f]
-#CHECK: strvh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3f]
-#CHECK: strvh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3f]
-#CHECK: strvh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3f]
-#CHECK: strvh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3f]
-#CHECK: strvh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3f]
-#CHECK: strvh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3f]
-#CHECK: strvh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3f]
-#CHECK: strvh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3f]
-#CHECK: strvh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3f]
-
- strvh %r0,-524288
- strvh %r0,-1
- strvh %r0,0
- strvh %r0,1
- strvh %r0,524287
- strvh %r0,0(%r1)
- strvh %r0,0(%r15)
- strvh %r0,524287(%r1,%r15)
- strvh %r0,524287(%r15,%r1)
- strvh %r15,0
-
#CHECK: strv %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3e]
#CHECK: strv %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3e]
#CHECK: strv %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3e]
@@ -10520,15 +11885,27 @@
strvg %r0,524287(%r15,%r1)
strvg %r15,0
-#CHECK: svc 0 # encoding: [0x0a,0x00]
-#CHECK: svc 3 # encoding: [0x0a,0x03]
-#CHECK: svc 128 # encoding: [0x0a,0x80]
-#CHECK: svc 255 # encoding: [0x0a,0xff]
+#CHECK: strvh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3f]
+#CHECK: strvh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3f]
+#CHECK: strvh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3f]
+#CHECK: strvh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3f]
+#CHECK: strvh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3f]
+#CHECK: strvh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3f]
+#CHECK: strvh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3f]
+#CHECK: strvh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3f]
+#CHECK: strvh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3f]
+#CHECK: strvh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3f]
- svc 0
- svc 3
- svc 128
- svc 0xff
+ strvh %r0,-524288
+ strvh %r0,-1
+ strvh %r0,0
+ strvh %r0,1
+ strvh %r0,524287
+ strvh %r0,0(%r1)
+ strvh %r0,0(%r15)
+ strvh %r0,524287(%r1,%r15)
+ strvh %r0,524287(%r15,%r1)
+ strvh %r15,0
#CHECK: sty %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x50]
#CHECK: sty %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x50]
@@ -10552,6 +11929,16 @@
sty %r0, 524287(%r15,%r1)
sty %r15, 0
+#CHECK: svc 0 # encoding: [0x0a,0x00]
+#CHECK: svc 3 # encoding: [0x0a,0x03]
+#CHECK: svc 128 # encoding: [0x0a,0x80]
+#CHECK: svc 255 # encoding: [0x0a,0xff]
+
+ svc 0
+ svc 3
+ svc 128
+ svc 0xff
+
#CHECK: sxbr %f0, %f0 # encoding: [0xb3,0x4b,0x00,0x00]
#CHECK: sxbr %f0, %f13 # encoding: [0xb3,0x4b,0x00,0x0d]
#CHECK: sxbr %f8, %f8 # encoding: [0xb3,0x4b,0x00,0x88]
@@ -10734,6 +12121,194 @@
tmy 524287(%r1), 42
tmy 524287(%r15), 42
+#CHECK: tp 0(1) # encoding: [0xeb,0x00,0x00,0x00,0x00,0xc0]
+#CHECK: tp 0(1,%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xc0]
+#CHECK: tp 0(1,%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xc0]
+#CHECK: tp 4095(1,%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x00,0xc0]
+#CHECK: tp 4095(1,%r15) # encoding: [0xeb,0x00,0xff,0xff,0x00,0xc0]
+#CHECK: tp 0(16,%r1) # encoding: [0xeb,0xf0,0x10,0x00,0x00,0xc0]
+#CHECK: tp 0(16,%r15) # encoding: [0xeb,0xf0,0xf0,0x00,0x00,0xc0]
+
+ tp 0(1)
+ tp 0(1,%r1)
+ tp 0(1,%r15)
+ tp 4095(1,%r1)
+ tp 4095(1,%r15)
+ tp 0(16,%r1)
+ tp 0(16,%r15)
+
+#CHECK: tr 0(1), 0 # encoding: [0xdc,0x00,0x00,0x00,0x00,0x00]
+#CHECK: tr 0(1), 0(%r1) # encoding: [0xdc,0x00,0x00,0x00,0x10,0x00]
+#CHECK: tr 0(1), 0(%r15) # encoding: [0xdc,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: tr 0(1), 4095 # encoding: [0xdc,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: tr 0(1), 4095(%r1) # encoding: [0xdc,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: tr 0(1), 4095(%r15) # encoding: [0xdc,0x00,0x00,0x00,0xff,0xff]
+#CHECK: tr 0(1,%r1), 0 # encoding: [0xdc,0x00,0x10,0x00,0x00,0x00]
+#CHECK: tr 0(1,%r15), 0 # encoding: [0xdc,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: tr 4095(1,%r1), 0 # encoding: [0xdc,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: tr 4095(1,%r15), 0 # encoding: [0xdc,0x00,0xff,0xff,0x00,0x00]
+#CHECK: tr 0(256,%r1), 0 # encoding: [0xdc,0xff,0x10,0x00,0x00,0x00]
+#CHECK: tr 0(256,%r15), 0 # encoding: [0xdc,0xff,0xf0,0x00,0x00,0x00]
+
+ tr 0(1), 0
+ tr 0(1), 0(%r1)
+ tr 0(1), 0(%r15)
+ tr 0(1), 4095
+ tr 0(1), 4095(%r1)
+ tr 0(1), 4095(%r15)
+ tr 0(1,%r1), 0
+ tr 0(1,%r15), 0
+ tr 4095(1,%r1), 0
+ tr 4095(1,%r15), 0
+ tr 0(256,%r1), 0
+ tr 0(256,%r15), 0
+
+#CHECK: tre %r0, %r0 # encoding: [0xb2,0xa5,0x00,0x00]
+#CHECK: tre %r0, %r15 # encoding: [0xb2,0xa5,0x00,0x0f]
+#CHECK: tre %r14, %r0 # encoding: [0xb2,0xa5,0x00,0xe0]
+#CHECK: tre %r6, %r8 # encoding: [0xb2,0xa5,0x00,0x68]
+
+ tre %r0, %r0
+ tre %r0, %r15
+ tre %r14, %r0
+ tre %r6, %r8
+
+#CHECK: troo %r0, %r0 # encoding: [0xb9,0x93,0x00,0x00]
+#CHECK: troo %r0, %r15 # encoding: [0xb9,0x93,0x00,0x0f]
+#CHECK: troo %r14, %r0 # encoding: [0xb9,0x93,0x00,0xe0]
+#CHECK: troo %r6, %r8 # encoding: [0xb9,0x93,0x00,0x68]
+#CHECK: troo %r4, %r13, 0 # encoding: [0xb9,0x93,0x00,0x4d]
+#CHECK: troo %r4, %r13, 15 # encoding: [0xb9,0x93,0xf0,0x4d]
+
+ troo %r0, %r0
+ troo %r0, %r15
+ troo %r14, %r0
+ troo %r6, %r8
+ troo %r4, %r13, 0
+ troo %r4, %r13, 15
+
+#CHECK: trot %r0, %r0 # encoding: [0xb9,0x92,0x00,0x00]
+#CHECK: trot %r0, %r15 # encoding: [0xb9,0x92,0x00,0x0f]
+#CHECK: trot %r14, %r0 # encoding: [0xb9,0x92,0x00,0xe0]
+#CHECK: trot %r6, %r8 # encoding: [0xb9,0x92,0x00,0x68]
+#CHECK: trot %r4, %r13, 0 # encoding: [0xb9,0x92,0x00,0x4d]
+#CHECK: trot %r4, %r13, 15 # encoding: [0xb9,0x92,0xf0,0x4d]
+
+ trot %r0, %r0
+ trot %r0, %r15
+ trot %r14, %r0
+ trot %r6, %r8
+ trot %r4, %r13, 0
+ trot %r4, %r13, 15
+
+#CHECK: trt 0(1), 0 # encoding: [0xdd,0x00,0x00,0x00,0x00,0x00]
+#CHECK: trt 0(1), 0(%r1) # encoding: [0xdd,0x00,0x00,0x00,0x10,0x00]
+#CHECK: trt 0(1), 0(%r15) # encoding: [0xdd,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: trt 0(1), 4095 # encoding: [0xdd,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: trt 0(1), 4095(%r1) # encoding: [0xdd,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: trt 0(1), 4095(%r15) # encoding: [0xdd,0x00,0x00,0x00,0xff,0xff]
+#CHECK: trt 0(1,%r1), 0 # encoding: [0xdd,0x00,0x10,0x00,0x00,0x00]
+#CHECK: trt 0(1,%r15), 0 # encoding: [0xdd,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: trt 4095(1,%r1), 0 # encoding: [0xdd,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: trt 4095(1,%r15), 0 # encoding: [0xdd,0x00,0xff,0xff,0x00,0x00]
+#CHECK: trt 0(256,%r1), 0 # encoding: [0xdd,0xff,0x10,0x00,0x00,0x00]
+#CHECK: trt 0(256,%r15), 0 # encoding: [0xdd,0xff,0xf0,0x00,0x00,0x00]
+
+ trt 0(1), 0
+ trt 0(1), 0(%r1)
+ trt 0(1), 0(%r15)
+ trt 0(1), 4095
+ trt 0(1), 4095(%r1)
+ trt 0(1), 4095(%r15)
+ trt 0(1,%r1), 0
+ trt 0(1,%r15), 0
+ trt 4095(1,%r1), 0
+ trt 4095(1,%r15), 0
+ trt 0(256,%r1), 0
+ trt 0(256,%r15), 0
+
+#CHECK: trte %r0, %r0 # encoding: [0xb9,0xbf,0x00,0x00]
+#CHECK: trte %r0, %r15 # encoding: [0xb9,0xbf,0x00,0x0f]
+#CHECK: trte %r14, %r0 # encoding: [0xb9,0xbf,0x00,0xe0]
+#CHECK: trte %r6, %r8 # encoding: [0xb9,0xbf,0x00,0x68]
+#CHECK: trte %r4, %r13, 0 # encoding: [0xb9,0xbf,0x00,0x4d]
+#CHECK: trte %r4, %r13, 15 # encoding: [0xb9,0xbf,0xf0,0x4d]
+
+ trte %r0, %r0
+ trte %r0, %r15
+ trte %r14, %r0
+ trte %r6, %r8
+ trte %r4, %r13, 0
+ trte %r4, %r13, 15
+
+#CHECK: trto %r0, %r0 # encoding: [0xb9,0x91,0x00,0x00]
+#CHECK: trto %r0, %r15 # encoding: [0xb9,0x91,0x00,0x0f]
+#CHECK: trto %r14, %r0 # encoding: [0xb9,0x91,0x00,0xe0]
+#CHECK: trto %r6, %r8 # encoding: [0xb9,0x91,0x00,0x68]
+#CHECK: trto %r4, %r13, 0 # encoding: [0xb9,0x91,0x00,0x4d]
+#CHECK: trto %r4, %r13, 15 # encoding: [0xb9,0x91,0xf0,0x4d]
+
+ trto %r0, %r0
+ trto %r0, %r15
+ trto %r14, %r0
+ trto %r6, %r8
+ trto %r4, %r13, 0
+ trto %r4, %r13, 15
+
+#CHECK: trtr 0(1), 0 # encoding: [0xd0,0x00,0x00,0x00,0x00,0x00]
+#CHECK: trtr 0(1), 0(%r1) # encoding: [0xd0,0x00,0x00,0x00,0x10,0x00]
+#CHECK: trtr 0(1), 0(%r15) # encoding: [0xd0,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: trtr 0(1), 4095 # encoding: [0xd0,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: trtr 0(1), 4095(%r1) # encoding: [0xd0,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: trtr 0(1), 4095(%r15) # encoding: [0xd0,0x00,0x00,0x00,0xff,0xff]
+#CHECK: trtr 0(1,%r1), 0 # encoding: [0xd0,0x00,0x10,0x00,0x00,0x00]
+#CHECK: trtr 0(1,%r15), 0 # encoding: [0xd0,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: trtr 4095(1,%r1), 0 # encoding: [0xd0,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: trtr 4095(1,%r15), 0 # encoding: [0xd0,0x00,0xff,0xff,0x00,0x00]
+#CHECK: trtr 0(256,%r1), 0 # encoding: [0xd0,0xff,0x10,0x00,0x00,0x00]
+#CHECK: trtr 0(256,%r15), 0 # encoding: [0xd0,0xff,0xf0,0x00,0x00,0x00]
+
+ trtr 0(1), 0
+ trtr 0(1), 0(%r1)
+ trtr 0(1), 0(%r15)
+ trtr 0(1), 4095
+ trtr 0(1), 4095(%r1)
+ trtr 0(1), 4095(%r15)
+ trtr 0(1,%r1), 0
+ trtr 0(1,%r15), 0
+ trtr 4095(1,%r1), 0
+ trtr 4095(1,%r15), 0
+ trtr 0(256,%r1), 0
+ trtr 0(256,%r15), 0
+
+#CHECK: trtre %r0, %r0 # encoding: [0xb9,0xbd,0x00,0x00]
+#CHECK: trtre %r0, %r15 # encoding: [0xb9,0xbd,0x00,0x0f]
+#CHECK: trtre %r14, %r0 # encoding: [0xb9,0xbd,0x00,0xe0]
+#CHECK: trtre %r6, %r8 # encoding: [0xb9,0xbd,0x00,0x68]
+#CHECK: trtre %r4, %r13, 0 # encoding: [0xb9,0xbd,0x00,0x4d]
+#CHECK: trtre %r4, %r13, 15 # encoding: [0xb9,0xbd,0xf0,0x4d]
+
+ trtre %r0, %r0
+ trtre %r0, %r15
+ trtre %r14, %r0
+ trtre %r6, %r8
+ trtre %r4, %r13, 0
+ trtre %r4, %r13, 15
+
+#CHECK: trtt %r0, %r0 # encoding: [0xb9,0x90,0x00,0x00]
+#CHECK: trtt %r0, %r15 # encoding: [0xb9,0x90,0x00,0x0f]
+#CHECK: trtt %r14, %r0 # encoding: [0xb9,0x90,0x00,0xe0]
+#CHECK: trtt %r6, %r8 # encoding: [0xb9,0x90,0x00,0x68]
+#CHECK: trtt %r4, %r13, 0 # encoding: [0xb9,0x90,0x00,0x4d]
+#CHECK: trtt %r4, %r13, 15 # encoding: [0xb9,0x90,0xf0,0x4d]
+
+ trtt %r0, %r0
+ trtt %r0, %r15
+ trtt %r14, %r0
+ trtt %r6, %r8
+ trtt %r4, %r13, 0
+ trtt %r4, %r13, 15
+
#CHECK: ts 0 # encoding: [0x93,0x00,0x00,0x00]
#CHECK: ts 0(%r1) # encoding: [0x93,0x00,0x10,0x00]
#CHECK: ts 0(%r15) # encoding: [0x93,0x00,0xf0,0x00]
@@ -10748,6 +12323,92 @@
ts 4095(%r1)
ts 4095(%r15)
+#CHECK: unpk 0(1), 0(1) # encoding: [0xf3,0x00,0x00,0x00,0x00,0x00]
+#CHECK: unpk 0(1), 0(1,%r1) # encoding: [0xf3,0x00,0x00,0x00,0x10,0x00]
+#CHECK: unpk 0(1), 0(1,%r15) # encoding: [0xf3,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: unpk 0(1), 4095(1) # encoding: [0xf3,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: unpk 0(1), 4095(1,%r1) # encoding: [0xf3,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: unpk 0(1), 4095(1,%r15) # encoding: [0xf3,0x00,0x00,0x00,0xff,0xff]
+#CHECK: unpk 0(1,%r1), 0(1) # encoding: [0xf3,0x00,0x10,0x00,0x00,0x00]
+#CHECK: unpk 0(1,%r15), 0(1) # encoding: [0xf3,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: unpk 4095(1,%r1), 0(1) # encoding: [0xf3,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: unpk 4095(1,%r15), 0(1) # encoding: [0xf3,0x00,0xff,0xff,0x00,0x00]
+#CHECK: unpk 0(16,%r1), 0(1) # encoding: [0xf3,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: unpk 0(16,%r15), 0(1) # encoding: [0xf3,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: unpk 0(1), 0(16,%r1) # encoding: [0xf3,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: unpk 0(1), 0(16,%r15) # encoding: [0xf3,0x0f,0x00,0x00,0xf0,0x00]
+
+ unpk 0(1), 0(1)
+ unpk 0(1), 0(1,%r1)
+ unpk 0(1), 0(1,%r15)
+ unpk 0(1), 4095(1)
+ unpk 0(1), 4095(1,%r1)
+ unpk 0(1), 4095(1,%r15)
+ unpk 0(1,%r1), 0(1)
+ unpk 0(1,%r15), 0(1)
+ unpk 4095(1,%r1), 0(1)
+ unpk 4095(1,%r15), 0(1)
+ unpk 0(16,%r1), 0(1)
+ unpk 0(16,%r15), 0(1)
+ unpk 0(1), 0(16,%r1)
+ unpk 0(1), 0(16,%r15)
+
+#CHECK: unpka 0(1), 0 # encoding: [0xea,0x00,0x00,0x00,0x00,0x00]
+#CHECK: unpka 0(1), 0(%r1) # encoding: [0xea,0x00,0x00,0x00,0x10,0x00]
+#CHECK: unpka 0(1), 0(%r15) # encoding: [0xea,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: unpka 0(1), 4095 # encoding: [0xea,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: unpka 0(1), 4095(%r1) # encoding: [0xea,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: unpka 0(1), 4095(%r15) # encoding: [0xea,0x00,0x00,0x00,0xff,0xff]
+#CHECK: unpka 0(1,%r1), 0 # encoding: [0xea,0x00,0x10,0x00,0x00,0x00]
+#CHECK: unpka 0(1,%r15), 0 # encoding: [0xea,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: unpka 4095(1,%r1), 0 # encoding: [0xea,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: unpka 4095(1,%r15), 0 # encoding: [0xea,0x00,0xff,0xff,0x00,0x00]
+#CHECK: unpka 0(256,%r1), 0 # encoding: [0xea,0xff,0x10,0x00,0x00,0x00]
+#CHECK: unpka 0(256,%r15), 0 # encoding: [0xea,0xff,0xf0,0x00,0x00,0x00]
+
+ unpka 0(1), 0
+ unpka 0(1), 0(%r1)
+ unpka 0(1), 0(%r15)
+ unpka 0(1), 4095
+ unpka 0(1), 4095(%r1)
+ unpka 0(1), 4095(%r15)
+ unpka 0(1,%r1), 0
+ unpka 0(1,%r15), 0
+ unpka 4095(1,%r1), 0
+ unpka 4095(1,%r15), 0
+ unpka 0(256,%r1), 0
+ unpka 0(256,%r15), 0
+
+#CHECK: unpku 0(1), 0 # encoding: [0xe2,0x00,0x00,0x00,0x00,0x00]
+#CHECK: unpku 0(1), 0(%r1) # encoding: [0xe2,0x00,0x00,0x00,0x10,0x00]
+#CHECK: unpku 0(1), 0(%r15) # encoding: [0xe2,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: unpku 0(1), 4095 # encoding: [0xe2,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: unpku 0(1), 4095(%r1) # encoding: [0xe2,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: unpku 0(1), 4095(%r15) # encoding: [0xe2,0x00,0x00,0x00,0xff,0xff]
+#CHECK: unpku 0(1,%r1), 0 # encoding: [0xe2,0x00,0x10,0x00,0x00,0x00]
+#CHECK: unpku 0(1,%r15), 0 # encoding: [0xe2,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: unpku 4095(1,%r1), 0 # encoding: [0xe2,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: unpku 4095(1,%r15), 0 # encoding: [0xe2,0x00,0xff,0xff,0x00,0x00]
+#CHECK: unpku 0(256,%r1), 0 # encoding: [0xe2,0xff,0x10,0x00,0x00,0x00]
+#CHECK: unpku 0(256,%r15), 0 # encoding: [0xe2,0xff,0xf0,0x00,0x00,0x00]
+
+ unpku 0(1), 0
+ unpku 0(1), 0(%r1)
+ unpku 0(1), 0(%r15)
+ unpku 0(1), 4095
+ unpku 0(1), 4095(%r1)
+ unpku 0(1), 4095(%r15)
+ unpku 0(1,%r1), 0
+ unpku 0(1,%r15), 0
+ unpku 4095(1,%r1), 0
+ unpku 4095(1,%r15), 0
+ unpku 0(256,%r1), 0
+ unpku 0(256,%r15), 0
+
+#CHECK: upt # encoding: [0x01,0x02]
+
+ upt
+
#CHECK: x %r0, 0 # encoding: [0x57,0x00,0x00,0x00]
#CHECK: x %r0, 4095 # encoding: [0x57,0x00,0x0f,0xff]
#CHECK: x %r0, 0(%r1) # encoding: [0x57,0x00,0x10,0x00]
@@ -10907,3 +12568,33 @@
xy %r0, 524287(%r1,%r15)
xy %r0, 524287(%r15,%r1)
xy %r15, 0
+
+#CHECK: zap 0(1), 0(1) # encoding: [0xf8,0x00,0x00,0x00,0x00,0x00]
+#CHECK: zap 0(1), 0(1,%r1) # encoding: [0xf8,0x00,0x00,0x00,0x10,0x00]
+#CHECK: zap 0(1), 0(1,%r15) # encoding: [0xf8,0x00,0x00,0x00,0xf0,0x00]
+#CHECK: zap 0(1), 4095(1) # encoding: [0xf8,0x00,0x00,0x00,0x0f,0xff]
+#CHECK: zap 0(1), 4095(1,%r1) # encoding: [0xf8,0x00,0x00,0x00,0x1f,0xff]
+#CHECK: zap 0(1), 4095(1,%r15) # encoding: [0xf8,0x00,0x00,0x00,0xff,0xff]
+#CHECK: zap 0(1,%r1), 0(1) # encoding: [0xf8,0x00,0x10,0x00,0x00,0x00]
+#CHECK: zap 0(1,%r15), 0(1) # encoding: [0xf8,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: zap 4095(1,%r1), 0(1) # encoding: [0xf8,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: zap 4095(1,%r15), 0(1) # encoding: [0xf8,0x00,0xff,0xff,0x00,0x00]
+#CHECK: zap 0(16,%r1), 0(1) # encoding: [0xf8,0xf0,0x10,0x00,0x00,0x00]
+#CHECK: zap 0(16,%r15), 0(1) # encoding: [0xf8,0xf0,0xf0,0x00,0x00,0x00]
+#CHECK: zap 0(1), 0(16,%r1) # encoding: [0xf8,0x0f,0x00,0x00,0x10,0x00]
+#CHECK: zap 0(1), 0(16,%r15) # encoding: [0xf8,0x0f,0x00,0x00,0xf0,0x00]
+
+ zap 0(1), 0(1)
+ zap 0(1), 0(1,%r1)
+ zap 0(1), 0(1,%r15)
+ zap 0(1), 4095(1)
+ zap 0(1), 4095(1,%r1)
+ zap 0(1), 4095(1,%r15)
+ zap 0(1,%r1), 0(1)
+ zap 0(1,%r15), 0(1)
+ zap 4095(1,%r1), 0(1)
+ zap 4095(1,%r15), 0(1)
+ zap 0(16,%r1), 0(1)
+ zap 0(16,%r15), 0(1)
+ zap 0(1), 0(16,%r1)
+ zap 0(1), 0(16,%r15)
diff --git a/test/Object/Inputs/COFF/empty-drectve.yaml b/test/Object/Inputs/COFF/empty-drectve.yaml
new file mode 100644
index 000000000000..af288807e3ad
--- /dev/null
+++ b/test/Object/Inputs/COFF/empty-drectve.yaml
@@ -0,0 +1,14 @@
+--- !COFF
+header:
+ Machine: IMAGE_FILE_MACHINE_I386
+sections:
+ - Name: .drectve
+ Characteristics: [ IMAGE_SCN_LNK_INFO, IMAGE_SCN_LNK_REMOVE ]
+ SectionData: ''
+symbols:
+ - Name: .drectve
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
diff --git a/test/Object/X86/archive-symbol-table.s b/test/Object/X86/archive-symbol-table.s
new file mode 100644
index 000000000000..2e6fcbed60b1
--- /dev/null
+++ b/test/Object/X86/archive-symbol-table.s
@@ -0,0 +1,19 @@
+# RUN: llvm-mc %s -o %t.o -filetype=obj -triple=x86_64-pc-linux
+# RUN: rm -f %t
+# RUN: llvm-ar rcs %t %t.o
+# RUN: llvm-nm -M %t | FileCheck %s
+
+# Test that weak undefined symbols don't show up in the archive symbol
+# table.
+
+.global foo
+foo:
+.weak bar
+.quad bar
+
+# CHECK: Archive map
+# CHECK-NEXT: foo in archive-symbol-table.s.tmp.o
+# CHECK-NOT: in
+# CHECK: archive-symbol-table.s.tmp.o
+# CHECK-NEXT: w bar
+# CHECK-NEXT: T foo
diff --git a/test/Object/X86/nm-ir.ll b/test/Object/X86/nm-ir.ll
index 29f7a5c7018c..c90f67b15160 100644
--- a/test/Object/X86/nm-ir.ll
+++ b/test/Object/X86/nm-ir.ll
@@ -12,7 +12,7 @@
; CHECK-NEXT: C g3
; CHECK-NOT: g4
; CHECK-NEXT: T global_asm_sym
-; CHECK-NEXT: D ifunc_f1
+; CHECK-NEXT: T ifunc_f1
; CHECK-NEXT: t local_asm_sym
; CHECK-NEXT: U undef_asm_sym
diff --git a/test/Object/coff-empty-drectve.test b/test/Object/coff-empty-drectve.test
new file mode 100644
index 000000000000..f76d7bf72716
--- /dev/null
+++ b/test/Object/coff-empty-drectve.test
@@ -0,0 +1,3 @@
+RUN: yaml2obj %p/Inputs/COFF/empty-drectve.yaml | llvm-readobj -coff-directives - | FileCheck %s
+
+CHECK: Directive(s): {{$}}
diff --git a/test/Object/invalid.test b/test/Object/invalid.test
index fc1a77b2c0c0..dcbac32f7196 100644
--- a/test/Object/invalid.test
+++ b/test/Object/invalid.test
@@ -53,7 +53,7 @@ INVALID-SYMTAB-SIZE: size is not a multiple of sh_entsize
RUN: not llvm-readobj -t %p/Inputs/invalid-xindex-size.elf 2>&1 | FileCheck --check-prefix=INVALID-XINDEX-SIZE %s
-INVALID-XINDEX-SIZE: Invalid data was encountered while parsing the file.
+INVALID-XINDEX-SIZE: Invalid data was encountered while parsing the file
RUN: not llvm-readobj -t %p/Inputs/invalid-e_shnum.elf 2>&1 | FileCheck --check-prefix=INVALID-SH-NUM %s
INVALID-SH-NUM: invalid e_phentsize
@@ -77,7 +77,7 @@ RUN: FileCheck --check-prefix=INVALID-SECTION-SIZE2 %s
INVALID-SECTION-SIZE2: invalid section offset
RUN: not llvm-readobj -t %p/Inputs/invalid-sections-num.elf 2>&1 | FileCheck --check-prefix=INVALID-SECTION-NUM %s
-INVALID-SECTION-NUM: Invalid data was encountered while parsing the file.
+INVALID-SECTION-NUM: Invalid data was encountered while parsing the file
RUN: not llvm-readobj -r %p/Inputs/invalid-rel-sym.elf 2>&1 | FileCheck --check-prefix=INVALID-REL-SYM %s
INVALID-REL-SYM: invalid section offset
diff --git a/test/Object/wasm-invalid-start.test b/test/Object/wasm-invalid-start.test
new file mode 100644
index 000000000000..12f75676345f
--- /dev/null
+++ b/test/Object/wasm-invalid-start.test
@@ -0,0 +1,10 @@
+# RUN: yaml2obj %s | not llvm-objdump -h - 2>&1 | FileCheck %s
+
+!WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: START
+ StartFunction: 0
+
+# CHECK: {{.*}}: Invalid start function
diff --git a/test/ObjectYAML/wasm/export_section.yaml b/test/ObjectYAML/wasm/export_section.yaml
index 1d1a16fb8335..89ebee328246 100644
--- a/test/ObjectYAML/wasm/export_section.yaml
+++ b/test/ObjectYAML/wasm/export_section.yaml
@@ -5,12 +5,18 @@ FileHeader:
Sections:
- Type: EXPORT
Exports:
- - Name: foo
- Kind: FUNCTION
- Index: 0
- - Name: bar
+ - Name: function_export
Kind: FUNCTION
Index: 1
+ - Name: global_export
+ Kind: GLOBAL
+ Index: 1
+ - Name: memory_export
+ Kind: MEMORY
+ Index: 0
+ - Name: table_export
+ Kind: TABLE
+ Index: 0
...
# CHECK: --- !WASM
# CHECK: FileHeader:
@@ -18,10 +24,16 @@ Sections:
# CHECK: Sections:
# CHECK: - Type: EXPORT
# CHECK: Exports:
-# CHECK: - Name: foo
-# CHECK: Kind: FUNCTION
-# CHECK: Index: 0
-# CHECK: - Name: bar
+# CHECK: - Name: function_export
# CHECK: Kind: FUNCTION
# CHECK: Index: 1
+# CHECK: - Name: global_export
+# CHECK: Kind: GLOBAL
+# CHECK: Index: 1
+# CHECK: - Name: memory_export
+# CHECK: Kind: MEMORY
+# CHECK: Index: 0
+# CHECK: - Name: table_export
+# CHECK: Kind: TABLE
+# CHECK: Index: 0
# CHECK: ...
diff --git a/test/ObjectYAML/wasm/function_section.yaml b/test/ObjectYAML/wasm/function_section.yaml
index 39e6b75d5cdc..571b762787a2 100644
--- a/test/ObjectYAML/wasm/function_section.yaml
+++ b/test/ObjectYAML/wasm/function_section.yaml
@@ -4,9 +4,7 @@ FileHeader:
Version: 0x00000001
Sections:
- Type: FUNCTION
- FunctionTypes:
- - 1
- - 0
+ FunctionTypes: [ 1, 0 ]
...
# CHECK: --- !WASM
# CHECK: FileHeader:
diff --git a/test/ObjectYAML/wasm/import_section.yaml b/test/ObjectYAML/wasm/import_section.yaml
index 52f466a00b66..115d4cc0bd6b 100644
--- a/test/ObjectYAML/wasm/import_section.yaml
+++ b/test/ObjectYAML/wasm/import_section.yaml
@@ -9,19 +9,32 @@ Sections:
ParamTypes:
- I32
- Type: IMPORT
- Imports:
+ Imports:
- Module: foo
- Field: bar
+ Field: imported_function
Kind: FUNCTION
SigIndex: 0
- Module: fiz
- Field: baz
+ Field: imported_global
Kind: GLOBAL
GlobalType: I32
GlobalMutable: false
- - Type: FUNCTION
- FunctionTypes:
- - 0
+ - Module: foo
+ Field: imported_memory
+ Kind: MEMORY
+ Memory:
+ Flags: 0x00000001
+ Initial: 0x00000010
+ Maximum: 0x00000011
+ - Module: foo
+ Field: imported_table
+ Kind: TABLE
+ Table:
+ ElemType: ANYFUNC
+ Limits:
+ Flags: 0x00000001
+ Initial: 0x00000020
+ Maximum: 0x00000022
...
# CHECK: --- !WASM
# CHECK: FileHeader:
@@ -30,12 +43,28 @@ Sections:
# CHECK: - Type: IMPORT
# CHECK: Imports:
# CHECK: - Module: foo
-# CHECK: Field: bar
+# CHECK: Field: imported_function
# CHECK: Kind: FUNCTION
# CHECK: SigIndex: 0
# CHECK: - Module: fiz
-# CHECK: Field: baz
+# CHECK: Field: imported_global
# CHECK: Kind: GLOBAL
# CHECK: GlobalType: I32
# CHECK: GlobalMutable: false
+# CHECK: - Module: foo
+# CHECK: Field: imported_memory
+# CHECK: Kind: MEMORY
+# CHECK: Memory:
+# CHECK: Flags: 0x00000001
+# CHECK: Initial: 0x00000010
+# CHECK: Maximum: 0x00000011
+# CHECK: - Module: foo
+# CHECK: Field: imported_table
+# CHECK: Kind: TABLE
+# CHECK: Table:
+# CHECK: ElemType: ANYFUNC
+# CHECK: Limits:
+# CHECK: Flags: 0x00000001
+# CHECK: Initial: 0x00000020
+# CHECK: Maximum: 0x00000022
# CHECK: ...
diff --git a/test/ObjectYAML/wasm/start_section.yaml b/test/ObjectYAML/wasm/start_section.yaml
index 41301a620037..38feebcdf993 100644
--- a/test/ObjectYAML/wasm/start_section.yaml
+++ b/test/ObjectYAML/wasm/start_section.yaml
@@ -1,8 +1,17 @@
# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
--- !WASM
FileHeader:
Version: 0x00000001
Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - F32
+ - F32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 0 ]
- Type: START
StartFunction: 1
...
diff --git a/test/TableGen/AsmVariant.td b/test/TableGen/AsmVariant.td
index 6c50241e5ae1..cb5d32385d3b 100644
--- a/test/TableGen/AsmVariant.td
+++ b/test/TableGen/AsmVariant.td
@@ -1,6 +1,6 @@
// RUN: llvm-tblgen -gen-asm-matcher -I %p/../../include %s | FileCheck %s
-// Check that cpecifying AsmVariant works correctly
+// Check that specifying AsmVariant works correctly
include "llvm/Target/Target.td"
diff --git a/test/TableGen/RegisterEncoder.td b/test/TableGen/RegisterEncoder.td
new file mode 100644
index 000000000000..a0472c5feffa
--- /dev/null
+++ b/test/TableGen/RegisterEncoder.td
@@ -0,0 +1,35 @@
+// RUN: llvm-tblgen -gen-emitter -I %p/../../include %s | FileCheck %s
+
+// Check that EncoderMethod for RegisterOperand is working correctly
+
+include "llvm/Target/Target.td"
+
+def ArchInstrInfo : InstrInfo { }
+
+def Arch : Target {
+ let InstructionSet = ArchInstrInfo;
+}
+
+def Reg : Register<"reg">;
+
+def RegClass : RegisterClass<"foo", [i32], 0, (add Reg)>;
+
+def RegOperand : RegisterOperand<RegClass> {
+ let EncoderMethod = "barEncoder";
+}
+
+def foo : Instruction {
+ let Size = 1;
+
+ let OutOperandList = (outs);
+ let InOperandList = (ins RegOperand:$bar);
+
+ bits<8> bar;
+ bits<8> Inst = bar;
+}
+
+// CHECK: case ::foo: {
+// CHECK: op = barEncoder
+// CHECK: Value |= op & UINT64_C(255);
+// CHECK: break;
+// CHECK: }
\ No newline at end of file
diff --git a/test/Transforms/CodeExtractor/ExtractedFnEntryCount.ll b/test/Transforms/CodeExtractor/ExtractedFnEntryCount.ll
index 509a4d7bfa18..8313cfac04ee 100644
--- a/test/Transforms/CodeExtractor/ExtractedFnEntryCount.ll
+++ b/test/Transforms/CodeExtractor/ExtractedFnEntryCount.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -partial-inliner -S | FileCheck %s
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis -S | FileCheck %s
; This test checks to make sure that the CodeExtractor
; properly sets the entry count for the function that is
diff --git a/test/Transforms/CodeExtractor/MultipleExitBranchProb.ll b/test/Transforms/CodeExtractor/MultipleExitBranchProb.ll
index 425e96973596..8e362080dc48 100644
--- a/test/Transforms/CodeExtractor/MultipleExitBranchProb.ll
+++ b/test/Transforms/CodeExtractor/MultipleExitBranchProb.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -partial-inliner -max-num-inline-blocks=2 -S | FileCheck %s
+; RUN: opt < %s -partial-inliner -max-num-inline-blocks=2 -skip-partial-inlining-cost-analysis -S | FileCheck %s
; This test checks to make sure that CodeExtractor updates
; the exit branch probabilities for multiple exit blocks.
diff --git a/test/Transforms/CodeExtractor/PartialInlineAnd.ll b/test/Transforms/CodeExtractor/PartialInlineAnd.ll
index e981a5ba5816..d32d834d2df3 100644
--- a/test/Transforms/CodeExtractor/PartialInlineAnd.ll
+++ b/test/Transforms/CodeExtractor/PartialInlineAnd.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -partial-inliner -S | FileCheck %s
; RUN: opt < %s -passes=partial-inliner -S | FileCheck %s
-; RUN: opt < %s -partial-inliner -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT %s
-; RUN: opt < %s -passes=partial-inliner -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT %s
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT %s
+; RUN: opt < %s -passes=partial-inliner -skip-partial-inlining-cost-analysis -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT %s
; Function Attrs: nounwind uwtable
define i32 @bar(i32 %arg) local_unnamed_addr #0 {
diff --git a/test/Transforms/CodeExtractor/PartialInlineEntryUpdate.ll b/test/Transforms/CodeExtractor/PartialInlineEntryUpdate.ll
new file mode 100644
index 000000000000..3a7a9752e507
--- /dev/null
+++ b/test/Transforms/CodeExtractor/PartialInlineEntryUpdate.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -skip-partial-inlining-cost-analysis -partial-inliner -S | FileCheck %s
+; RUN: opt < %s -skip-partial-inlining-cost-analysis -passes=partial-inliner -S | FileCheck %s
+
+define i32 @Func(i1 %cond, i32* align 4 %align.val) !prof !1 {
+; CHECK: @Func({{.*}}) !prof [[REMAINCOUNT:![0-9]+]]
+entry:
+ br i1 %cond, label %if.then, label %return
+if.then:
+ ; Dummy store to have more than 0 uses
+ store i32 10, i32* %align.val, align 4
+ br label %return
+return: ; preds = %entry
+ ret i32 0
+}
+
+define internal i32 @Caller1(i1 %cond, i32* align 2 %align.val) !prof !3{
+entry:
+; CHECK-LABEL: @Caller1
+; CHECK: br
+; CHECK: call void @Func.1_
+; CHECK: br
+; CHECK: call void @Func.1_
+ %val = call i32 @Func(i1 %cond, i32* %align.val)
+ %val2 = call i32 @Func(i1 %cond, i32* %align.val)
+ ret i32 %val
+}
+
+define internal i32 @Caller2(i1 %cond, i32* align 2 %align.val) !prof !2{
+entry:
+; CHECK-LABEL: @Caller2
+; CHECK: br
+; CHECK: call void @Func.1_
+ %val = call i32 @Func(i1 %cond, i32* %align.val)
+ ret i32 %val
+}
+
+; CHECK: [[REMAINCOUNT]] = !{!"function_entry_count", i64 150}
+!1 = !{!"function_entry_count", i64 200}
+!2 = !{!"function_entry_count", i64 10}
+!3 = !{!"function_entry_count", i64 20}
+
diff --git a/test/Transforms/CodeExtractor/PartialInlineHighCost.ll b/test/Transforms/CodeExtractor/PartialInlineHighCost.ll
new file mode 100644
index 000000000000..e43a94dc6c37
--- /dev/null
+++ b/test/Transforms/CodeExtractor/PartialInlineHighCost.ll
@@ -0,0 +1,107 @@
+; The outlined region has high frequency and the outlining
+; call sequence is expensive (input, output, multiple exits, etc.)
+; RUN: opt < %s -partial-inliner -max-num-inline-blocks=2 -S | FileCheck %s
+; RUN: opt < %s -passes=partial-inliner -max-num-inline-blocks=2 -S | FileCheck %s
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis -max-num-inline-blocks=2 -S | FileCheck --check-prefix=NOCOST %s
+; RUN: opt < %s -passes=partial-inliner -skip-partial-inlining-cost-analysis -max-num-inline-blocks=2 -S | FileCheck --check-prefix=NOCOST %s
+
+
+; Function Attrs: nounwind
+define i32 @bar_hot_outline_region(i32 %arg) local_unnamed_addr #0 {
+bb:
+ %tmp = icmp slt i32 %arg, 0
+ br i1 %tmp, label %bb1, label %bb16, !prof !1
+
+bb1: ; preds = %bb
+ %tmp2 = tail call i32 (...) @foo() #0
+ %tmp3 = tail call i32 (...) @foo() #0
+ %tmp4 = tail call i32 (...) @foo() #0
+ %tmp5 = tail call i32 (...) @foo() #0
+ %tmp6 = tail call i32 (...) @foo() #0
+ %tmp7 = tail call i32 (...) @foo() #0
+ %tmp8 = add nsw i32 %arg, 1
+ %tmp9 = tail call i32 @goo(i32 %tmp8) #0
+ %tmp10 = tail call i32 (...) @foo() #0
+ %tmp11 = icmp eq i32 %tmp10, 0
+ br i1 %tmp11, label %bb12, label %bb16
+
+bb12: ; preds = %bb1
+ %tmp13 = tail call i32 (...) @foo() #0
+ %tmp14 = icmp eq i32 %tmp13, 0
+ %tmp15 = select i1 %tmp14, i32 0, i32 3
+ br label %bb16
+
+bb16: ; preds = %bb12, %bb1, %bb
+ %tmp17 = phi i32 [ 2, %bb1 ], [ %tmp15, %bb12 ], [ 0, %bb ]
+ ret i32 %tmp17
+}
+
+define i32 @bar_cold_outline_region(i32 %arg) local_unnamed_addr #0 {
+bb:
+ %tmp = icmp slt i32 %arg, 0
+ br i1 %tmp, label %bb1, label %bb16, !prof !2
+
+bb1: ; preds = %bb
+ %tmp2 = tail call i32 (...) @foo() #0
+ %tmp3 = tail call i32 (...) @foo() #0
+ %tmp4 = tail call i32 (...) @foo() #0
+ %tmp5 = tail call i32 (...) @foo() #0
+ %tmp6 = tail call i32 (...) @foo() #0
+ %tmp7 = tail call i32 (...) @foo() #0
+ %tmp8 = add nsw i32 %arg, 1
+ %tmp9 = tail call i32 @goo(i32 %tmp8) #0
+ %tmp10 = tail call i32 (...) @foo() #0
+ %tmp11 = icmp eq i32 %tmp10, 0
+ br i1 %tmp11, label %bb12, label %bb16
+
+bb12: ; preds = %bb1
+ %tmp13 = tail call i32 (...) @foo() #0
+ %tmp14 = icmp eq i32 %tmp13, 0
+ %tmp15 = select i1 %tmp14, i32 0, i32 3
+ br label %bb16
+
+bb16: ; preds = %bb12, %bb1, %bb
+ %tmp17 = phi i32 [ 2, %bb1 ], [ %tmp15, %bb12 ], [ 0, %bb ]
+ ret i32 %tmp17
+}
+
+; Function Attrs: nounwind
+declare i32 @foo(...) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare i32 @goo(i32) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+define i32 @dummy_caller(i32 %arg) local_unnamed_addr #0 {
+bb:
+; CHECK-LABEL: @dummy_caller
+; CHECK-NOT: br i1
+; CHECK-NOT: call{{.*}}bar_hot_outline_region.
+; NOCOST-LABEL: @dummy_caller
+; NOCOST: br i1
+; NOCOST: call{{.*}}bar_hot_outline_region.
+
+ %tmp = tail call i32 @bar_hot_outline_region(i32 %arg)
+ ret i32 %tmp
+}
+
+define i32 @dummy_caller2(i32 %arg) local_unnamed_addr #0 {
+bb:
+; CHECK-LABEL: @dummy_caller2
+; CHECK: br i1
+; CHECK: call{{.*}}bar_cold_outline_region.
+; NOCOST-LABEL: @dummy_caller2
+; NOCOST: br i1
+; NOCOST: call{{.*}}bar_cold_outline_region.
+
+ %tmp = tail call i32 @bar_cold_outline_region(i32 %arg)
+ ret i32 %tmp
+}
+
+attributes #0 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 5.0.0 (trunk 301898)"}
+!1 = !{!"branch_weights", i32 2000, i32 1}
+!2 = !{!"branch_weights", i32 1, i32 100}
diff --git a/test/Transforms/CodeExtractor/PartialInlineOr.ll b/test/Transforms/CodeExtractor/PartialInlineOr.ll
index 5408b4faaf70..758945c7ade5 100644
--- a/test/Transforms/CodeExtractor/PartialInlineOr.ll
+++ b/test/Transforms/CodeExtractor/PartialInlineOr.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -partial-inliner -S | FileCheck %s
-; RUN: opt < %s -passes=partial-inliner -S | FileCheck %s
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis -S | FileCheck %s
+; RUN: opt < %s -passes=partial-inliner -skip-partial-inlining-cost-analysis -S | FileCheck %s
; RUN: opt < %s -partial-inliner -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT %s
; RUN: opt < %s -passes=partial-inliner -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT %s
diff --git a/test/Transforms/CodeExtractor/PartialInlineOrAnd.ll b/test/Transforms/CodeExtractor/PartialInlineOrAnd.ll
index 282d300fadb9..fb6d1c335361 100644
--- a/test/Transforms/CodeExtractor/PartialInlineOrAnd.ll
+++ b/test/Transforms/CodeExtractor/PartialInlineOrAnd.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -partial-inliner -S | FileCheck %s
; RUN: opt < %s -passes=partial-inliner -S | FileCheck %s
-; RUN: opt < %s -partial-inliner -max-num-inline-blocks=3 -S | FileCheck --check-prefix=LIMIT3 %s
-; RUN: opt < %s -passes=partial-inliner -max-num-inline-blocks=3 -S | FileCheck --check-prefix=LIMIT3 %s
+; RUN: opt < %s -partial-inliner -max-num-inline-blocks=3 -skip-partial-inlining-cost-analysis -S | FileCheck --check-prefix=LIMIT3 %s
+; RUN: opt < %s -passes=partial-inliner -max-num-inline-blocks=3 -skip-partial-inlining-cost-analysis -S | FileCheck --check-prefix=LIMIT3 %s
; RUN: opt < %s -partial-inliner -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT2 %s
; RUN: opt < %s -passes=partial-inliner -max-num-inline-blocks=2 -S | FileCheck --check-prefix=LIMIT2 %s
diff --git a/test/Transforms/CodeExtractor/SingleCondition.ll b/test/Transforms/CodeExtractor/SingleCondition.ll
index 90cda889a21b..4110cd95b7ee 100644
--- a/test/Transforms/CodeExtractor/SingleCondition.ll
+++ b/test/Transforms/CodeExtractor/SingleCondition.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -partial-inliner -S | FileCheck %s
-; RUN: opt < %s -passes=partial-inliner -S | FileCheck %s
+; RUN: opt < %s -skip-partial-inlining-cost-analysis -partial-inliner -S | FileCheck %s
+; RUN: opt < %s -skip-partial-inlining-cost-analysis -passes=partial-inliner -S | FileCheck %s
define internal i32 @inlinedFunc(i1 %cond, i32* align 4 %align.val) {
entry:
diff --git a/test/Transforms/CodeExtractor/X86/InheritTargetAttributes.ll b/test/Transforms/CodeExtractor/X86/InheritTargetAttributes.ll
index 41d883c8c378..0f8a71907d85 100644
--- a/test/Transforms/CodeExtractor/X86/InheritTargetAttributes.ll
+++ b/test/Transforms/CodeExtractor/X86/InheritTargetAttributes.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -partial-inliner | llc -filetype=null
-; RUN: opt < %s -partial-inliner -S | FileCheck %s
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis | llc -filetype=null
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis -S | FileCheck %s
; This testcase checks to see if CodeExtractor properly inherits
; target specific attributes for the extracted function. This can
; cause certain instructions that depend on the attributes to not
diff --git a/test/Transforms/CodeGenPrepare/section-samplepgo.ll b/test/Transforms/CodeGenPrepare/section-samplepgo.ll
new file mode 100644
index 000000000000..93d2a5f2542c
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/section-samplepgo.ll
@@ -0,0 +1,57 @@
+; RUN: opt < %s -codegenprepare -S | FileCheck %s
+
+target triple = "x86_64-pc-linux-gnu"
+
+; This tests that hot/cold functions get correct section prefix assigned
+
+; CHECK: hot_func{{.*}}!section_prefix ![[HOT_ID:[0-9]+]]
+; The entry is hot
+define void @hot_func() !prof !15 {
+ ret void
+}
+
+; CHECK: hot_call_func{{.*}}!section_prefix ![[HOT_ID]]
+; The sum of 2 callsites is hot
+define void @hot_call_func() !prof !16 {
+ call void @hot_func(), !prof !17
+ call void @hot_func(), !prof !17
+ ret void
+}
+
+; CHECK-NOT: normal_func{{.*}}!section_prefix
+; The sum of all callsites is neither hot nor cold
+define void @normal_func() !prof !16 {
+ call void @hot_func(), !prof !17
+ call void @hot_func(), !prof !18
+ call void @hot_func(), !prof !18
+ ret void
+}
+
+; CHECK: cold_func{{.*}}!section_prefix ![[COLD_ID:[0-9]+]]
+; The entry and the callsite are both cold
+define void @cold_func() !prof !16 {
+ call void @hot_func(), !prof !18
+ ret void
+}
+
+; CHECK: ![[HOT_ID]] = !{!"function_section_prefix", !".hot"}
+; CHECK: ![[COLD_ID]] = !{!"function_section_prefix", !".unlikely"}
+!llvm.module.flags = !{!1}
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"SampleProfile"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 1000}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!15 = !{!"function_entry_count", i64 1000}
+!16 = !{!"function_entry_count", i64 1}
+!17 = !{!"branch_weights", i32 80}
+!18 = !{!"branch_weights", i32 1}
diff --git a/test/Transforms/CodeGenPrepare/section.ll b/test/Transforms/CodeGenPrepare/section.ll
index 2c96612e1baf..4f3144e7fc73 100644
--- a/test/Transforms/CodeGenPrepare/section.ll
+++ b/test/Transforms/CodeGenPrepare/section.ll
@@ -10,32 +10,32 @@ define void @hot_func() !prof !15 {
ret void
}
-; CHECK: hot_call_func{{.*}}!section_prefix ![[HOT_ID]]
-; The sum of 2 callsites are hot
-define void @hot_call_func() !prof !16 {
+; For instrumentation based PGO, we should only look at entry counts,
+; not call site VP metadata (which can exist on value profiled memcpy,
+; or possibly left behind after static analysis based devirtualization).
+; CHECK: cold_func1{{.*}}!section_prefix ![[COLD_ID:[0-9]+]]
+define void @cold_func1() !prof !16 {
call void @hot_func(), !prof !17
call void @hot_func(), !prof !17
ret void
}
-; CHECK-NOT: normal_func{{.*}}!section_prefix
-; The sum of all callsites are neither hot or cold
-define void @normal_func() !prof !16 {
+; CHECK: cold_func2{{.*}}!section_prefix
+define void @cold_func2() !prof !16 {
call void @hot_func(), !prof !17
call void @hot_func(), !prof !18
call void @hot_func(), !prof !18
ret void
}
-; CHECK: cold_func{{.*}}!section_prefix ![[COLD_ID:[0-9]+]]
-; The entry and the callsite are both cold
-define void @cold_func() !prof !16 {
+; CHECK: cold_func3{{.*}}!section_prefix ![[COLD_ID]]
+define void @cold_func3() !prof !16 {
call void @hot_func(), !prof !18
ret void
}
; CHECK: ![[HOT_ID]] = !{!"function_section_prefix", !".hot"}
-; CHECK: ![[COLD_ID]] = !{!"function_section_prefix", !".cold"}
+; CHECK: ![[COLD_ID]] = !{!"function_section_prefix", !".unlikely"}
!llvm.module.flags = !{!1}
!1 = !{i32 1, !"ProfileSummary", !2}
!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
diff --git a/test/Transforms/ConstProp/calls-math-finite.ll b/test/Transforms/ConstProp/calls-math-finite.ll
new file mode 100644
index 000000000000..00041f3e4a4b
--- /dev/null
+++ b/test/Transforms/ConstProp/calls-math-finite.ll
@@ -0,0 +1,83 @@
+; RUN: opt < %s -constprop -S | FileCheck %s
+
+; Test to verify constant folding can occur when math
+; routines are mapped to the __<func>_finite versions
+; of functions due to __FINITE_MATH_ONLY__ being
+; enabled on headers. All calls should constant
+; fold away in this test.
+
+declare double @__acos_finite(double) #0
+declare float @__acosf_finite(float) #0
+declare double @__asin_finite(double) #0
+declare float @__asinf_finite(float) #0
+declare double @__atan2_finite(double, double) #0
+declare float @__atan2f_finite(float, float) #0
+declare double @__cosh_finite(double) #0
+declare float @__coshf_finite(float) #0
+declare double @__exp2_finite(double) #0
+declare float @__exp2f_finite(float) #0
+declare double @__exp_finite(double) #0
+declare float @__expf_finite(float) #0
+declare double @__log10_finite(double) #0
+declare float @__log10f_finite(float) #0
+declare double @__log_finite(double) #0
+declare float @__logf_finite(float) #0
+declare double @__pow_finite(double, double) #0
+declare float @__powf_finite(float, float) #0
+declare double @__sinh_finite(double) #0
+declare float @__sinhf_finite(float) #0
+
+attributes #0 = { nounwind readnone }
+
+define void @T() {
+; CHECK-LABEL: @T(
+
+; CHECK-NOT: call
+; CHECK: ret
+
+ %slot = alloca double
+ %slotf = alloca float
+
+ %ACOS = call fast double @__acos_finite(double 1.000000e+00)
+ store double %ACOS, double* %slot
+ %ASIN = call fast double @__asin_finite(double 1.000000e+00)
+ store double %ASIN, double* %slot
+ %ATAN2 = call fast double @__atan2_finite(double 3.000000e+00, double 4.000000e+00)
+ store double %ATAN2, double* %slot
+ %COSH = call fast double @__cosh_finite(double 3.000000e+00)
+ store double %COSH, double* %slot
+ %EXP = call fast double @__exp_finite(double 3.000000e+00)
+ store double %EXP, double* %slot
+ %EXP2 = call fast double @__exp2_finite(double 3.000000e+00)
+ store double %EXP2, double* %slot
+ %LOG = call fast double @__log_finite(double 3.000000e+00)
+ store double %LOG, double* %slot
+ %LOG10 = call fast double @__log10_finite(double 3.000000e+00)
+ store double %LOG10, double* %slot
+ %POW = call fast double @__pow_finite(double 1.000000e+00, double 4.000000e+00)
+ store double %POW, double* %slot
+ %SINH = call fast double @__sinh_finite(double 3.000000e+00)
+ store double %SINH, double* %slot
+
+ %ACOSF = call fast float @__acosf_finite(float 1.000000e+00)
+ store float %ACOSF, float* %slotf
+ %ASINF = call fast float @__asinf_finite(float 1.000000e+00)
+ store float %ASINF, float* %slotf
+ %ATAN2F = call fast float @__atan2f_finite(float 3.000000e+00, float 4.000000e+00)
+ store float %ATAN2F, float* %slotf
+ %COSHF = call fast float @__coshf_finite(float 3.000000e+00)
+ store float %COSHF, float* %slotf
+ %EXPF = call fast float @__expf_finite(float 3.000000e+00)
+ store float %EXPF, float* %slotf
+ %EXP2F = call fast float @__exp2f_finite(float 3.000000e+00)
+ store float %EXP2F, float* %slotf
+ %LOGF = call fast float @__logf_finite(float 3.000000e+00)
+ store float %LOGF, float* %slotf
+ %LOG10F = call fast float @__log10f_finite(float 3.000000e+00)
+ store float %LOG10F, float* %slotf
+ %POWF = call fast float @__powf_finite(float 3.000000e+00, float 4.000000e+00)
+ store float %POWF, float* %slotf
+ %SINHF = call fast float @__sinhf_finite(float 3.000000e+00)
+ store float %SINHF, float* %slotf
+ ret void
+}
diff --git a/test/Transforms/ConstProp/calls.ll b/test/Transforms/ConstProp/calls.ll
index 1175ea522175..161637cc92b8 100644
--- a/test/Transforms/ConstProp/calls.ll
+++ b/test/Transforms/ConstProp/calls.ll
@@ -184,212 +184,6 @@ define double @T() {
ret double %d
}
-define i1 @test_sse_cvts_exact() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvts_exact(
-; CHECK-NOT: call
-; CHECK: ret i1 true
-entry:
- %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 7.0, double undef>) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 7.0, double undef>) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %cmp02 = icmp eq i32 %sum02, 10
- %cmp13 = icmp eq i64 %sum13, 10
- %b = and i1 %cmp02, %cmp13
- ret i1 %b
-}
-
-; Inexact values should not fold as they are dependent on rounding mode
-define i1 @test_sse_cvts_inexact() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvts_inexact(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 1.75, double undef>) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 1.75, double undef>) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %cmp02 = icmp eq i32 %sum02, 4
- %cmp13 = icmp eq i64 %sum13, 4
- %b = and i1 %cmp02, %cmp13
- ret i1 %b
-}
-
-; FLT_MAX/DBL_MAX should not fold
-define i1 @test_sse_cvts_max() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvts_max(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %fm = bitcast <4 x i32> <i32 2139095039, i32 undef, i32 undef, i32 undef> to <4 x float>
- %dm = bitcast <2 x i64> <i64 9218868437227405311, i64 undef> to <2 x double>
- %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %sum02.sext = sext i32 %sum02 to i64
- %b = icmp eq i64 %sum02.sext, %sum13
- ret i1 %b
-}
-
-; INF should not fold
-define i1 @test_sse_cvts_inf() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvts_inf(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %fm = bitcast <4 x i32> <i32 2139095040, i32 undef, i32 undef, i32 undef> to <4 x float>
- %dm = bitcast <2 x i64> <i64 9218868437227405312, i64 undef> to <2 x double>
- %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %sum02.sext = sext i32 %sum02 to i64
- %b = icmp eq i64 %sum02.sext, %sum13
- ret i1 %b
-}
-
-; NAN should not fold
-define i1 @test_sse_cvts_nan() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvts_nan(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %fm = bitcast <4 x i32> <i32 2143289344, i32 undef, i32 undef, i32 undef> to <4 x float>
- %dm = bitcast <2 x i64> <i64 9221120237041090560, i64 undef> to <2 x double>
- %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %sum02.sext = sext i32 %sum02 to i64
- %b = icmp eq i64 %sum02.sext, %sum13
- ret i1 %b
-}
-
-define i1 @test_sse_cvtts_exact() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvtts_exact(
-; CHECK-NOT: call
-; CHECK: ret i1 true
-entry:
- %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 7.0, double undef>) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 7.0, double undef>) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %cmp02 = icmp eq i32 %sum02, 10
- %cmp13 = icmp eq i64 %sum13, 10
- %b = and i1 %cmp02, %cmp13
- ret i1 %b
-}
-
-define i1 @test_sse_cvtts_inexact() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvtts_inexact(
-; CHECK-NOT: call
-; CHECK: ret i1 true
-entry:
- %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 1.75, double undef>) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 1.75, double undef>) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %cmp02 = icmp eq i32 %sum02, 2
- %cmp13 = icmp eq i64 %sum13, 2
- %b = and i1 %cmp02, %cmp13
- ret i1 %b
-}
-
-; FLT_MAX/DBL_MAX should not fold
-define i1 @test_sse_cvtts_max() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvtts_max(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %fm = bitcast <4 x i32> <i32 2139095039, i32 undef, i32 undef, i32 undef> to <4 x float>
- %dm = bitcast <2 x i64> <i64 9218868437227405311, i64 undef> to <2 x double>
- %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %sum02.sext = sext i32 %sum02 to i64
- %b = icmp eq i64 %sum02.sext, %sum13
- ret i1 %b
-}
-
-; INF should not fold
-define i1 @test_sse_cvtts_inf() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvtts_inf(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %fm = bitcast <4 x i32> <i32 2139095040, i32 undef, i32 undef, i32 undef> to <4 x float>
- %dm = bitcast <2 x i64> <i64 9218868437227405312, i64 undef> to <2 x double>
- %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %sum02.sext = sext i32 %sum02 to i64
- %b = icmp eq i64 %sum02.sext, %sum13
- ret i1 %b
-}
-
-; NAN should not fold
-define i1 @test_sse_cvtts_nan() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvtts_nan(
-; CHECK: call
-; CHECK: call
-; CHECK: call
-; CHECK: call
-entry:
- %fm = bitcast <4 x i32> <i32 2143289344, i32 undef, i32 undef, i32 undef> to <4 x float>
- %dm = bitcast <2 x i64> <i64 9221120237041090560, i64 undef> to <2 x double>
- %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
- %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
- %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
- %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
- %sum02 = add i32 %i0, %i2
- %sum13 = add i64 %i1, %i3
- %sum02.sext = sext i32 %sum02 to i64
- %b = icmp eq i64 %sum02.sext, %sum13
- ret i1 %b
-}
-
-declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
-declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
-declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
-declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
-declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
-declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
-declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
-declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
-
define double @test_intrinsic_pow() nounwind uwtable ssp {
entry:
; CHECK-LABEL: @test_intrinsic_pow(
diff --git a/test/Transforms/ConstProp/sse.ll b/test/Transforms/ConstProp/sse.ll
new file mode 100644
index 000000000000..cc37c96c1ff1
--- /dev/null
+++ b/test/Transforms/ConstProp/sse.ll
@@ -0,0 +1,208 @@
+; RUN: opt < %s -constprop -S | FileCheck %s
+; REQUIRES: x86
+
+define i1 @test_sse_cvts_exact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_exact(
+; CHECK-NOT: call
+; CHECK: ret i1 true
+entry:
+ %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 7.0, double undef>) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 7.0, double undef>) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %cmp02 = icmp eq i32 %sum02, 10
+ %cmp13 = icmp eq i64 %sum13, 10
+ %b = and i1 %cmp02, %cmp13
+ ret i1 %b
+}
+
+; Inexact values should not fold, since the result depends on the rounding mode
+define i1 @test_sse_cvts_inexact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_inexact(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+ %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 1.75, double undef>) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 1.75, double undef>) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %cmp02 = icmp eq i32 %sum02, 4
+ %cmp13 = icmp eq i64 %sum13, 4
+ %b = and i1 %cmp02, %cmp13
+ ret i1 %b
+}
+
+; FLT_MAX/DBL_MAX should not fold
+define i1 @test_sse_cvts_max() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_max(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
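+ ; 2139095039 = 0x7F7FFFFF (FLT_MAX), 9218868437227405311 = 0x7FEFFFFFFFFFFFFF (DBL_MAX)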
+ %fm = bitcast <4 x i32> <i32 2139095039, i32 undef, i32 undef, i32 undef> to <4 x float>
+ %dm = bitcast <2 x i64> <i64 9218868437227405311, i64 undef> to <2 x double>
+ %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %sum02.sext = sext i32 %sum02 to i64
+ %b = icmp eq i64 %sum02.sext, %sum13
+ ret i1 %b
+}
+
+; INF should not fold
+define i1 @test_sse_cvts_inf() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_inf(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
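+ ; 2139095040 = 0x7F800000 (+Inf float), 9218868437227405312 = 0x7FF0000000000000 (+Inf double)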
+ %fm = bitcast <4 x i32> <i32 2139095040, i32 undef, i32 undef, i32 undef> to <4 x float>
+ %dm = bitcast <2 x i64> <i64 9218868437227405312, i64 undef> to <2 x double>
+ %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %sum02.sext = sext i32 %sum02 to i64
+ %b = icmp eq i64 %sum02.sext, %sum13
+ ret i1 %b
+}
+
+; NAN should not fold
+define i1 @test_sse_cvts_nan() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_nan(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
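+ ; 2143289344 = 0x7FC00000 (float qNaN), 9221120237041090560 = 0x7FF8000000000000 (double qNaN)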
+ %fm = bitcast <4 x i32> <i32 2143289344, i32 undef, i32 undef, i32 undef> to <4 x float>
+ %dm = bitcast <2 x i64> <i64 9221120237041090560, i64 undef> to <2 x double>
+ %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %sum02.sext = sext i32 %sum02 to i64
+ %b = icmp eq i64 %sum02.sext, %sum13
+ ret i1 %b
+}
+
+define i1 @test_sse_cvtts_exact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_exact(
+; CHECK-NOT: call
+; CHECK: ret i1 true
+entry:
+ %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 7.0, double undef>) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 7.0, double undef>) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %cmp02 = icmp eq i32 %sum02, 10
+ %cmp13 = icmp eq i64 %sum13, 10
+ %b = and i1 %cmp02, %cmp13
+ ret i1 %b
+}
+
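+; Truncating conversions always round toward zero, so inexact values fold even though the rounding mode is unknown.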
+define i1 @test_sse_cvtts_inexact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_inexact(
+; CHECK-NOT: call
+; CHECK: ret i1 true
+entry:
+ %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 1.75, double undef>) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 1.75, double undef>) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %cmp02 = icmp eq i32 %sum02, 2
+ %cmp13 = icmp eq i64 %sum13, 2
+ %b = and i1 %cmp02, %cmp13
+ ret i1 %b
+}
+
+; FLT_MAX/DBL_MAX should not fold
+define i1 @test_sse_cvtts_max() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_max(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+ %fm = bitcast <4 x i32> <i32 2139095039, i32 undef, i32 undef, i32 undef> to <4 x float>
+ %dm = bitcast <2 x i64> <i64 9218868437227405311, i64 undef> to <2 x double>
+ %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %sum02.sext = sext i32 %sum02 to i64
+ %b = icmp eq i64 %sum02.sext, %sum13
+ ret i1 %b
+}
+
+; INF should not fold
+define i1 @test_sse_cvtts_inf() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_inf(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+ %fm = bitcast <4 x i32> <i32 2139095040, i32 undef, i32 undef, i32 undef> to <4 x float>
+ %dm = bitcast <2 x i64> <i64 9218868437227405312, i64 undef> to <2 x double>
+ %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %sum02.sext = sext i32 %sum02 to i64
+ %b = icmp eq i64 %sum02.sext, %sum13
+ ret i1 %b
+}
+
+; NAN should not fold
+define i1 @test_sse_cvtts_nan() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_nan(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+ %fm = bitcast <4 x i32> <i32 2143289344, i32 undef, i32 undef, i32 undef> to <4 x float>
+ %dm = bitcast <2 x i64> <i64 9221120237041090560, i64 undef> to <2 x double>
+ %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
+ %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
+ %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
+ %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
+ %sum02 = add i32 %i0, %i2
+ %sum13 = add i64 %i1, %i3
+ %sum02.sext = sext i32 %sum02 to i64
+ %b = icmp eq i64 %sum02.sext, %sum13
+ ret i1 %b
+}
+
+declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
+declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
diff --git a/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll b/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll
new file mode 100644
index 000000000000..5da0e3c199db
--- /dev/null
+++ b/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll
@@ -0,0 +1,218 @@
+; Check that we can handle edge splits leading into a landingpad
+; RUN: opt < %s -coro-split -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: define internal fastcc void @f.resume(
+define void @f(i1 %cond) "coroutine.presplit"="1" personality i32 0 {
+entry:
+ %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+ %size = tail call i64 @llvm.coro.size.i64()
+ %alloc = call i8* @malloc(i64 %size)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+ %sp = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %sp, label %coro.ret [
+ i8 0, label %resume
+ i8 1, label %cleanup
+ ]
+
+resume:
+ br i1 %cond, label %invoke1, label %invoke2
+
+invoke1:
+ invoke void @may_throw1()
+ to label %unreach unwind label %pad.with.phi
+invoke2:
+ invoke void @may_throw2()
+ to label %unreach unwind label %pad.with.phi
+
+; Verify that we cloned the landing pad on every edge and inserted a reload of the spilled value
+
+; CHECK: pad.with.phi.from.invoke2:
+; CHECK: %0 = landingpad { i8*, i32 }
+; CHECK: catch i8* null
+; CHECK: br label %pad.with.phi
+
+; CHECK: pad.with.phi.from.invoke1:
+; CHECK: %1 = landingpad { i8*, i32 }
+; CHECK: catch i8* null
+; CHECK: br label %pad.with.phi
+
+; CHECK: pad.with.phi:
+; CHECK: %val = phi i32 [ 0, %pad.with.phi.from.invoke1 ], [ 1, %pad.with.phi.from.invoke2 ]
+; CHECK: %lp = phi { i8*, i32 } [ %0, %pad.with.phi.from.invoke2 ], [ %1, %pad.with.phi.from.invoke1 ]
+; CHECK: %exn = extractvalue { i8*, i32 } %lp, 0
+; CHECK: call i8* @__cxa_begin_catch(i8* %exn)
+; CHECK: call void @use_val(i32 %val)
+; CHECK: call void @__cxa_end_catch()
+; CHECK: call void @free(i8* %vFrame)
+; CHECK: ret void
+
+pad.with.phi:
+ %val = phi i32 [ 0, %invoke1 ], [ 1, %invoke2 ]
+ %lp = landingpad { i8*, i32 }
+ catch i8* null
+ %exn = extractvalue { i8*, i32 } %lp, 0
+ call i8* @__cxa_begin_catch(i8* %exn)
+ call void @use_val(i32 %val)
+ call void @__cxa_end_catch()
+ br label %cleanup
+
+cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
+ %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+ call void @free(i8* %mem)
+ br label %coro.ret
+
+coro.ret:
+ call i1 @llvm.coro.end(i8* null, i1 false)
+ ret void
+
+unreach:
+ unreachable
+}
+
+; CHECK-LABEL: define internal fastcc void @g.resume(
+define void @g(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
+entry:
+ %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+ %size = tail call i64 @llvm.coro.size.i64()
+ %alloc = call i8* @malloc(i64 %size)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+ %sp = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %sp, label %coro.ret [
+ i8 0, label %resume
+ i8 1, label %cleanup
+ ]
+
+resume:
+ br i1 %cond, label %invoke1, label %invoke2
+
+invoke1:
+ invoke void @may_throw1()
+ to label %unreach unwind label %pad.with.phi
+invoke2:
+ invoke void @may_throw2()
+ to label %unreach unwind label %pad.with.phi
+
+; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value
+
+; CHECK: pad.with.phi.from.invoke2:
+; CHECK: %0 = cleanuppad within none []
+; CHECK: %y.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 6
+; CHECK: %y.reload = load i32, i32* %y.reload.addr
+; CHECK: cleanupret from %0 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi.from.invoke1:
+; CHECK: %1 = cleanuppad within none []
+; CHECK: %x.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 5
+; CHECK: %x.reload = load i32, i32* %x.reload.addr
+; CHECK: cleanupret from %1 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi:
+; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
+; CHECK: %tok = cleanuppad within none []
+; CHECK: call void @use_val(i32 %val)
+; CHECK: cleanupret from %tok unwind to caller
+
+pad.with.phi:
+ %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
+ %tok = cleanuppad within none []
+ call void @use_val(i32 %val)
+ cleanupret from %tok unwind to caller
+
+cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
+ %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+ call void @free(i8* %mem)
+ br label %coro.ret
+
+coro.ret:
+ call i1 @llvm.coro.end(i8* null, i1 false)
+ ret void
+
+unreach:
+ unreachable
+}
+
+; CHECK-LABEL: define internal fastcc void @h.resume(
+define void @h(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
+entry:
+ %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+ %size = tail call i64 @llvm.coro.size.i64()
+ %alloc = call i8* @malloc(i64 %size)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+ %sp = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %sp, label %coro.ret [
+ i8 0, label %resume
+ i8 1, label %cleanup
+ ]
+
+resume:
+ br i1 %cond, label %invoke1, label %invoke2
+
+invoke1:
+ invoke void @may_throw1()
+ to label %coro.ret unwind label %pad.with.phi
+invoke2:
+ invoke void @may_throw2()
+ to label %coro.ret unwind label %pad.with.phi
+
+; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value
+
+; CHECK: pad.with.phi.from.invoke2:
+; CHECK: %0 = cleanuppad within none []
+; CHECK: %y.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 6
+; CHECK: %y.reload = load i32, i32* %y.reload.addr
+; CHECK: cleanupret from %0 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi.from.invoke1:
+; CHECK: %1 = cleanuppad within none []
+; CHECK: %x.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 5
+; CHECK: %x.reload = load i32, i32* %x.reload.addr
+; CHECK: cleanupret from %1 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi:
+; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
+; CHECK: %switch = catchswitch within none [label %catch] unwind to caller
+pad.with.phi:
+ %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
+ %switch = catchswitch within none [label %catch] unwind to caller
+
+catch: ; preds = %catch.dispatch
+ %pad = catchpad within %switch [i8* null, i32 64, i8* null]
+ call void @use_val(i32 %val)
+ catchret from %pad to label %coro.ret
+
+cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
+ %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+ call void @free(i8* %mem)
+ br label %coro.ret
+
+coro.ret:
+ call i1 @llvm.coro.end(i8* null, i1 false)
+ ret void
+}
+
+; Function Attrs: argmemonly nounwind readonly
+declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare noalias i8* @malloc(i64)
+declare i64 @llvm.coro.size.i64()
+declare i8* @llvm.coro.begin(token, i8* writeonly)
+
+; Function Attrs: nounwind
+declare token @llvm.coro.save(i8*)
+declare i8 @llvm.coro.suspend(token, i1)
+
+; Function Attrs: argmemonly nounwind
+declare void @may_throw1()
+declare void @may_throw2()
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @use_val(i32)
+declare void @__cxa_end_catch()
+
+; Function Attrs: nounwind
+declare i1 @llvm.coro.end(i8*, i1)
+declare void @free(i8*)
+declare i8* @llvm.coro.free(token, i8* nocapture readonly)
diff --git a/test/Transforms/GVN/PRE/2011-06-01-NonLocalMemdepMiscompile.ll b/test/Transforms/GVN/PRE/2011-06-01-NonLocalMemdepMiscompile.ll
index 0769575759ba..05dc79db95ad 100644
--- a/test/Transforms/GVN/PRE/2011-06-01-NonLocalMemdepMiscompile.ll
+++ b/test/Transforms/GVN/PRE/2011-06-01-NonLocalMemdepMiscompile.ll
@@ -5,8 +5,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.7.0"
-
-define i1 @rb_intern() nounwind ssp {
+define i1 @rb_intern(i8 *%foo) nounwind ssp {
; CHECK-LABEL: @rb_intern(
bb:
@@ -19,7 +18,7 @@ bb1:
br i1 undef, label %bb3, label %bb15
; CHECK: bb1:
-; CHECK: [[TMP:%.*]] = phi i8* [ getelementptr (i8, i8* null, i64 undef), %bb10 ], [ null, %bb ]
+; CHECK: [[TMP:%.*]] = phi i8* [ %tmp14, %bb10 ], [ null, %bb ]
; CHECK: bb1.bb15_crit_edge:
; CHECK: %tmp17.pre = load i8, i8* [[TMP]], align 1
@@ -41,7 +40,7 @@ bb10:
%tmp11 = load i8*, i8** %tmp, align 8
%tmp12 = load i8, i8* %tmp11, align 1
%tmp13 = zext i8 %tmp12 to i64
- %tmp14 = getelementptr inbounds i8, i8* null, i64 undef
+ %tmp14 = getelementptr inbounds i8, i8* %foo, i64 undef
store i8* %tmp14, i8** %tmp, align 8
br label %bb1
diff --git a/test/Transforms/GVN/PRE/nonintegral.ll b/test/Transforms/GVN/PRE/nonintegral.ll
new file mode 100644
index 000000000000..75a756e8af8c
--- /dev/null
+++ b/test/Transforms/GVN/PRE/nonintegral.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -gvn -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @nipre(double addrspace(4)** noalias %p, i64 addrspace(4)** noalias %p2, i8 %jmp) {
+
+; CHECK-LABEL: @nipre(
+; CHECK: [[PCAST:%.*]] = bitcast double addrspace(4)** [[P:%.*]] to i64 addrspace(4)**
+; CHECK: a:
+; CHECK: [[L1:%.*]] = load i64 addrspace(4)*, i64 addrspace(4)** [[PCAST]]
+; CHECK: [[TMP0:%.*]] = bitcast i64 addrspace(4)* [[L1]] to double addrspace(4)*
+; CHECK: b:
+; CHECK: [[L2:%.*]] = load i64 addrspace(4)*, i64 addrspace(4)** [[PCAST]]
+; CHECK: [[TMP1:%.*]] = bitcast i64 addrspace(4)* [[L2]] to double addrspace(4)*
+; CHECK: c:
+; CHECK-NEXT: [[L3_PRE:%.*]] = load double addrspace(4)*, double addrspace(4)** %p
+
+entry:
+ %pcast = bitcast double addrspace(4)** %p to i64 addrspace(4)**
+ switch i8 %jmp, label %c [ i8 0, label %a
+ i8 1, label %b]
+a:
+ %l1 = load i64 addrspace(4)*, i64 addrspace(4)** %pcast
+ store i64 addrspace(4)* %l1, i64 addrspace(4)** %p2
+ br label %tail
+b:
+ %l2 = load i64 addrspace(4)*, i64 addrspace(4)** %pcast
+ store i64 addrspace(4)* %l2, i64 addrspace(4)** %p2
+ br label %tail
+c:
+ br label %tail
+tail:
+ %l3 = load double addrspace(4)*, double addrspace(4)** %p
+ %l3cast = bitcast double addrspace(4)* %l3 to i64 addrspace(4)*
+ store i64 addrspace(4)* %l3cast, i64 addrspace(4)** %p2
+ ret void
+}
diff --git a/test/Transforms/IndVarSimplify/2011-10-27-lftrnull.ll b/test/Transforms/IndVarSimplify/2011-10-27-lftrnull.ll
index 3d77a364f96f..49e5d24296c0 100644
--- a/test/Transforms/IndVarSimplify/2011-10-27-lftrnull.ll
+++ b/test/Transforms/IndVarSimplify/2011-10-27-lftrnull.ll
@@ -6,7 +6,7 @@ target triple = "thumbv7-apple-darwin"
; CHECK-LABEL: @test(
; CHECK: if.end.i126:
-; CHECK: %exitcond = icmp ne i8* %incdec.ptr.i, getelementptr (i8, i8* null, i32 undef)
+; CHECK: %exitcond = icmp ne i8* %incdec.ptr.i, null
define void @test() nounwind {
entry:
br label %while.cond
diff --git a/test/Transforms/InferFunctionAttrs/annotate.ll b/test/Transforms/InferFunctionAttrs/annotate.ll
index 64676bf310bd..cb4b5cdd1e8c 100644
--- a/test/Transforms/InferFunctionAttrs/annotate.ll
+++ b/test/Transforms/InferFunctionAttrs/annotate.ll
@@ -22,12 +22,138 @@ declare i32 @__nvvm_reflect(i8*)
; Use an opaque pointer type for all the (possibly opaque) structs.
%opaque = type opaque
+; CHECK: declare double @__acos_finite(double)
+declare double @__acos_finite(double)
+
+; CHECK: declare float @__acosf_finite(float)
+declare float @__acosf_finite(float)
+
+; CHECK: declare double @__acosh_finite(double)
+declare double @__acosh_finite(double)
+
+; CHECK: declare float @__acoshf_finite(float)
+declare float @__acoshf_finite(float)
+
+; CHECK: declare x86_fp80 @__acoshl_finite(x86_fp80)
+declare x86_fp80 @__acoshl_finite(x86_fp80)
+
+; CHECK: declare x86_fp80 @__acosl_finite(x86_fp80)
+declare x86_fp80 @__acosl_finite(x86_fp80)
+
+; CHECK: declare double @__asin_finite(double)
+declare double @__asin_finite(double)
+
+; CHECK: declare float @__asinf_finite(float)
+declare float @__asinf_finite(float)
+
+; CHECK: declare x86_fp80 @__asinl_finite(x86_fp80)
+declare x86_fp80 @__asinl_finite(x86_fp80)
+
+; CHECK: declare double @__atan2_finite(double, double)
+declare double @__atan2_finite(double, double)
+
+; CHECK: declare float @__atan2f_finite(float, float)
+declare float @__atan2f_finite(float, float)
+
+; CHECK: declare x86_fp80 @__atan2l_finite(x86_fp80, x86_fp80)
+declare x86_fp80 @__atan2l_finite(x86_fp80, x86_fp80)
+
+; CHECK: declare double @__atanh_finite(double)
+declare double @__atanh_finite(double)
+
+; CHECK: declare float @__atanhf_finite(float)
+declare float @__atanhf_finite(float)
+
+; CHECK: declare x86_fp80 @__atanhl_finite(x86_fp80)
+declare x86_fp80 @__atanhl_finite(x86_fp80)
+
+; CHECK: declare double @__cosh_finite(double)
+declare double @__cosh_finite(double)
+
+; CHECK: declare float @__coshf_finite(float)
+declare float @__coshf_finite(float)
+
+; CHECK: declare x86_fp80 @__coshl_finite(x86_fp80)
+declare x86_fp80 @__coshl_finite(x86_fp80)
+
; CHECK: declare double @__cospi(double)
declare double @__cospi(double)
; CHECK: declare float @__cospif(float)
declare float @__cospif(float)
+; CHECK: declare double @__exp10_finite(double)
+declare double @__exp10_finite(double)
+
+; CHECK: declare float @__exp10f_finite(float)
+declare float @__exp10f_finite(float)
+
+; CHECK: declare x86_fp80 @__exp10l_finite(x86_fp80)
+declare x86_fp80 @__exp10l_finite(x86_fp80)
+
+; CHECK: declare double @__exp2_finite(double)
+declare double @__exp2_finite(double)
+
+; CHECK: declare float @__exp2f_finite(float)
+declare float @__exp2f_finite(float)
+
+; CHECK: declare x86_fp80 @__exp2l_finite(x86_fp80)
+declare x86_fp80 @__exp2l_finite(x86_fp80)
+
+; CHECK: declare double @__exp_finite(double)
+declare double @__exp_finite(double)
+
+; CHECK: declare float @__expf_finite(float)
+declare float @__expf_finite(float)
+
+; CHECK: declare x86_fp80 @__expl_finite(x86_fp80)
+declare x86_fp80 @__expl_finite(x86_fp80)
+
+; CHECK: declare double @__log10_finite(double)
+declare double @__log10_finite(double)
+
+; CHECK: declare float @__log10f_finite(float)
+declare float @__log10f_finite(float)
+
+; CHECK: declare x86_fp80 @__log10l_finite(x86_fp80)
+declare x86_fp80 @__log10l_finite(x86_fp80)
+
+; CHECK: declare double @__log2_finite(double)
+declare double @__log2_finite(double)
+
+; CHECK: declare float @__log2f_finite(float)
+declare float @__log2f_finite(float)
+
+; CHECK: declare x86_fp80 @__log2l_finite(x86_fp80)
+declare x86_fp80 @__log2l_finite(x86_fp80)
+
+; CHECK: declare double @__log_finite(double)
+declare double @__log_finite(double)
+
+; CHECK: declare float @__logf_finite(float)
+declare float @__logf_finite(float)
+
+; CHECK: declare x86_fp80 @__logl_finite(x86_fp80)
+declare x86_fp80 @__logl_finite(x86_fp80)
+
+; CHECK: declare double @__pow_finite(double, double)
+declare double @__pow_finite(double, double)
+
+; CHECK: declare float @__powf_finite(float, float)
+declare float @__powf_finite(float, float)
+
+; CHECK: declare x86_fp80 @__powl_finite(x86_fp80, x86_fp80)
+declare x86_fp80 @__powl_finite(x86_fp80, x86_fp80)
+
+; CHECK: declare double @__sinh_finite(double)
+declare double @__sinh_finite(double)
+
+; CHECK: declare float @__sinhf_finite(float)
+declare float @__sinhf_finite(float)
+
+; CHECK: declare x86_fp80 @__sinhl_finite(x86_fp80)
+declare x86_fp80 @__sinhl_finite(x86_fp80)
+
; CHECK: declare double @__sinpi(double)
declare double @__sinpi(double)
diff --git a/test/Transforms/InferFunctionAttrs/no-proto.ll b/test/Transforms/InferFunctionAttrs/no-proto.ll
index 25a4805c367f..3cab0ab4bf40 100644
--- a/test/Transforms/InferFunctionAttrs/no-proto.ll
+++ b/test/Transforms/InferFunctionAttrs/no-proto.ll
@@ -3,12 +3,138 @@
; Check that we don't modify libc functions with invalid prototypes.
+; CHECK: declare void @__acos_finite(...)
+declare void @__acos_finite(...)
+
+; CHECK: declare void @__acosf_finite(...)
+declare void @__acosf_finite(...)
+
+; CHECK: declare void @__acosh_finite(...)
+declare void @__acosh_finite(...)
+
+; CHECK: declare void @__acoshf_finite(...)
+declare void @__acoshf_finite(...)
+
+; CHECK: declare void @__acoshl_finite(...)
+declare void @__acoshl_finite(...)
+
+; CHECK: declare void @__acosl_finite(...)
+declare void @__acosl_finite(...)
+
+; CHECK: declare void @__asin_finite(...)
+declare void @__asin_finite(...)
+
+; CHECK: declare void @__asinf_finite(...)
+declare void @__asinf_finite(...)
+
+; CHECK: declare void @__asinl_finite(...)
+declare void @__asinl_finite(...)
+
+; CHECK: declare void @__atan2_finite(...)
+declare void @__atan2_finite(...)
+
+; CHECK: declare void @__atan2f_finite(...)
+declare void @__atan2f_finite(...)
+
+; CHECK: declare void @__atan2l_finite(...)
+declare void @__atan2l_finite(...)
+
+; CHECK: declare void @__atanh_finite(...)
+declare void @__atanh_finite(...)
+
+; CHECK: declare void @__atanhf_finite(...)
+declare void @__atanhf_finite(...)
+
+; CHECK: declare void @__atanhl_finite(...)
+declare void @__atanhl_finite(...)
+
+; CHECK: declare void @__cosh_finite(...)
+declare void @__cosh_finite(...)
+
+; CHECK: declare void @__coshf_finite(...)
+declare void @__coshf_finite(...)
+
+; CHECK: declare void @__coshl_finite(...)
+declare void @__coshl_finite(...)
+
; CHECK: declare void @__cospi(...)
declare void @__cospi(...)
; CHECK: declare void @__cospif(...)
declare void @__cospif(...)
+; CHECK: declare void @__exp10_finite(...)
+declare void @__exp10_finite(...)
+
+; CHECK: declare void @__exp10f_finite(...)
+declare void @__exp10f_finite(...)
+
+; CHECK: declare void @__exp10l_finite(...)
+declare void @__exp10l_finite(...)
+
+; CHECK: declare void @__exp2_finite(...)
+declare void @__exp2_finite(...)
+
+; CHECK: declare void @__exp2f_finite(...)
+declare void @__exp2f_finite(...)
+
+; CHECK: declare void @__exp2l_finite(...)
+declare void @__exp2l_finite(...)
+
+; CHECK: declare void @__exp_finite(...)
+declare void @__exp_finite(...)
+
+; CHECK: declare void @__expf_finite(...)
+declare void @__expf_finite(...)
+
+; CHECK: declare void @__expl_finite(...)
+declare void @__expl_finite(...)
+
+; CHECK: declare void @__log10_finite(...)
+declare void @__log10_finite(...)
+
+; CHECK: declare void @__log10f_finite(...)
+declare void @__log10f_finite(...)
+
+; CHECK: declare void @__log10l_finite(...)
+declare void @__log10l_finite(...)
+
+; CHECK: declare void @__log2_finite(...)
+declare void @__log2_finite(...)
+
+; CHECK: declare void @__log2f_finite(...)
+declare void @__log2f_finite(...)
+
+; CHECK: declare void @__log2l_finite(...)
+declare void @__log2l_finite(...)
+
+; CHECK: declare void @__log_finite(...)
+declare void @__log_finite(...)
+
+; CHECK: declare void @__logf_finite(...)
+declare void @__logf_finite(...)
+
+; CHECK: declare void @__logl_finite(...)
+declare void @__logl_finite(...)
+
+; CHECK: declare void @__pow_finite(...)
+declare void @__pow_finite(...)
+
+; CHECK: declare void @__powf_finite(...)
+declare void @__powf_finite(...)
+
+; CHECK: declare void @__powl_finite(...)
+declare void @__powl_finite(...)
+
+; CHECK: declare void @__sinh_finite(...)
+declare void @__sinh_finite(...)
+
+; CHECK: declare void @__sinhf_finite(...)
+declare void @__sinhf_finite(...)
+
+; CHECK: declare void @__sinhl_finite(...)
+declare void @__sinhl_finite(...)
+
; CHECK: declare void @__sinpi(...)
declare void @__sinpi(...)
diff --git a/test/Transforms/Inline/inline-cold.ll b/test/Transforms/Inline/inline-cold.ll
index 93d2569d87ad..e0e679ad4036 100644
--- a/test/Transforms/Inline/inline-cold.ll
+++ b/test/Transforms/Inline/inline-cold.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -inline -S -inlinecold-threshold=75 | FileCheck %s
+; RUN: opt < %s -inline -S -inlinecold-threshold=25 | FileCheck %s
; Test that functions with attribute Cold are not inlined while the
; same function without attribute Cold will be inlined.
@@ -64,23 +64,7 @@ entry:
%x3 = add i32 %x2, %a3
%a4 = load volatile i32, i32* @a
%x4 = add i32 %x3, %a4
- %a5 = load volatile i32, i32* @a
- %x5 = add i32 %x4, %a5
- %a6 = load volatile i32, i32* @a
- %x6 = add i32 %x5, %a6
- %a7 = load volatile i32, i32* @a
- %x7 = add i32 %x6, %a6
- %a8 = load volatile i32, i32* @a
- %x8 = add i32 %x7, %a8
- %a9 = load volatile i32, i32* @a
- %x9 = add i32 %x8, %a9
- %a10 = load volatile i32, i32* @a
- %x10 = add i32 %x9, %a10
- %a11 = load volatile i32, i32* @a
- %x11 = add i32 %x10, %a11
- %a12 = load volatile i32, i32* @a
- %x12 = add i32 %x11, %a12
- %add = add i32 %x12, %a
+ %add = add i32 %x4, %a
ret i32 %add
}
diff --git a/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll b/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
index 1f2b143c97ee..b8d41abe1c35 100644
--- a/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
+++ b/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
@@ -6,7 +6,7 @@ target datalayout = "e-p3:32:32-p4:64:64-n32"
@lds = internal addrspace(3) global [64 x i64] zeroinitializer
; CHECK-LABEL: @constexpr_addrspacecast_ptr_size_change(
-; CHECK: load i64, i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* addrspacecast (i64 addrspace(3)* getelementptr inbounds ([64 x i64], [64 x i64] addrspace(3)* @lds, i32 0, i32 0) to i64 addrspace(4)*), i64 undef)
+; CHECK: load i64, i64 addrspace(4)* addrspacecast (i64 addrspace(3)* getelementptr inbounds ([64 x i64], [64 x i64] addrspace(3)* @lds, i32 0, i32 0) to i64 addrspace(4)*)
; CHECK-NEXT: br
define void @constexpr_addrspacecast_ptr_size_change() #0 {
%tmp0 = call i32 @foo(i64 addrspace(4)* addrspacecast (i64 addrspace(3)* getelementptr inbounds ([64 x i64], [64 x i64] addrspace(3)* @lds, i32 0, i32 0) to i64 addrspace(4)*)) #1
diff --git a/test/Transforms/Inline/partial-inline-act.ll b/test/Transforms/Inline/partial-inline-act.ll
index 916436260bd6..27e719153875 100644
--- a/test/Transforms/Inline/partial-inline-act.ll
+++ b/test/Transforms/Inline/partial-inline-act.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -partial-inliner -disable-output
+; RUN: opt < %s -partial-inliner -skip-partial-inlining-cost-analysis -disable-output
; This testcase tests the assumption cache
define internal i32 @inlinedFunc(i1 %cond, i32* align 4 %align.val) {
diff --git a/test/Transforms/Inline/prof-update.ll b/test/Transforms/Inline/prof-update.ll
index 3fefa1c56cea..4a4471e8e17a 100644
--- a/test/Transforms/Inline/prof-update.ll
+++ b/test/Transforms/Inline/prof-update.ll
@@ -6,21 +6,21 @@ declare void @ext1();
@func = global void ()* null
; CHECK: define void @callee(i32 %n) !prof ![[ENTRY_COUNT:[0-9]*]]
-define void @callee(i32 %n) !prof !1 {
+define void @callee(i32 %n) !prof !15 {
%cond = icmp sle i32 %n, 10
br i1 %cond, label %cond_true, label %cond_false
cond_true:
; ext1 is optimized away, thus not updated.
; CHECK: call void @ext1(), !prof ![[COUNT_CALLEE1:[0-9]*]]
- call void @ext1(), !prof !2
+ call void @ext1(), !prof !16
ret void
cond_false:
; ext is cloned and updated.
; CHECK: call void @ext(), !prof ![[COUNT_CALLEE:[0-9]*]]
- call void @ext(), !prof !2
+ call void @ext(), !prof !16
%f = load void ()*, void ()** @func
; CHECK: call void %f(), !prof ![[COUNT_IND_CALLEE:[0-9]*]]
- call void %f(), !prof !4
+ call void %f(), !prof !18
ret void
}
@@ -28,16 +28,29 @@ cond_false:
define void @caller() {
; CHECK: call void @ext(), !prof ![[COUNT_CALLER:[0-9]*]]
; CHECK: call void %f.i(), !prof ![[COUNT_IND_CALLER:[0-9]*]]
- call void @callee(i32 15), !prof !3
+ call void @callee(i32 15), !prof !17
ret void
}
-!llvm.module.flags = !{!0}
-!0 = !{i32 1, !"MaxFunctionCount", i32 2000}
-!1 = !{!"function_entry_count", i64 1000}
-!2 = !{!"branch_weights", i64 2000}
-!3 = !{!"branch_weights", i64 400}
-!4 = !{!"VP", i32 0, i64 140, i64 111, i64 80, i64 222, i64 40, i64 333, i64 20}
+!llvm.module.flags = !{!1}
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"SampleProfile"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 10}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 2000}
+!8 = !{!"NumCounts", i64 2}
+!9 = !{!"NumFunctions", i64 2}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!15 = !{!"function_entry_count", i64 1000}
+!16 = !{!"branch_weights", i64 2000}
+!17 = !{!"branch_weights", i64 400}
+!18 = !{!"VP", i32 0, i64 140, i64 111, i64 80, i64 222, i64 40, i64 333, i64 20}
attributes #0 = { alwaysinline }
; CHECK: ![[ENTRY_COUNT]] = !{!"function_entry_count", i64 600}
; CHECK: ![[COUNT_CALLEE1]] = !{!"branch_weights", i64 2000}
diff --git a/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll b/test/Transforms/InstCombine/AArch64/2012-04-23-Neon-Intrinsics.ll
index 39408a2d394c..04fb7d91193a 100644
--- a/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
+++ b/test/Transforms/InstCombine/AArch64/2012-04-23-Neon-Intrinsics.ll
@@ -1,70 +1,6 @@
; RUN: opt -S -instcombine < %s | FileCheck %s
-
-define <4 x i32> @mulByZero(<4 x i16> %x) nounwind readnone ssp {
-entry:
- %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) nounwind
- ret <4 x i32> %a
-; CHECK: entry:
-; CHECK-NEXT: ret <4 x i32> zeroinitializer
-}
-
-define <4 x i32> @mulByOne(<4 x i16> %x) nounwind readnone ssp {
-entry:
- %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
- ret <4 x i32> %a
-; CHECK: entry:
-; CHECK-NEXT: %a = sext <4 x i16> %x to <4 x i32>
-; CHECK-NEXT: ret <4 x i32> %a
-}
-
-define <4 x i32> @constantMul() nounwind readnone ssp {
-entry:
- %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
- ret <4 x i32> %a
-; CHECK: entry:
-; CHECK-NEXT: ret <4 x i32> <i32 6, i32 6, i32 6, i32 6>
-}
-
-define <4 x i32> @constantMulS() nounwind readnone ssp {
-entry:
- %b = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
- ret <4 x i32> %b
-; CHECK: entry:
-; CHECK-NEXT: ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
-}
-
-define <4 x i32> @constantMulU() nounwind readnone ssp {
-entry:
- %b = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
- ret <4 x i32> %b
-; CHECK: entry:
-; CHECK-NEXT: ret <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
-}
-
-define <4 x i32> @complex1(<4 x i16> %x) nounwind readnone ssp {
-entry:
- %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) nounwind
- %b = add <4 x i32> zeroinitializer, %a
- ret <4 x i32> %b
-; CHECK: entry:
-; CHECK-NEXT: %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) [[NUW:#[0-9]+]]
-; CHECK-NEXT: ret <4 x i32> %a
-}
-
-define <4 x i32> @complex2(<4 x i32> %x) nounwind readnone ssp {
-entry:
- %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
- %b = add <4 x i32> %x, %a
- ret <4 x i32> %b
-; CHECK: entry:
-; CHECK-NEXT: %b = add <4 x i32> %x, <i32 6, i32 6, i32 6, i32 6>
-; CHECK-NEXT: ret <4 x i32> %b
-}
-
-declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-
-; ARM64 variants - <rdar://problem/12349617>
+; ARM64 NEON intrinsic variants - <rdar://problem/12349617>
+; REQUIRES: aarch64
define <4 x i32> @mulByZeroARM64(<4 x i16> %x) nounwind readnone ssp {
entry:
diff --git a/test/Transforms/InstCombine/AArch64/lit.local.cfg b/test/Transforms/InstCombine/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..7184443994b6
--- /dev/null
+++ b/test/Transforms/InstCombine/AArch64/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/InstCombine/amdgcn-intrinsics.ll b/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
index 1901997c5521..1901997c5521 100644
--- a/test/Transforms/InstCombine/amdgcn-intrinsics.ll
+++ b/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
diff --git a/test/Transforms/InstCombine/AMDGPU/lit.local.cfg b/test/Transforms/InstCombine/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..2a665f06be72
--- /dev/null
+++ b/test/Transforms/InstCombine/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AMDGPU' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/InstCombine/ARM/2012-04-23-Neon-Intrinsics.ll b/test/Transforms/InstCombine/ARM/2012-04-23-Neon-Intrinsics.ll
new file mode 100644
index 000000000000..9efed367d19f
--- /dev/null
+++ b/test/Transforms/InstCombine/ARM/2012-04-23-Neon-Intrinsics.ll
@@ -0,0 +1,65 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+define <4 x i32> @mulByZero(<4 x i16> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) nounwind
+ ret <4 x i32> %a
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> zeroinitializer
+}
+
+define <4 x i32> @mulByOne(<4 x i16> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ ret <4 x i32> %a
+; CHECK: entry:
+; CHECK-NEXT: %a = sext <4 x i16> %x to <4 x i32>
+; CHECK-NEXT: ret <4 x i32> %a
+}
+
+define <4 x i32> @constantMul() nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
+ ret <4 x i32> %a
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> <i32 6, i32 6, i32 6, i32 6>
+}
+
+define <4 x i32> @constantMulS() nounwind readnone ssp {
+entry:
+ %b = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+}
+
+define <4 x i32> @constantMulU() nounwind readnone ssp {
+entry:
+ %b = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
+}
+
+define <4 x i32> @complex1(<4 x i16> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) nounwind
+ %b = add <4 x i32> zeroinitializer, %a
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) [[NUW:#[0-9]+]]
+; CHECK-NEXT: ret <4 x i32> %a
+}
+
+define <4 x i32> @complex2(<4 x i32> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
+ %b = add <4 x i32> %x, %a
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: %b = add <4 x i32> %x, <i32 6, i32 6, i32 6, i32 6>
+; CHECK-NEXT: ret <4 x i32> %b
+}
+
+declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
diff --git a/test/Transforms/InstCombine/constant-fold-hang.ll b/test/Transforms/InstCombine/ARM/constant-fold-hang.ll
index 2ca6b86ccc2f..2ca6b86ccc2f 100644
--- a/test/Transforms/InstCombine/constant-fold-hang.ll
+++ b/test/Transforms/InstCombine/ARM/constant-fold-hang.ll
diff --git a/test/Transforms/InstCombine/ARM/lit.local.cfg b/test/Transforms/InstCombine/ARM/lit.local.cfg
new file mode 100644
index 000000000000..236e1d344166
--- /dev/null
+++ b/test/Transforms/InstCombine/ARM/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'ARM' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/InstCombine/neon-intrinsics.ll b/test/Transforms/InstCombine/ARM/neon-intrinsics.ll
index d22fa9c811dc..d22fa9c811dc 100644
--- a/test/Transforms/InstCombine/neon-intrinsics.ll
+++ b/test/Transforms/InstCombine/ARM/neon-intrinsics.ll
diff --git a/test/Transforms/InstCombine/aligned-altivec.ll b/test/Transforms/InstCombine/PowerPC/aligned-altivec.ll
index 10b4e4d62631..10b4e4d62631 100644
--- a/test/Transforms/InstCombine/aligned-altivec.ll
+++ b/test/Transforms/InstCombine/PowerPC/aligned-altivec.ll
diff --git a/test/Transforms/InstCombine/aligned-qpx.ll b/test/Transforms/InstCombine/PowerPC/aligned-qpx.ll
index e9710df5670c..e9710df5670c 100644
--- a/test/Transforms/InstCombine/aligned-qpx.ll
+++ b/test/Transforms/InstCombine/PowerPC/aligned-qpx.ll
diff --git a/test/Transforms/InstCombine/PowerPC/lit.local.cfg b/test/Transforms/InstCombine/PowerPC/lit.local.cfg
new file mode 100644
index 000000000000..5d33887ff0a4
--- /dev/null
+++ b/test/Transforms/InstCombine/PowerPC/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'PowerPC' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/InstCombine/vsx-unaligned.ll b/test/Transforms/InstCombine/PowerPC/vsx-unaligned.ll
index ad264fb15b31..ad264fb15b31 100644
--- a/test/Transforms/InstCombine/vsx-unaligned.ll
+++ b/test/Transforms/InstCombine/PowerPC/vsx-unaligned.ll
diff --git a/test/Transforms/InstCombine/X86FsubCmpCombine.ll b/test/Transforms/InstCombine/X86/X86FsubCmpCombine.ll
index fde0692d00a2..fde0692d00a2 100644
--- a/test/Transforms/InstCombine/X86FsubCmpCombine.ll
+++ b/test/Transforms/InstCombine/X86/X86FsubCmpCombine.ll
diff --git a/test/Transforms/InstCombine/blend_x86.ll b/test/Transforms/InstCombine/X86/blend_x86.ll
index 39ceb0186efe..39ceb0186efe 100644
--- a/test/Transforms/InstCombine/blend_x86.ll
+++ b/test/Transforms/InstCombine/X86/blend_x86.ll
diff --git a/test/Transforms/InstCombine/X86/lit.local.cfg b/test/Transforms/InstCombine/X86/lit.local.cfg
new file mode 100644
index 000000000000..c8625f4d9d24
--- /dev/null
+++ b/test/Transforms/InstCombine/X86/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'X86' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/InstCombine/pr2645-1.ll b/test/Transforms/InstCombine/X86/pr2645-1.ll
index 2986d21866bf..2986d21866bf 100644
--- a/test/Transforms/InstCombine/pr2645-1.ll
+++ b/test/Transforms/InstCombine/X86/pr2645-1.ll
diff --git a/test/Transforms/InstCombine/shufflemask-undef.ll b/test/Transforms/InstCombine/X86/shufflemask-undef.ll
index 10509a92941b..d95c42da5f7e 100644
--- a/test/Transforms/InstCombine/shufflemask-undef.ll
+++ b/test/Transforms/InstCombine/X86/shufflemask-undef.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -instcombine -S | not grep "shufflevector.*i32 8"
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; CHECK-NOT: shufflevector{{.*}}i32 8
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9"
diff --git a/test/Transforms/InstCombine/x86-avx2.ll b/test/Transforms/InstCombine/X86/x86-avx2.ll
index f4045f788e2d..f4045f788e2d 100644
--- a/test/Transforms/InstCombine/x86-avx2.ll
+++ b/test/Transforms/InstCombine/X86/x86-avx2.ll
diff --git a/test/Transforms/InstCombine/x86-avx512.ll b/test/Transforms/InstCombine/X86/x86-avx512.ll
index 2a24d93ce76a..2a24d93ce76a 100644
--- a/test/Transforms/InstCombine/x86-avx512.ll
+++ b/test/Transforms/InstCombine/X86/x86-avx512.ll
diff --git a/test/Transforms/InstCombine/x86-crc32-demanded.ll b/test/Transforms/InstCombine/X86/x86-crc32-demanded.ll
index 878b97d1bb22..878b97d1bb22 100644
--- a/test/Transforms/InstCombine/x86-crc32-demanded.ll
+++ b/test/Transforms/InstCombine/X86/x86-crc32-demanded.ll
diff --git a/test/Transforms/InstCombine/x86-f16c.ll b/test/Transforms/InstCombine/X86/x86-f16c.ll
index 6b5b6cb26eda..6b5b6cb26eda 100644
--- a/test/Transforms/InstCombine/x86-f16c.ll
+++ b/test/Transforms/InstCombine/X86/x86-f16c.ll
diff --git a/test/Transforms/InstCombine/x86-fma.ll b/test/Transforms/InstCombine/X86/x86-fma.ll
index 0d27d3276163..0d27d3276163 100644
--- a/test/Transforms/InstCombine/x86-fma.ll
+++ b/test/Transforms/InstCombine/X86/x86-fma.ll
diff --git a/test/Transforms/InstCombine/x86-insertps.ll b/test/Transforms/InstCombine/X86/x86-insertps.ll
index f55ea6f22d2e..f55ea6f22d2e 100644
--- a/test/Transforms/InstCombine/x86-insertps.ll
+++ b/test/Transforms/InstCombine/X86/x86-insertps.ll
diff --git a/test/Transforms/InstCombine/x86-masked-memops.ll b/test/Transforms/InstCombine/X86/x86-masked-memops.ll
index 8502b1899ecb..8502b1899ecb 100644
--- a/test/Transforms/InstCombine/x86-masked-memops.ll
+++ b/test/Transforms/InstCombine/X86/x86-masked-memops.ll
diff --git a/test/Transforms/InstCombine/x86-movmsk.ll b/test/Transforms/InstCombine/X86/x86-movmsk.ll
index 11acc1dbca84..11acc1dbca84 100644
--- a/test/Transforms/InstCombine/x86-movmsk.ll
+++ b/test/Transforms/InstCombine/X86/x86-movmsk.ll
diff --git a/test/Transforms/InstCombine/x86-muldq.ll b/test/Transforms/InstCombine/X86/x86-muldq.ll
index bcbb8919c403..bcbb8919c403 100644
--- a/test/Transforms/InstCombine/x86-muldq.ll
+++ b/test/Transforms/InstCombine/X86/x86-muldq.ll
diff --git a/test/Transforms/InstCombine/x86-pack.ll b/test/Transforms/InstCombine/X86/x86-pack.ll
index f3c41a8aa476..f3c41a8aa476 100644
--- a/test/Transforms/InstCombine/x86-pack.ll
+++ b/test/Transforms/InstCombine/X86/x86-pack.ll
diff --git a/test/Transforms/InstCombine/x86-pshufb.ll b/test/Transforms/InstCombine/X86/x86-pshufb.ll
index f181ef57fe20..f181ef57fe20 100644
--- a/test/Transforms/InstCombine/x86-pshufb.ll
+++ b/test/Transforms/InstCombine/X86/x86-pshufb.ll
diff --git a/test/Transforms/InstCombine/x86-sse.ll b/test/Transforms/InstCombine/X86/x86-sse.ll
index 6ed62a4e0224..6ed62a4e0224 100644
--- a/test/Transforms/InstCombine/x86-sse.ll
+++ b/test/Transforms/InstCombine/X86/x86-sse.ll
diff --git a/test/Transforms/InstCombine/x86-sse2.ll b/test/Transforms/InstCombine/X86/x86-sse2.ll
index fe8828bfb5b2..fe8828bfb5b2 100644
--- a/test/Transforms/InstCombine/x86-sse2.ll
+++ b/test/Transforms/InstCombine/X86/x86-sse2.ll
diff --git a/test/Transforms/InstCombine/x86-sse41.ll b/test/Transforms/InstCombine/X86/x86-sse41.ll
index 16975471b9e1..16975471b9e1 100644
--- a/test/Transforms/InstCombine/x86-sse41.ll
+++ b/test/Transforms/InstCombine/X86/x86-sse41.ll
diff --git a/test/Transforms/InstCombine/x86-sse4a.ll b/test/Transforms/InstCombine/X86/x86-sse4a.ll
index e36a73532259..e36a73532259 100644
--- a/test/Transforms/InstCombine/x86-sse4a.ll
+++ b/test/Transforms/InstCombine/X86/x86-sse4a.ll
diff --git a/test/Transforms/InstCombine/X86/x86-vec_demanded_elts.ll b/test/Transforms/InstCombine/X86/x86-vec_demanded_elts.ll
new file mode 100644
index 000000000000..5ad8e767d767
--- /dev/null
+++ b/test/Transforms/InstCombine/X86/x86-vec_demanded_elts.ll
@@ -0,0 +1,110 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define i16 @test1(float %f) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: [[TMP281:%.*]] = fadd float %f, -1.000000e+00
+; CHECK-NEXT: [[TMP373:%.*]] = fmul float [[TMP281]], 5.000000e-01
+; CHECK-NEXT: [[TMP374:%.*]] = insertelement <4 x float> undef, float [[TMP373]], i32 0
+; CHECK-NEXT: [[TMP48:%.*]] = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> [[TMP374]], <4 x float> <float 6.553500e+04, float undef, float undef, float undef>)
+; CHECK-NEXT: [[TMP59:%.*]] = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> [[TMP48]], <4 x float> <float 0.000000e+00, float undef, float undef, float undef>)
+; CHECK-NEXT: [[TMP_UPGRD_1:%.*]] = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[TMP59]])
+; CHECK-NEXT: [[TMP69:%.*]] = trunc i32 [[TMP_UPGRD_1]] to i16
+; CHECK-NEXT: ret i16 [[TMP69]]
+;
+ %tmp = insertelement <4 x float> undef, float %f, i32 0
+ %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
+ %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
+ %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
+ %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
+ %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer )
+ %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )
+ %tmp69 = trunc i32 %tmp.upgrd.1 to i16
+ ret i16 %tmp69
+}
+
+define i64 @test3(float %f, double %d) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[V00:%.*]] = insertelement <4 x float> undef, float %f, i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[V00]])
+; CHECK-NEXT: [[V10:%.*]] = insertelement <4 x float> undef, float %f, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> [[V10]])
+; CHECK-NEXT: [[V20:%.*]] = insertelement <4 x float> undef, float %f, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[V20]])
+; CHECK-NEXT: [[V30:%.*]] = insertelement <4 x float> undef, float %f, i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> [[V30]])
+; CHECK-NEXT: [[V40:%.*]] = insertelement <2 x double> undef, double %d, i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> [[V40]])
+; CHECK-NEXT: [[V50:%.*]] = insertelement <2 x double> undef, double %d, i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> [[V50]])
+; CHECK-NEXT: [[V60:%.*]] = insertelement <2 x double> undef, double %d, i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> [[V60]])
+; CHECK-NEXT: [[V70:%.*]] = insertelement <2 x double> undef, double %d, i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> [[V70]])
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]]
+; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
+; CHECK-NEXT: ret i64 [[TMP15]]
+;
+ %v00 = insertelement <4 x float> undef, float %f, i32 0
+ %v01 = insertelement <4 x float> %v00, float 0.000000e+00, i32 1
+ %v02 = insertelement <4 x float> %v01, float 0.000000e+00, i32 2
+ %v03 = insertelement <4 x float> %v02, float 0.000000e+00, i32 3
+ %tmp0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %v03)
+ %v10 = insertelement <4 x float> undef, float %f, i32 0
+ %v11 = insertelement <4 x float> %v10, float 0.000000e+00, i32 1
+ %v12 = insertelement <4 x float> %v11, float 0.000000e+00, i32 2
+ %v13 = insertelement <4 x float> %v12, float 0.000000e+00, i32 3
+ %tmp1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %v13)
+ %v20 = insertelement <4 x float> undef, float %f, i32 0
+ %v21 = insertelement <4 x float> %v20, float 0.000000e+00, i32 1
+ %v22 = insertelement <4 x float> %v21, float 0.000000e+00, i32 2
+ %v23 = insertelement <4 x float> %v22, float 0.000000e+00, i32 3
+ %tmp2 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v23)
+ %v30 = insertelement <4 x float> undef, float %f, i32 0
+ %v31 = insertelement <4 x float> %v30, float 0.000000e+00, i32 1
+ %v32 = insertelement <4 x float> %v31, float 0.000000e+00, i32 2
+ %v33 = insertelement <4 x float> %v32, float 0.000000e+00, i32 3
+ %tmp3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %v33)
+ %v40 = insertelement <2 x double> undef, double %d, i32 0
+ %v41 = insertelement <2 x double> %v40, double 0.000000e+00, i32 1
+ %tmp4 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %v41)
+ %v50 = insertelement <2 x double> undef, double %d, i32 0
+ %v51 = insertelement <2 x double> %v50, double 0.000000e+00, i32 1
+ %tmp5 = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %v51)
+ %v60 = insertelement <2 x double> undef, double %d, i32 0
+ %v61 = insertelement <2 x double> %v60, double 0.000000e+00, i32 1
+ %tmp6 = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %v61)
+ %v70 = insertelement <2 x double> undef, double %d, i32 0
+ %v71 = insertelement <2 x double> %v70, double 0.000000e+00, i32 1
+ %tmp7 = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %v71)
+ %tmp8 = add i32 %tmp0, %tmp2
+ %tmp9 = add i32 %tmp4, %tmp6
+ %tmp10 = add i32 %tmp8, %tmp9
+ %tmp11 = sext i32 %tmp10 to i64
+ %tmp12 = add i64 %tmp1, %tmp3
+ %tmp13 = add i64 %tmp5, %tmp7
+ %tmp14 = add i64 %tmp12, %tmp13
+ %tmp15 = add i64 %tmp11, %tmp14
+ ret i64 %tmp15
+}
+
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+declare i32 @llvm.x86.sse.cvtss2si(<4 x float>)
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>)
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>)
+declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>)
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>)
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>)
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)
diff --git a/test/Transforms/InstCombine/x86-vector-shifts.ll b/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
index 07934fbdfe72..07934fbdfe72 100644
--- a/test/Transforms/InstCombine/x86-vector-shifts.ll
+++ b/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
diff --git a/test/Transforms/InstCombine/x86-vperm2.ll b/test/Transforms/InstCombine/X86/x86-vperm2.ll
index 84f69aa25d24..84f69aa25d24 100644
--- a/test/Transforms/InstCombine/x86-vperm2.ll
+++ b/test/Transforms/InstCombine/X86/x86-vperm2.ll
diff --git a/test/Transforms/InstCombine/x86-vpermil.ll b/test/Transforms/InstCombine/X86/x86-vpermil.ll
index f68eb36c4b58..f68eb36c4b58 100644
--- a/test/Transforms/InstCombine/x86-vpermil.ll
+++ b/test/Transforms/InstCombine/X86/x86-vpermil.ll
diff --git a/test/Transforms/InstCombine/x86-xop.ll b/test/Transforms/InstCombine/X86/x86-xop.ll
index 03a3f921abb2..03a3f921abb2 100644
--- a/test/Transforms/InstCombine/x86-xop.ll
+++ b/test/Transforms/InstCombine/X86/x86-xop.ll
diff --git a/test/Transforms/InstCombine/add.ll b/test/Transforms/InstCombine/add.ll
index 648305d134cd..5f7101e8feca 100644
--- a/test/Transforms/InstCombine/add.ll
+++ b/test/Transforms/InstCombine/add.ll
@@ -27,6 +27,32 @@ define <2 x i32> @select_0_or_1_from_bool_vec(<2 x i1> %x) {
ret <2 x i32> %add
}
+; This is an 'andn' of the low bit.
+
+define i32 @flip_and_mask(i32 %x) {
+; CHECK-LABEL: @flip_and_mask(
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1
+; CHECK-NEXT: [[INC:%.*]] = xor i32 [[TMP1]], 1
+; CHECK-NEXT: ret i32 [[INC]]
+;
+ %shl = shl i32 %x, 31
+ %shr = ashr i32 %shl, 31
+ %inc = add i32 %shr, 1
+ ret i32 %inc
+}
+
+define <2 x i8> @flip_and_mask_splat(<2 x i8> %x) {
+; CHECK-LABEL: @flip_and_mask_splat(
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> %x, <i8 1, i8 1>
+; CHECK-NEXT: [[INC:%.*]] = and <2 x i8> [[TMP1]], <i8 1, i8 1>
+; CHECK-NEXT: ret <2 x i8> [[INC]]
+;
+ %shl = shl <2 x i8> %x, <i8 7, i8 7>
+ %shr = ashr <2 x i8> %shl, <i8 7, i8 7>
+ %inc = add <2 x i8> %shr, <i8 1, i8 1>
+ ret <2 x i8> %inc
+}
+
define i32 @test1(i32 %A) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: ret i32 %A
diff --git a/test/Transforms/InstCombine/and.ll b/test/Transforms/InstCombine/and.ll
index 8ef7870891f0..7bb9b95b3179 100644
--- a/test/Transforms/InstCombine/and.ll
+++ b/test/Transforms/InstCombine/and.ll
@@ -310,7 +310,7 @@ define i8 @test27(i8 %A) {
ret i8 %E
}
-;; This is juse a zero extending shr.
+;; This is just a zero-extending shr.
define i32 @test28(i32 %X) {
; CHECK-LABEL: @test28(
; CHECK-NEXT: [[Y1:%.*]] = lshr i32 %X, 24
diff --git a/test/Transforms/InstCombine/bit-tracking.ll b/test/Transforms/InstCombine/bit-tracking.ll
deleted file mode 100644
index 51bbc0888836..000000000000
--- a/test/Transforms/InstCombine/bit-tracking.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; This file contains various testcases that require tracking whether bits are
-; set or cleared by various instructions.
-; RUN: opt < %s -instcombine -instcombine -S |\
-; RUN: not grep %ELIM
-
-; Reduce down to a single XOR
-define i32 @test3(i32 %B) {
- %ELIMinc = and i32 %B, 1 ; <i32> [#uses=1]
- %tmp.5 = xor i32 %ELIMinc, 1 ; <i32> [#uses=1]
- %ELIM7 = and i32 %B, -2 ; <i32> [#uses=1]
- %tmp.8 = or i32 %tmp.5, %ELIM7 ; <i32> [#uses=1]
- ret i32 %tmp.8
-}
-
-; Finally, a bigger case where we chain things together. This corresponds to
-; incrementing a single-bit bitfield, which should become just an xor.
-define i32 @test4(i32 %B) {
- %ELIM3 = shl i32 %B, 31 ; <i32> [#uses=1]
- %ELIM4 = ashr i32 %ELIM3, 31 ; <i32> [#uses=1]
- %inc = add i32 %ELIM4, 1 ; <i32> [#uses=1]
- %ELIM5 = and i32 %inc, 1 ; <i32> [#uses=1]
- %ELIM7 = and i32 %B, -2 ; <i32> [#uses=1]
- %tmp.8 = or i32 %ELIM5, %ELIM7 ; <i32> [#uses=1]
- ret i32 %tmp.8
-}
-
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
index 4621d33d4388..a4375a5cd57e 100644
--- a/test/Transforms/InstCombine/cast.ll
+++ b/test/Transforms/InstCombine/cast.ll
@@ -1432,3 +1432,41 @@ define <2 x i32> @test90() {
%tmp6 = bitcast <4 x half> <half undef, half undef, half undef, half 0xH3C00> to <2 x i32>
ret <2 x i32> %tmp6
}
+
+; Do not optimize to ashr i64 (shift by 48 > 96 - 64)
+define i64 @test91(i64 %A) {
+; CHECK-LABEL: @test91(
+; CHECK-NEXT: [[B:%.*]] = sext i64 %A to i96
+; CHECK-NEXT: [[C:%.*]] = lshr i96 [[B]], 48
+; CHECK-NEXT: [[D:%.*]] = trunc i96 [[C]] to i64
+; CHECK-NEXT: ret i64 [[D]]
+;
+ %B = sext i64 %A to i96
+ %C = lshr i96 %B, 48
+ %D = trunc i96 %C to i64
+ ret i64 %D
+}
+
+; Do optimize to ashr i64 (shift by 32 <= 96 - 64)
+define i64 @test92(i64 %A) {
+; CHECK-LABEL: @test92(
+; CHECK-NEXT: [[C:%.*]] = ashr i64 %A, 32
+; CHECK-NEXT: ret i64 [[C]]
+;
+ %B = sext i64 %A to i96
+ %C = lshr i96 %B, 32
+ %D = trunc i96 %C to i64
+ ret i64 %D
+}
+
+; When optimizing to ashr i32, don't shift by more than 31.
+define i32 @test93(i32 %A) {
+; CHECK-LABEL: @test93(
+; CHECK-NEXT: [[C:%.*]] = ashr i32 %A, 31
+; CHECK-NEXT: ret i32 [[C]]
+;
+ %B = sext i32 %A to i96
+ %C = lshr i96 %B, 64
+ %D = trunc i96 %C to i32
+ ret i32 %D
+}
diff --git a/test/Transforms/InstCombine/constant-fold-iteration.ll b/test/Transforms/InstCombine/constant-fold-iteration.ll
new file mode 100644
index 000000000000..e1b692173ce8
--- /dev/null
+++ b/test/Transforms/InstCombine/constant-fold-iteration.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S -debug 2>&1 | FileCheck %s
+; REQUIRES: asserts
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+
+define i32 @a() nounwind readnone {
+entry:
+ ret i32 zext (i1 icmp eq (i32 0, i32 ptrtoint (i32 ()* @a to i32)) to i32)
+}
+; CHECK: INSTCOMBINE ITERATION #1
+; CHECK-NOT: INSTCOMBINE ITERATION #2
diff --git a/test/Transforms/InstCombine/demorgan.ll b/test/Transforms/InstCombine/demorgan.ll
index 26c2270a3fdf..8c3d3b830468 100644
--- a/test/Transforms/InstCombine/demorgan.ll
+++ b/test/Transforms/InstCombine/demorgan.ll
@@ -399,7 +399,7 @@ define i32 @demorgan_or_zext(i1 %X, i1 %Y) {
; CHECK-LABEL: @demorgan_or_zext(
; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i1 %X, %Y
; CHECK-NEXT: [[OR1:%.*]] = xor i1 [[OR1_DEMORGAN]], true
-; CHECK-NEXT: [[OR:%.*]] = zext i1 [[OR:%.*]]1 to i32
+; CHECK-NEXT: [[OR:%.*]] = zext i1 [[OR1]] to i32
; CHECK-NEXT: ret i32 [[OR]]
;
%zextX = zext i1 %X to i32
@@ -414,7 +414,7 @@ define i32 @demorgan_and_zext(i1 %X, i1 %Y) {
; CHECK-LABEL: @demorgan_and_zext(
; CHECK-NEXT: [[AND1_DEMORGAN:%.*]] = or i1 %X, %Y
; CHECK-NEXT: [[AND1:%.*]] = xor i1 [[AND1_DEMORGAN]], true
-; CHECK-NEXT: [[AND:%.*]] = zext i1 [[AND:%.*]]1 to i32
+; CHECK-NEXT: [[AND:%.*]] = zext i1 [[AND1]] to i32
; CHECK-NEXT: ret i32 [[AND]]
;
%zextX = zext i1 %X to i32
@@ -429,7 +429,7 @@ define <2 x i32> @demorgan_or_zext_vec(<2 x i1> %X, <2 x i1> %Y) {
; CHECK-LABEL: @demorgan_or_zext_vec(
; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and <2 x i1> %X, %Y
; CHECK-NEXT: [[OR1:%.*]] = xor <2 x i1> [[OR1_DEMORGAN]], <i1 true, i1 true>
-; CHECK-NEXT: [[OR:%.*]] = zext <2 x i1> [[OR:%.*]]1 to <2 x i32>
+; CHECK-NEXT: [[OR:%.*]] = zext <2 x i1> [[OR1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[OR]]
;
%zextX = zext <2 x i1> %X to <2 x i32>
@@ -444,7 +444,7 @@ define <2 x i32> @demorgan_and_zext_vec(<2 x i1> %X, <2 x i1> %Y) {
; CHECK-LABEL: @demorgan_and_zext_vec(
; CHECK-NEXT: [[AND1_DEMORGAN:%.*]] = or <2 x i1> %X, %Y
; CHECK-NEXT: [[AND1:%.*]] = xor <2 x i1> [[AND1_DEMORGAN]], <i1 true, i1 true>
-; CHECK-NEXT: [[AND:%.*]] = zext <2 x i1> [[AND:%.*]]1 to <2 x i32>
+; CHECK-NEXT: [[AND:%.*]] = zext <2 x i1> [[AND1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[AND]]
;
%zextX = zext <2 x i1> %X to <2 x i32>
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index edfa9a102917..6f657b190454 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -695,6 +695,21 @@ define i1 @test48(i32 %X, i32 %Y, i32 %Z) {
ret i1 %C
}
+; The above transform only works for equality predicates.
+
+define i1 @PR32949(i32 %X, i32 %Y, i32 %Z) {
+; CHECK-LABEL: @PR32949(
+; CHECK-NEXT: [[A:%.*]] = sdiv exact i32 %X, %Z
+; CHECK-NEXT: [[B:%.*]] = sdiv exact i32 %Y, %Z
+; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %A = sdiv exact i32 %X, %Z
+ %B = sdiv exact i32 %Y, %Z
+ %C = icmp sgt i32 %A, %B
+ ret i1 %C
+}
+
; PR8469
define <2 x i1> @test49(<2 x i32> %tmp3) {
; CHECK-LABEL: @test49(
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 66ab7f48aeff..5654b265da58 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
%overflow.result = type {i8, i1}
@@ -283,14 +284,24 @@ define i32 @cttz(i32 %a) {
define i1 @cttz_knownbits(i32 %arg) {
; CHECK-LABEL: @cttz_knownbits(
+; CHECK-NEXT: ret i1 false
+;
+ %or = or i32 %arg, 4
+ %cnt = call i32 @llvm.cttz.i32(i32 %or, i1 true) nounwind readnone
+ %res = icmp eq i32 %cnt, 4
+ ret i1 %res
+}
+
+define i1 @cttz_knownbits2(i32 %arg) {
+; CHECK-LABEL: @cttz_knownbits2(
; CHECK-NEXT: [[OR:%.*]] = or i32 [[ARG:%.*]], 4
; CHECK-NEXT: [[CNT:%.*]] = call i32 @llvm.cttz.i32(i32 [[OR]], i1 true)
-; CHECK-NEXT: [[RES:%.*]] = icmp eq i32 [[CNT]], 4
+; CHECK-NEXT: [[RES:%.*]] = icmp eq i32 [[CNT]], 2
; CHECK-NEXT: ret i1 [[RES]]
;
%or = or i32 %arg, 4
%cnt = call i32 @llvm.cttz.i32(i32 %or, i1 true) nounwind readnone
- %res = icmp eq i32 %cnt, 4
+ %res = icmp eq i32 %cnt, 2
ret i1 %res
}
@@ -306,14 +317,24 @@ define i8 @ctlz(i8 %a) {
define i1 @ctlz_knownbits(i8 %arg) {
; CHECK-LABEL: @ctlz_knownbits(
+; CHECK-NEXT: ret i1 false
+;
+ %or = or i8 %arg, 32
+ %cnt = call i8 @llvm.ctlz.i8(i8 %or, i1 true) nounwind readnone
+ %res = icmp eq i8 %cnt, 4
+ ret i1 %res
+}
+
+define i1 @ctlz_knownbits2(i8 %arg) {
+; CHECK-LABEL: @ctlz_knownbits2(
; CHECK-NEXT: [[OR:%.*]] = or i8 [[ARG:%.*]], 32
; CHECK-NEXT: [[CNT:%.*]] = call i8 @llvm.ctlz.i8(i8 [[OR]], i1 true)
-; CHECK-NEXT: [[RES:%.*]] = icmp eq i8 [[CNT]], 4
+; CHECK-NEXT: [[RES:%.*]] = icmp eq i8 [[CNT]], 2
; CHECK-NEXT: ret i1 [[RES]]
;
%or = or i8 %arg, 32
%cnt = call i8 @llvm.ctlz.i8(i8 %or, i1 true) nounwind readnone
- %res = icmp eq i8 %cnt, 4
+ %res = icmp eq i8 %cnt, 2
ret i1 %res
}
diff --git a/test/Transforms/InstCombine/logical-select.ll b/test/Transforms/InstCombine/logical-select.ll
index 3ab40c4de92d..7f0bd23eb8a5 100644
--- a/test/Transforms/InstCombine/logical-select.ll
+++ b/test/Transforms/InstCombine/logical-select.ll
@@ -62,6 +62,81 @@ define i32 @poo(i32 %a, i32 %b, i32 %c, i32 %d) {
ret i32 %t3
}
+; TODO: For the next 4 tests, are there potential canonicalizations and/or folds for these
+; in InstCombine? Independent of that, tests like this that may not show any transforms
+; still have value because they can help identify conflicting canonicalization rules that
+; lead to infinite looping.
+
+; PR32791 - https://bugs.llvm.org//show_bug.cgi?id=32791
+; Fold two selects with inverted predicates and zero operands.
+define i32 @fold_inverted_icmp_preds(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: @fold_inverted_icmp_preds(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 %a, %b
+; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CMP1]], i32 %c, i32 0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 %a, %b
+; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 %d, i32 0
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEL1]], [[SEL2]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %cmp1 = icmp slt i32 %a, %b
+ %sel1 = select i1 %cmp1, i32 %c, i32 0
+ %cmp2 = icmp sge i32 %a, %b
+ %sel2 = select i1 %cmp2, i32 %d, i32 0
+ %or = or i32 %sel1, %sel2
+ ret i32 %or
+}
+
+define i32 @fold_inverted_icmp_preds_reverse(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: @fold_inverted_icmp_preds_reverse(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 %a, %b
+; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CMP1]], i32 0, i32 %c
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 %a, %b
+; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 0, i32 %d
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEL1]], [[SEL2]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %cmp1 = icmp slt i32 %a, %b
+ %sel1 = select i1 %cmp1, i32 0, i32 %c
+ %cmp2 = icmp sge i32 %a, %b
+ %sel2 = select i1 %cmp2, i32 0, i32 %d
+ %or = or i32 %sel1, %sel2
+ ret i32 %or
+}
+
+define i32 @fold_inverted_fcmp_preds(float %a, float %b, i32 %c, i32 %d) {
+; CHECK-LABEL: @fold_inverted_fcmp_preds(
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float %a, %b
+; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CMP1]], i32 %c, i32 0
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp uge float %a, %b
+; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 %d, i32 0
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEL1]], [[SEL2]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %cmp1 = fcmp olt float %a, %b
+ %sel1 = select i1 %cmp1, i32 %c, i32 0
+ %cmp2 = fcmp uge float %a, %b
+ %sel2 = select i1 %cmp2, i32 %d, i32 0
+ %or = or i32 %sel1, %sel2
+ ret i32 %or
+}
+
+define <2 x i32> @fold_inverted_icmp_vector_preds(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; CHECK-LABEL: @fold_inverted_icmp_vector_preds(
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne <2 x i32> %a, %b
+; CHECK-NEXT: [[SEL1:%.*]] = select <2 x i1> [[CMP1]], <2 x i32> %c, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <2 x i32> %a, %b
+; CHECK-NEXT: [[SEL2:%.*]] = select <2 x i1> [[CMP2]], <2 x i32> %d, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[SEL1]], [[SEL2]]
+; CHECK-NEXT: ret <2 x i32> [[OR]]
+;
+ %cmp1 = icmp ne <2 x i32> %a, %b
+ %sel1 = select <2 x i1> %cmp1, <2 x i32> %c, <2 x i32> <i32 0, i32 0>
+ %cmp2 = icmp eq <2 x i32> %a, %b
+ %sel2 = select <2 x i1> %cmp2, <2 x i32> %d, <2 x i32> <i32 0, i32 0>
+ %or = or <2 x i32> %sel1, %sel2
+ ret <2 x i32> %or
+}
+
define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @par(
; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 %a, %b
diff --git a/test/Transforms/InstCombine/not.ll b/test/Transforms/InstCombine/not.ll
index 2760d4ae044d..6ff0a50318d2 100644
--- a/test/Transforms/InstCombine/not.ll
+++ b/test/Transforms/InstCombine/not.ll
@@ -11,8 +11,8 @@ define i32 @test1(i32 %A) {
define i1 @invert_icmp(i32 %A, i32 %B) {
; CHECK-LABEL: @invert_icmp(
-; CHECK-NEXT: [[NOT:%.*]] = icmp sgt i32 %A, %B
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 %A, %B
+; CHECK-NEXT: ret i1 [[CMP]]
;
%cmp = icmp sle i32 %A, %B
%not = xor i1 %cmp, true
@@ -23,8 +23,8 @@ define i1 @invert_icmp(i32 %A, i32 %B) {
define i1 @invert_fcmp(float %X, float %Y) {
; CHECK-LABEL: @invert_fcmp(
-; CHECK-NEXT: [[NOT:%.*]] = fcmp uge float %X, %Y
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp uge float %X, %Y
+; CHECK-NEXT: ret i1 [[CMP]]
;
%cmp = fcmp olt float %X, %Y
%not = xor i1 %cmp, true
@@ -48,11 +48,75 @@ define zeroext i8 @test6(i32 %a, i32 %b) {
define <2 x i1> @test7(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[RET:%.*]] = icmp sgt <2 x i32> %A, %B
-; CHECK-NEXT: ret <2 x i1> [[RET]]
+; CHECK-NEXT: [[COND:%.*]] = icmp sgt <2 x i32> %A, %B
+; CHECK-NEXT: ret <2 x i1> [[COND]]
;
%cond = icmp sle <2 x i32> %A, %B
%Ret = xor <2 x i1> %cond, <i1 true, i1 true>
ret <2 x i1> %Ret
}
+define i32 @not_ashr_not(i32 %A, i32 %B) {
+; CHECK-LABEL: @not_ashr_not(
+; CHECK-NEXT: [[NOT2:%.*]] = ashr i32 %A, %B
+; CHECK-NEXT: ret i32 [[NOT2]]
+;
+ %not1 = xor i32 %A, -1
+ %ashr = ashr i32 %not1, %B
+ %not2 = xor i32 %ashr, -1
+ ret i32 %not2
+}
+
+define i8 @not_ashr_const(i8 %x) {
+; CHECK-LABEL: @not_ashr_const(
+; CHECK-NEXT: [[NOT:%.*]] = lshr i8 41, %x
+; CHECK-NEXT: ret i8 [[NOT]]
+;
+ %shr = ashr i8 -42, %x
+ %not = xor i8 %shr, -1
+ ret i8 %not
+}
+
+define <2 x i8> @not_ashr_const_splat(<2 x i8> %x) {
+; CHECK-LABEL: @not_ashr_const_splat(
+; CHECK-NEXT: [[NOT:%.*]] = lshr <2 x i8> <i8 41, i8 41>, %x
+; CHECK-NEXT: ret <2 x i8> [[NOT]]
+;
+ %shr = ashr <2 x i8> <i8 -42, i8 -42>, %x
+ %not = xor <2 x i8> %shr, <i8 -1, i8 -1>
+ ret <2 x i8> %not
+}
+
+; We can't get rid of the 'not' on a logical shift of a negative constant.
+
+define i8 @not_lshr_const_negative(i8 %x) {
+; CHECK-LABEL: @not_lshr_const_negative(
+; CHECK-NEXT: [[SHR:%.*]] = lshr i8 -42, %x
+; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[SHR]], -1
+; CHECK-NEXT: ret i8 [[NOT]]
+;
+ %shr = lshr i8 -42, %x
+ %not = xor i8 %shr, -1
+ ret i8 %not
+}
+
+define i8 @not_lshr_const(i8 %x) {
+; CHECK-LABEL: @not_lshr_const(
+; CHECK-NEXT: [[NOT:%.*]] = ashr i8 -43, %x
+; CHECK-NEXT: ret i8 [[NOT]]
+;
+ %shr = lshr i8 42, %x
+ %not = xor i8 %shr, -1
+ ret i8 %not
+}
+
+define <2 x i8> @not_lshr_const_splat(<2 x i8> %x) {
+; CHECK-LABEL: @not_lshr_const_splat(
+; CHECK-NEXT: [[NOT:%.*]] = ashr <2 x i8> <i8 -43, i8 -43>, %x
+; CHECK-NEXT: ret <2 x i8> [[NOT]]
+;
+ %shr = lshr <2 x i8> <i8 42, i8 42>, %x
+ %not = xor <2 x i8> %shr, <i8 -1, i8 -1>
+ ret <2 x i8> %not
+}
+
diff --git a/test/Transforms/InstCombine/or-xor.ll b/test/Transforms/InstCombine/or-xor.ll
index ec5b71656a47..f2bc290d79a4 100644
--- a/test/Transforms/InstCombine/or-xor.ll
+++ b/test/Transforms/InstCombine/or-xor.ll
@@ -230,3 +230,73 @@ define i32 @test16(i32 %a, i32 %b) {
%xor = or i32 %and1, %and2
ret i32 %xor
}
+
+define i8 @not_or(i8 %x) {
+; CHECK-LABEL: @not_or(
+; CHECK-NEXT: [[NOTX:%.*]] = or i8 %x, 7
+; CHECK-NEXT: [[OR:%.*]] = xor i8 [[NOTX]], -8
+; CHECK-NEXT: ret i8 [[OR]]
+;
+ %notx = xor i8 %x, -1
+ %or = or i8 %notx, 7
+ ret i8 %or
+}
+
+define i8 @not_or_xor(i8 %x) {
+; CHECK-LABEL: @not_or_xor(
+; CHECK-NEXT: [[NOTX:%.*]] = or i8 %x, 7
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[NOTX]], -12
+; CHECK-NEXT: ret i8 [[XOR]]
+;
+ %notx = xor i8 %x, -1
+ %or = or i8 %notx, 7
+ %xor = xor i8 %or, 12
+ ret i8 %xor
+}
+
+define i8 @xor_or(i8 %x) {
+; CHECK-LABEL: @xor_or(
+; CHECK-NEXT: [[XOR:%.*]] = or i8 %x, 7
+; CHECK-NEXT: [[OR:%.*]] = xor i8 [[XOR]], 32
+; CHECK-NEXT: ret i8 [[OR]]
+;
+ %xor = xor i8 %x, 32
+ %or = or i8 %xor, 7
+ ret i8 %or
+}
+
+define i8 @xor_or2(i8 %x) {
+; CHECK-LABEL: @xor_or2(
+; CHECK-NEXT: [[XOR:%.*]] = or i8 %x, 7
+; CHECK-NEXT: [[OR:%.*]] = xor i8 [[XOR]], 32
+; CHECK-NEXT: ret i8 [[OR]]
+;
+ %xor = xor i8 %x, 33
+ %or = or i8 %xor, 7
+ ret i8 %or
+}
+
+define i8 @xor_or_xor(i8 %x) {
+; CHECK-LABEL: @xor_or_xor(
+; CHECK-NEXT: [[XOR1:%.*]] = or i8 %x, 7
+; CHECK-NEXT: [[XOR2:%.*]] = xor i8 [[XOR1]], 44
+; CHECK-NEXT: ret i8 [[XOR2]]
+;
+ %xor1 = xor i8 %x, 33
+ %or = or i8 %xor1, 7
+ %xor2 = xor i8 %or, 12
+ ret i8 %xor2
+}
+
+define i8 @or_xor_or(i8 %x) {
+; CHECK-LABEL: @or_xor_or(
+; CHECK-NEXT: [[XOR:%.*]] = or i8 %x, 39
+; CHECK-NEXT: [[OR2:%.*]] = xor i8 [[XOR]], 8
+; CHECK-NEXT: ret i8 [[OR2]]
+;
+ %or1 = or i8 %x, 33
+ %xor = xor i8 %or1, 12
+ %or2 = or i8 %xor, 7
+ ret i8 %or2
+}
+
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
index bfafd66ebb41..764fe4503b5e 100644
--- a/test/Transforms/InstCombine/or.ll
+++ b/test/Transforms/InstCombine/or.ll
@@ -3,115 +3,6 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-define i32 @test1(i32 %A) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: ret i32 %A
-;
- %B = or i32 %A, 0
- ret i32 %B
-}
-
-define i32 @test2(i32 %A) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: ret i32 -1
-;
- %B = or i32 %A, -1
- ret i32 %B
-}
-
-define i8 @test2a(i8 %A) {
-; CHECK-LABEL: @test2a(
-; CHECK-NEXT: ret i8 -1
-;
- %B = or i8 %A, -1
- ret i8 %B
-}
-
-define i1 @test3(i1 %A) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: ret i1 %A
-;
- %B = or i1 %A, false
- ret i1 %B
-}
-
-define i1 @test4(i1 %A) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: ret i1 true
-;
- %B = or i1 %A, true
- ret i1 %B
-}
-
-define i1 @test5(i1 %A) {
-; CHECK-LABEL: @test5(
-; CHECK-NEXT: ret i1 %A
-;
- %B = or i1 %A, %A
- ret i1 %B
-}
-
-define i32 @test6(i32 %A) {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: ret i32 %A
-;
- %B = or i32 %A, %A
- ret i32 %B
-}
-
-; A | ~A == -1
-define i32 @test7(i32 %A) {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: ret i32 -1
-;
- %NotA = xor i32 -1, %A
- %B = or i32 %A, %NotA
- ret i32 %B
-}
-
-define i8 @test8(i8 %A) {
-; CHECK-LABEL: @test8(
-; CHECK-NEXT: ret i8 -1
-;
- %B = or i8 %A, -2
- %C = or i8 %B, 1
- ret i8 %C
-}
-
-; Test that (A|c1)|(B|c2) == (A|B)|(c1|c2)
-define i8 @test9(i8 %A, i8 %B) {
-; CHECK-LABEL: @test9(
-; CHECK-NEXT: ret i8 -1
-;
- %C = or i8 %A, 1
- %D = or i8 %B, -2
- %E = or i8 %C, %D
- ret i8 %E
-}
-
-define i8 @test10(i8 %A) {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT: ret i8 -2
-;
- %B = or i8 %A, 1
- %C = and i8 %B, -2
- ; (X & C1) | C2 --> (X | C2) & (C1|C2)
- %D = or i8 %C, -2
- ret i8 %D
-}
-
-define i8 @test11(i8 %A) {
-; CHECK-LABEL: @test11(
-; CHECK-NEXT: ret i8 -1
-;
- %B = or i8 %A, -2
- %C = xor i8 %B, 13
- ; (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
- %D = or i8 %C, 1
- %E = xor i8 %D, 12
- ret i8 %E
-}
-
define i32 @test12(i32 %A) {
; Should be eliminated
; CHECK-LABEL: @test12(
diff --git a/test/Transforms/InstCombine/sext.ll b/test/Transforms/InstCombine/sext.ll
index 4cdd080fb0e0..46406ac2f788 100644
--- a/test/Transforms/InstCombine/sext.ll
+++ b/test/Transforms/InstCombine/sext.ll
@@ -128,7 +128,7 @@ F:
define i32 @test10(i32 %i) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[B1:%.*]] = shl i32 %i, 30
-; CHECK-NEXT: [[B:%.*]] = ashr exact i32 [[B:%.*]]1, 30
+; CHECK-NEXT: [[B:%.*]] = ashr exact i32 [[B1]], 30
; CHECK-NEXT: ret i32 [[B]]
;
%tmp12 = trunc i32 %i to i8
diff --git a/test/Transforms/InstCombine/trunc.ll b/test/Transforms/InstCombine/trunc.ll
index 5597b578f017..dd86e5a907b8 100644
--- a/test/Transforms/InstCombine/trunc.ll
+++ b/test/Transforms/InstCombine/trunc.ll
@@ -24,7 +24,7 @@ define i64 @test2(i64 %a) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[B:%.*]] = trunc i64 %a to i32
; CHECK-NEXT: [[D1:%.*]] = shl i64 %a, 36
-; CHECK-NEXT: [[D:%.*]] = ashr exact i64 [[D:%.*]]1, 36
+; CHECK-NEXT: [[D:%.*]] = ashr exact i64 [[D1]], 36
; CHECK-NEXT: call void @use(i32 [[B]])
; CHECK-NEXT: ret i64 [[D]]
;
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index 5f27634da19c..00efbe00b08d 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -2,30 +2,6 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-define i16 @test1(float %f) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[TMP281:%.*]] = fadd float %f, -1.000000e+00
-; CHECK-NEXT: [[TMP373:%.*]] = fmul float [[TMP281]], 5.000000e-01
-; CHECK-NEXT: [[TMP374:%.*]] = insertelement <4 x float> undef, float [[TMP373]], i32 0
-; CHECK-NEXT: [[TMP48:%.*]] = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> [[TMP374]], <4 x float> <float 6.553500e+04, float undef, float undef, float undef>)
-; CHECK-NEXT: [[TMP59:%.*]] = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> [[TMP48]], <4 x float> <float 0.000000e+00, float undef, float undef, float undef>)
-; CHECK-NEXT: [[TMP_UPGRD_1:%.*]] = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[TMP59]])
-; CHECK-NEXT: [[TMP69:%.*]] = trunc i32 [[TMP_UPGRD_1]] to i16
-; CHECK-NEXT: ret i16 [[TMP69]]
-;
- %tmp = insertelement <4 x float> undef, float %f, i32 0
- %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1
- %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
- %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
- %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
- %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
- %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )
- %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer )
- %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )
- %tmp69 = trunc i32 %tmp.upgrd.1 to i16
- ret i16 %tmp69
-}
-
define i32 @test2(float %f) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[TMP5:%.*]] = fmul float %f, %f
@@ -42,77 +18,6 @@ define i32 @test2(float %f) {
ret i32 %tmp21
}
-define i64 @test3(float %f, double %d) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[V00:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[V00]])
-; CHECK-NEXT: [[V10:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> [[V10]])
-; CHECK-NEXT: [[V20:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[V20]])
-; CHECK-NEXT: [[V30:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> [[V30]])
-; CHECK-NEXT: [[V40:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> [[V40]])
-; CHECK-NEXT: [[V50:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> [[V50]])
-; CHECK-NEXT: [[V60:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> [[V60]])
-; CHECK-NEXT: [[V70:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> [[V70]])
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]]
-; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]]
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
-; CHECK-NEXT: ret i64 [[TMP15]]
-;
- %v00 = insertelement <4 x float> undef, float %f, i32 0
- %v01 = insertelement <4 x float> %v00, float 0.000000e+00, i32 1
- %v02 = insertelement <4 x float> %v01, float 0.000000e+00, i32 2
- %v03 = insertelement <4 x float> %v02, float 0.000000e+00, i32 3
- %tmp0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %v03)
- %v10 = insertelement <4 x float> undef, float %f, i32 0
- %v11 = insertelement <4 x float> %v10, float 0.000000e+00, i32 1
- %v12 = insertelement <4 x float> %v11, float 0.000000e+00, i32 2
- %v13 = insertelement <4 x float> %v12, float 0.000000e+00, i32 3
- %tmp1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %v13)
- %v20 = insertelement <4 x float> undef, float %f, i32 0
- %v21 = insertelement <4 x float> %v20, float 0.000000e+00, i32 1
- %v22 = insertelement <4 x float> %v21, float 0.000000e+00, i32 2
- %v23 = insertelement <4 x float> %v22, float 0.000000e+00, i32 3
- %tmp2 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v23)
- %v30 = insertelement <4 x float> undef, float %f, i32 0
- %v31 = insertelement <4 x float> %v30, float 0.000000e+00, i32 1
- %v32 = insertelement <4 x float> %v31, float 0.000000e+00, i32 2
- %v33 = insertelement <4 x float> %v32, float 0.000000e+00, i32 3
- %tmp3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %v33)
- %v40 = insertelement <2 x double> undef, double %d, i32 0
- %v41 = insertelement <2 x double> %v40, double 0.000000e+00, i32 1
- %tmp4 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %v41)
- %v50 = insertelement <2 x double> undef, double %d, i32 0
- %v51 = insertelement <2 x double> %v50, double 0.000000e+00, i32 1
- %tmp5 = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %v51)
- %v60 = insertelement <2 x double> undef, double %d, i32 0
- %v61 = insertelement <2 x double> %v60, double 0.000000e+00, i32 1
- %tmp6 = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %v61)
- %v70 = insertelement <2 x double> undef, double %d, i32 0
- %v71 = insertelement <2 x double> %v70, double 0.000000e+00, i32 1
- %tmp7 = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %v71)
- %tmp8 = add i32 %tmp0, %tmp2
- %tmp9 = add i32 %tmp4, %tmp6
- %tmp10 = add i32 %tmp8, %tmp9
- %tmp11 = sext i32 %tmp10 to i64
- %tmp12 = add i64 %tmp1, %tmp3
- %tmp13 = add i64 %tmp5, %tmp7
- %tmp14 = add i64 %tmp12, %tmp13
- %tmp15 = add i64 %tmp11, %tmp14
- ret i64 %tmp15
-}
-
define void @get_image() nounwind {
; CHECK-LABEL: @get_image(
; CHECK-NEXT: entry:
@@ -156,18 +61,6 @@ entry:
}
declare i32 @fgetc(i8*)
-declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
-declare i32 @llvm.x86.sse.cvtss2si(<4 x float>)
-declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>)
-declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
-declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>)
-declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>)
-declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>)
-declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>)
-declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)
define <4 x float> @dead_shuffle_elt(<4 x float> %x, <2 x float> %y) nounwind {
; CHECK-LABEL: @dead_shuffle_elt(
@@ -248,4 +141,3 @@ define <2 x i64> @PR24922(<2 x i64> %v) {
%result = select <2 x i1> <i1 icmp eq (i64 extractelement (<2 x i64> bitcast (<4 x i32> <i32 15, i32 15, i32 15, i32 15> to <2 x i64>), i64 0), i64 0), i1 true>, <2 x i64> %v, <2 x i64> zeroinitializer
ret <2 x i64> %result
}
-
diff --git a/test/Transforms/InstCombine/xor2.ll b/test/Transforms/InstCombine/xor2.ll
index f817ac5ca40c..3afbf632f6e1 100644
--- a/test/Transforms/InstCombine/xor2.ll
+++ b/test/Transforms/InstCombine/xor2.ll
@@ -57,17 +57,6 @@ define i32 @test3(i32 %tmp1) {
ret i32 %ov110
}
-define i32 @test4(i32 %A, i32 %B) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 %A, %B
-; CHECK-NEXT: ret i32 [[TMP1]]
-;
- %1 = xor i32 %A, -1
- %2 = ashr i32 %1, %B
- %3 = xor i32 %2, -1
- ret i32 %3
-}
-
; defect-2 in rdar://12329730
; (X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
; where the "X" has more than one use
diff --git a/test/Transforms/InstNamer/basic.ll b/test/Transforms/InstNamer/basic.ll
new file mode 100644
index 000000000000..4c819246b90b
--- /dev/null
+++ b/test/Transforms/InstNamer/basic.ll
@@ -0,0 +1,19 @@
+; RUN: opt -S -instnamer < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @f_0(i32) {
+; CHECK-LABEL: @f_0(
+; CHECK: bb:
+; CHECK-NEXT: %tmp = add i32 %arg, 2
+; CHECK-NEXT: br label %bb1
+; CHECK: bb1:
+; CHECK-NEXT: ret i32 %tmp
+
+ %2 = add i32 %0, 2
+ br label %3
+
+; <label>:3:
+ ret i32 %2
+}
diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll
index e059d77f1fa8..427ea655fcb2 100644
--- a/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/test/Transforms/InstSimplify/AndOrXor.ll
@@ -628,3 +628,176 @@ define i32 @test46_commuted_and(i32 %a, i32 %b) {
%or = or i32 %xor, %and
ret i32 %or
}
+
+; (~A ^ B) | (A & B) -> ~A ^ B
+
+define i32 @test47(i32 %a, i32 %b) {
+; CHECK-LABEL: @test47(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[NEGA]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %a, %b
+ %xor = xor i32 %nega, %b
+ %or = or i32 %xor, %and
+ ret i32 %or
+}
+
+define i32 @test48(i32 %a, i32 %b) {
+; CHECK-LABEL: @test48(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B:%.*]], [[NEGA]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %a, %b
+ %xor = xor i32 %b, %nega
+ %or = or i32 %xor, %and
+ ret i32 %or
+}
+
+define i32 @test49(i32 %a, i32 %b) {
+; CHECK-LABEL: @test49(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B:%.*]], [[NEGA]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %b, %a
+ %xor = xor i32 %b, %nega
+ %or = or i32 %xor, %and
+ ret i32 %or
+}
+
+define i32 @test50(i32 %a, i32 %b) {
+; CHECK-LABEL: @test50(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[NEGA]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %b, %a
+ %xor = xor i32 %nega, %b
+ %or = or i32 %xor, %and
+ ret i32 %or
+}
+
+define i32 @test51(i32 %a, i32 %b) {
+; CHECK-LABEL: @test51(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[NEGA]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %a, %b
+ %xor = xor i32 %nega, %b
+ %or = or i32 %and, %xor
+ ret i32 %or
+}
+
+define i32 @test52(i32 %a, i32 %b) {
+; CHECK-LABEL: @test52(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B:%.*]], [[NEGA]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %a, %b
+ %xor = xor i32 %b, %nega
+ %or = or i32 %and, %xor
+ ret i32 %or
+}
+
+define i32 @test53(i32 %a, i32 %b) {
+; CHECK-LABEL: @test53(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B:%.*]], [[NEGA]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %b, %a
+ %xor = xor i32 %b, %nega
+ %or = or i32 %and, %xor
+ ret i32 %or
+}
+
+define i32 @test54(i32 %a, i32 %b) {
+; CHECK-LABEL: @test54(
+; CHECK-NEXT: [[NEGA:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[NEGA]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %nega = xor i32 %a, -1
+ %and = and i32 %b, %a
+ %xor = xor i32 %nega, %b
+ %or = or i32 %and, %xor
+ ret i32 %or
+}
+
+define i8 @lshr_perfect_mask(i8 %x) {
+; CHECK-LABEL: @lshr_perfect_mask(
+; CHECK-NEXT: [[SH:%.*]] = lshr i8 %x, 5
+; CHECK-NEXT: [[MASK:%.*]] = and i8 [[SH]], 7
+; CHECK-NEXT: ret i8 [[MASK]]
+;
+ %sh = lshr i8 %x, 5
+ %mask = and i8 %sh, 7 ; 0x07
+ ret i8 %mask
+}
+
+define <2 x i8> @lshr_oversized_mask_splat(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_oversized_mask_splat(
+; CHECK-NEXT: [[SH:%.*]] = lshr <2 x i8> %x, <i8 5, i8 5>
+; CHECK-NEXT: [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -121, i8 -121>
+; CHECK-NEXT: ret <2 x i8> [[MASK]]
+;
+ %sh = lshr <2 x i8> %x, <i8 5, i8 5>
+ %mask = and <2 x i8> %sh, <i8 135, i8 135> ; 0x87
+ ret <2 x i8> %mask
+}
+
+define i8 @lshr_undersized_mask(i8 %x) {
+; CHECK-LABEL: @lshr_undersized_mask(
+; CHECK-NEXT: [[SH:%.*]] = lshr i8 %x, 5
+; CHECK-NEXT: [[MASK:%.*]] = and i8 [[SH]], -2
+; CHECK-NEXT: ret i8 [[MASK]]
+;
+ %sh = lshr i8 %x, 5
+ %mask = and i8 %sh, -2 ; 0xFE
+ ret i8 %mask
+}
+
+define <2 x i8> @shl_perfect_mask_splat(<2 x i8> %x) {
+; CHECK-LABEL: @shl_perfect_mask_splat(
+; CHECK-NEXT: [[SH:%.*]] = shl <2 x i8> %x, <i8 6, i8 6>
+; CHECK-NEXT: [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -64, i8 -64>
+; CHECK-NEXT: ret <2 x i8> [[MASK]]
+;
+ %sh = shl <2 x i8> %x, <i8 6, i8 6>
+ %mask = and <2 x i8> %sh, <i8 192, i8 192> ; 0xC0
+ ret <2 x i8> %mask
+}
+
+define i8 @shl_oversized_mask(i8 %x) {
+; CHECK-LABEL: @shl_oversized_mask(
+; CHECK-NEXT: [[SH:%.*]] = shl i8 %x, 6
+; CHECK-NEXT: [[MASK:%.*]] = and i8 [[SH]], -61
+; CHECK-NEXT: ret i8 [[MASK]]
+;
+ %sh = shl i8 %x, 6
+ %mask = and i8 %sh, 195 ; 0xC3
+ ret i8 %mask
+}
+
+define <2 x i8> @shl_undersized_mask_splat(<2 x i8> %x) {
+; CHECK-LABEL: @shl_undersized_mask_splat(
+; CHECK-NEXT: [[SH:%.*]] = shl <2 x i8> [[X:%.*]], <i8 6, i8 6>
+; CHECK-NEXT: [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -120, i8 -120>
+; CHECK-NEXT: ret <2 x i8> [[MASK]]
+;
+ %sh = shl <2 x i8> %x, <i8 6, i8 6>
+ %mask = and <2 x i8> %sh, <i8 136, i8 136> ; 0x88
+ ret <2 x i8> %mask
+}
+
diff --git a/test/Transforms/InstSimplify/apint-or.ll b/test/Transforms/InstSimplify/apint-or.ll
deleted file mode 100644
index e3dc2c48fb40..000000000000
--- a/test/Transforms/InstSimplify/apint-or.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; NOTE: Assertions have been autogenerated by update_test_checks.py
-; RUN: opt < %s -instsimplify -S | FileCheck %s
-
-; Test the case where integer BitWidth <= 64 && BitWidth % 2 != 0.
-define i39 @test1(i39 %V, i39 %M) {
-; CHECK-LABEL: @test1(
-; CHECK: [[N:%.*]] = and i39 %M, -274877906944
-; CHECK-NEXT: [[A:%.*]] = add i39 %V, [[N]]
-; CHECK-NEXT: ret i39 [[A]]
-;
- ;; If we have: ((V + N) & C1) | (V & C2)
- ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
- ;; replace with V+N.
- %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
- %N = and i39 %M, 274877906944
- %A = add i39 %V, %N
- %B = and i39 %A, %C1
- %D = and i39 %V, 274877906943
- %R = or i39 %B, %D
- ret i39 %R
-}
-
-define i7 @test2(i7 %X) {
-; CHECK-LABEL: @test2(
-; CHECK: ret i7 %X
-;
- %Y = or i7 %X, 0
- ret i7 %Y
-}
-
-define i17 @test3(i17 %X) {
-; CHECK-LABEL: @test3(
-; CHECK: ret i17 -1
-;
- %Y = or i17 %X, -1
- ret i17 %Y
-}
-
-; Test the case where Integer BitWidth > 64 && BitWidth <= 1024.
-define i399 @test4(i399 %V, i399 %M) {
-; CHECK-LABEL: @test4(
-; CHECK: [[N:%.*]] = and i399 %M, 18446742974197923840
-; CHECK-NEXT: [[A:%.*]] = add i399 %V, [[N]]
-; CHECK-NEXT: ret i399 [[A]]
-;
- ;; If we have: ((V + N) & C1) | (V & C2)
- ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
- ;; replace with V+N.
- %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
- %N = and i399 %M, 18446742974197923840
- %A = add i399 %V, %N
- %B = and i399 %A, %C1
- %D = and i399 %V, 274877906943
- %R = or i399 %B, %D
- ret i399 %R
-}
-
-define i777 @test5(i777 %X) {
-; CHECK-LABEL: @test5(
-; CHECK: ret i777 %X
-;
- %Y = or i777 %X, 0
- ret i777 %Y
-}
-
-define i117 @test6(i117 %X) {
-; CHECK-LABEL: @test6(
-; CHECK: ret i117 -1
-;
- %Y = or i117 %X, -1
- ret i117 %Y
-}
diff --git a/test/Transforms/InstSimplify/compare.ll b/test/Transforms/InstSimplify/compare.ll
index 883bf31ff77a..d6f1b634102f 100644
--- a/test/Transforms/InstSimplify/compare.ll
+++ b/test/Transforms/InstSimplify/compare.ll
@@ -598,11 +598,14 @@ define i1 @sdiv_exact_equality(i32 %Z) {
ret i1 %C
}
-; FIXME: But not other preds: PR32949 - https://bugs.llvm.org/show_bug.cgi?id=32949
+; But not other preds: PR32949 - https://bugs.llvm.org/show_bug.cgi?id=32949
define i1 @sdiv_exact_not_equality(i32 %Z) {
; CHECK-LABEL: @sdiv_exact_not_equality(
-; CHECK-NEXT: ret i1 true
+; CHECK-NEXT: [[A:%.*]] = sdiv exact i32 10, %Z
+; CHECK-NEXT: [[B:%.*]] = sdiv exact i32 20, %Z
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
;
%A = sdiv exact i32 10, %Z
%B = sdiv exact i32 20, %Z
diff --git a/test/Transforms/InstSimplify/or.ll b/test/Transforms/InstSimplify/or.ll
new file mode 100644
index 000000000000..2c5b6181bc6c
--- /dev/null
+++ b/test/Transforms/InstSimplify/or.ll
@@ -0,0 +1,181 @@
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: ret i32 %A
+;
+ %B = or i32 %A, 0
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: ret i32 -1
+;
+ %B = or i32 %A, -1
+ ret i32 %B
+}
+
+define i8 @test2a(i8 %A) {
+; CHECK-LABEL: @test2a(
+; CHECK-NEXT: ret i8 -1
+;
+ %B = or i8 %A, -1
+ ret i8 %B
+}
+
+define i1 @test3(i1 %A) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: ret i1 %A
+;
+ %B = or i1 %A, false
+ ret i1 %B
+}
+
+define i1 @test4(i1 %A) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: ret i1 true
+;
+ %B = or i1 %A, true
+ ret i1 %B
+}
+
+define i1 @test5(i1 %A) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: ret i1 %A
+;
+ %B = or i1 %A, %A
+ ret i1 %B
+}
+
+define i32 @test6(i32 %A) {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: ret i32 %A
+;
+ %B = or i32 %A, %A
+ ret i32 %B
+}
+
+; A | ~A == -1
+define i32 @test7(i32 %A) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: ret i32 -1
+;
+ %NotA = xor i32 %A, -1
+ %B = or i32 %A, %NotA
+ ret i32 %B
+}
+
+define i8 @test8(i8 %A) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: ret i8 -1
+;
+ %B = or i8 %A, -2
+ %C = or i8 %B, 1
+ ret i8 %C
+}
+
+; Test that (A|c1)|(B|c2) == (A|B)|(c1|c2)
+define i8 @test9(i8 %A, i8 %B) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: ret i8 -1
+;
+ %C = or i8 %A, 1
+ %D = or i8 %B, -2
+ %E = or i8 %C, %D
+ ret i8 %E
+}
+
+define i8 @test10(i8 %A) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: ret i8 -2
+;
+ %B = or i8 %A, 1
+ %C = and i8 %B, -2
+ ; (X & C1) | C2 --> (X | C2) & (C1|C2)
+ %D = or i8 %C, -2
+ ret i8 %D
+}
+
+define i8 @test11(i8 %A) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: ret i8 -1
+;
+ %B = or i8 %A, -2
+ %C = xor i8 %B, 13
+ ; (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
+ %D = or i8 %C, 1
+ %E = xor i8 %D, 12
+ ret i8 %E
+}
+
+; Test the case where integer BitWidth <= 64 && BitWidth % 2 != 0.
+define i39 @test1_apint(i39 %V, i39 %M) {
+; CHECK-LABEL: @test1_apint(
+; CHECK: [[N:%.*]] = and i39 %M, -274877906944
+; CHECK-NEXT: [[A:%.*]] = add i39 %V, [[N]]
+; CHECK-NEXT: ret i39 [[A]]
+;
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
+ %N = and i39 %M, 274877906944
+ %A = add i39 %V, %N
+ %B = and i39 %A, %C1
+ %D = and i39 %V, 274877906943
+ %R = or i39 %B, %D
+ ret i39 %R
+}
+
+define i7 @test2_apint(i7 %X) {
+; CHECK-LABEL: @test2_apint(
+; CHECK: ret i7 %X
+;
+ %Y = or i7 %X, 0
+ ret i7 %Y
+}
+
+define i17 @test3_apint(i17 %X) {
+; CHECK-LABEL: @test3_apint(
+; CHECK: ret i17 -1
+;
+ %Y = or i17 %X, -1
+ ret i17 %Y
+}
+
+; Test the case where Integer BitWidth > 64 && BitWidth <= 1024.
+define i399 @test4_apint(i399 %V, i399 %M) {
+; CHECK-LABEL: @test4_apint(
+; CHECK: [[N:%.*]] = and i399 %M, 18446742974197923840
+; CHECK-NEXT: [[A:%.*]] = add i399 %V, [[N]]
+; CHECK-NEXT: ret i399 [[A]]
+;
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
+ %N = and i399 %M, 18446742974197923840
+ %A = add i399 %V, %N
+ %B = and i399 %A, %C1
+ %D = and i399 %V, 274877906943
+ %R = or i399 %B, %D
+ ret i399 %R
+}
+
+define i777 @test5_apint(i777 %X) {
+; CHECK-LABEL: @test5_apint(
+; CHECK: ret i777 %X
+;
+ %Y = or i777 %X, 0
+ ret i777 %Y
+}
+
+define i117 @test6_apint(i117 %X) {
+; CHECK-LABEL: @test6_apint(
+; CHECK: ret i117 -1
+;
+ %Y = or i117 %X, -1
+ ret i117 %Y
+}
+
diff --git a/test/Transforms/LoopIdiom/ARM/ctlz.ll b/test/Transforms/LoopIdiom/ARM/ctlz.ll
new file mode 100644
index 000000000000..281d97c8c338
--- /dev/null
+++ b/test/Transforms/LoopIdiom/ARM/ctlz.ll
@@ -0,0 +1,185 @@
+; RUN: opt -loop-idiom -mtriple=armv7a < %s -S | FileCheck -check-prefix=LZCNT --check-prefix=ALL %s
+; RUN: opt -loop-idiom -mtriple=armv4t < %s -S | FileCheck -check-prefix=NOLZCNT --check-prefix=ALL %s
+
+; Recognize CTLZ builtin pattern.
+; Here we'll just convert the loop to a countable one,
+; so do not insert the builtin if the CPU does not support CTLZ.
+;
+; int ctlz_and_other(int n, char *a)
+; {
+; int i = 0, n0 = n;
+; while(n >>= 1) {
+; a[i] = (n0 & (1 << i)) ? 1 : 0;
+; i++;
+; }
+; return i;
+; }
+;
+; LZCNT: entry
+; LZCNT: %0 = call i32 @llvm.ctlz.i32(i32 %shr8, i1 true)
+; LZCNT-NEXT: %1 = sub i32 32, %0
+; LZCNT-NEXT: %2 = zext i32 %1 to i64
+; LZCNT: %indvars.iv.next.lcssa = phi i64 [ %2, %while.body ]
+; LZCNT: %4 = trunc i64 %indvars.iv.next.lcssa to i32
+; LZCNT: %i.0.lcssa = phi i32 [ 0, %entry ], [ %4, %while.end.loopexit ]
+; LZCNT: ret i32 %i.0.lcssa
+
+; NOLZCNT: entry
+; NOLZCNT-NOT: @llvm.ctlz
+
+; Function Attrs: norecurse nounwind uwtable
+define i32 @ctlz_and_other(i32 %n, i8* nocapture %a) {
+entry:
+ %shr8 = ashr i32 %n, 1
+ %tobool9 = icmp eq i32 %shr8, 0
+ br i1 %tobool9, label %while.end, label %while.body.preheader
+
+while.body.preheader: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %while.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %while.body ], [ 0, %while.body.preheader ]
+ %shr11 = phi i32 [ %shr, %while.body ], [ %shr8, %while.body.preheader ]
+ %0 = trunc i64 %indvars.iv to i32
+ %shl = shl i32 1, %0
+ %and = and i32 %shl, %n
+ %tobool1 = icmp ne i32 %and, 0
+ %conv = zext i1 %tobool1 to i8
+ %arrayidx = getelementptr inbounds i8, i8* %a, i64 %indvars.iv
+ store i8 %conv, i8* %arrayidx, align 1
+ %indvars.iv.next = add nuw i64 %indvars.iv, 1
+ %shr = ashr i32 %shr11, 1
+ %tobool = icmp eq i32 %shr, 0
+ br i1 %tobool, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %while.body
+ %1 = trunc i64 %indvars.iv.next to i32
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %i.0.lcssa = phi i32 [ 0, %entry ], [ %1, %while.end.loopexit ]
+ ret i32 %i.0.lcssa
+}
+
+; Recognize CTLZ builtin pattern.
+; Here it will replace the loop -
+; assume the builtin is always profitable.
+;
+; int ctlz_zero_check(int n)
+; {
+; int i = 0;
+; while(n) {
+; n >>= 1;
+; i++;
+; }
+; return i;
+; }
+;
+; ALL: entry
+; ALL: %0 = call i32 @llvm.ctlz.i32(i32 %n, i1 true)
+; ALL-NEXT: %1 = sub i32 32, %0
+; ALL: %inc.lcssa = phi i32 [ %1, %while.body ]
+; ALL: %i.0.lcssa = phi i32 [ 0, %entry ], [ %inc.lcssa, %while.end.loopexit ]
+; ALL: ret i32 %i.0.lcssa
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @ctlz_zero_check(i32 %n) {
+entry:
+ %tobool4 = icmp eq i32 %n, 0
+ br i1 %tobool4, label %while.end, label %while.body.preheader
+
+while.body.preheader: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %while.body
+ %i.06 = phi i32 [ %inc, %while.body ], [ 0, %while.body.preheader ]
+ %n.addr.05 = phi i32 [ %shr, %while.body ], [ %n, %while.body.preheader ]
+ %shr = ashr i32 %n.addr.05, 1
+ %inc = add nsw i32 %i.06, 1
+ %tobool = icmp eq i32 %shr, 0
+ br i1 %tobool, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %while.body
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %i.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %while.end.loopexit ]
+ ret i32 %i.0.lcssa
+}
+
+; Recognize CTLZ builtin pattern.
+; Here it will replace the loop -
+; assume the builtin is always profitable.
+;
+; int ctlz(int n)
+; {
+; int i = 0;
+; while(n >>= 1) {
+; i++;
+; }
+; return i;
+; }
+;
+; ALL: entry
+; ALL: %0 = ashr i32 %n, 1
+; ALL-NEXT: %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 false)
+; ALL-NEXT: %2 = sub i32 32, %1
+; ALL-NEXT: %3 = add i32 %2, 1
+; ALL: %i.0.lcssa = phi i32 [ %2, %while.cond ]
+; ALL: ret i32 %i.0.lcssa
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @ctlz(i32 %n) {
+entry:
+ br label %while.cond
+
+while.cond: ; preds = %while.cond, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %shr, %while.cond ]
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %while.cond ]
+ %shr = ashr i32 %n.addr.0, 1
+ %tobool = icmp eq i32 %shr, 0
+ %inc = add nsw i32 %i.0, 1
+ br i1 %tobool, label %while.end, label %while.cond
+
+while.end: ; preds = %while.cond
+ ret i32 %i.0
+}
+
+; Recognize CTLZ builtin pattern.
+; Here it will replace the loop -
+; assume the builtin is always profitable.
+;
+; int ctlz_add(int n, int i0)
+; {
+; int i = i0;
+; while(n >>= 1) {
+; i++;
+; }
+; return i;
+; }
+;
+; ALL: entry
+; ALL: %0 = ashr i32 %n, 1
+; ALL-NEXT: %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 false)
+; ALL-NEXT: %2 = sub i32 32, %1
+; ALL-NEXT: %3 = add i32 %2, 1
+; ALL-NEXT: %4 = add i32 %2, %i0
+; ALL: %i.0.lcssa = phi i32 [ %4, %while.cond ]
+; ALL: ret i32 %i.0.lcssa
+;
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @ctlz_add(i32 %n, i32 %i0) {
+entry:
+ br label %while.cond
+
+while.cond: ; preds = %while.cond, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %shr, %while.cond ]
+ %i.0 = phi i32 [ %i0, %entry ], [ %inc, %while.cond ]
+ %shr = ashr i32 %n.addr.0, 1
+ %tobool = icmp eq i32 %shr, 0
+ %inc = add nsw i32 %i.0, 1
+ br i1 %tobool, label %while.end, label %while.cond
+
+while.end: ; preds = %while.cond
+ ret i32 %i.0
+}
diff --git a/test/Transforms/LoopIdiom/X86/ctlz.ll b/test/Transforms/LoopIdiom/X86/ctlz.ll
new file mode 100644
index 000000000000..d8daa3a9bbab
--- /dev/null
+++ b/test/Transforms/LoopIdiom/X86/ctlz.ll
@@ -0,0 +1,185 @@
+; RUN: opt -loop-idiom -mtriple=x86_64 -mcpu=core-avx2 < %s -S | FileCheck -check-prefix=LZCNT --check-prefix=ALL %s
+; RUN: opt -loop-idiom -mtriple=x86_64 -mcpu=corei7 < %s -S | FileCheck -check-prefix=NOLZCNT --check-prefix=ALL %s
+
+; Recognize CTLZ builtin pattern.
+; Here we'll just convert the loop to a countable one,
+; so do not insert the builtin if the CPU does not support CTLZ.
+;
+; int ctlz_and_other(int n, char *a)
+; {
+; int i = 0, n0 = n;
+; while(n >>= 1) {
+; a[i] = (n0 & (1 << i)) ? 1 : 0;
+; i++;
+; }
+; return i;
+; }
+;
+; LZCNT: entry
+; LZCNT: %0 = call i32 @llvm.ctlz.i32(i32 %shr8, i1 true)
+; LZCNT-NEXT: %1 = sub i32 32, %0
+; LZCNT-NEXT: %2 = zext i32 %1 to i64
+; LZCNT: %indvars.iv.next.lcssa = phi i64 [ %2, %while.body ]
+; LZCNT: %4 = trunc i64 %indvars.iv.next.lcssa to i32
+; LZCNT: %i.0.lcssa = phi i32 [ 0, %entry ], [ %4, %while.end.loopexit ]
+; LZCNT: ret i32 %i.0.lcssa
+
+; NOLZCNT: entry
+; NOLZCNT-NOT: @llvm.ctlz
+
+; Function Attrs: norecurse nounwind uwtable
+define i32 @ctlz_and_other(i32 %n, i8* nocapture %a) {
+entry:
+ %shr8 = ashr i32 %n, 1
+ %tobool9 = icmp eq i32 %shr8, 0
+ br i1 %tobool9, label %while.end, label %while.body.preheader
+
+while.body.preheader: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %while.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %while.body ], [ 0, %while.body.preheader ]
+ %shr11 = phi i32 [ %shr, %while.body ], [ %shr8, %while.body.preheader ]
+ %0 = trunc i64 %indvars.iv to i32
+ %shl = shl i32 1, %0
+ %and = and i32 %shl, %n
+ %tobool1 = icmp ne i32 %and, 0
+ %conv = zext i1 %tobool1 to i8
+ %arrayidx = getelementptr inbounds i8, i8* %a, i64 %indvars.iv
+ store i8 %conv, i8* %arrayidx, align 1
+ %indvars.iv.next = add nuw i64 %indvars.iv, 1
+ %shr = ashr i32 %shr11, 1
+ %tobool = icmp eq i32 %shr, 0
+ br i1 %tobool, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %while.body
+ %1 = trunc i64 %indvars.iv.next to i32
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %i.0.lcssa = phi i32 [ 0, %entry ], [ %1, %while.end.loopexit ]
+ ret i32 %i.0.lcssa
+}
+
+; Recognize CTLZ builtin pattern.
+; Here it will replace the loop -
+; assume the builtin is always profitable.
+;
+; int ctlz_zero_check(int n)
+; {
+; int i = 0;
+; while(n) {
+; n >>= 1;
+; i++;
+; }
+; return i;
+; }
+;
+; ALL: entry
+; ALL: %0 = call i32 @llvm.ctlz.i32(i32 %n, i1 true)
+; ALL-NEXT: %1 = sub i32 32, %0
+; ALL: %inc.lcssa = phi i32 [ %1, %while.body ]
+; ALL: %i.0.lcssa = phi i32 [ 0, %entry ], [ %inc.lcssa, %while.end.loopexit ]
+; ALL: ret i32 %i.0.lcssa
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @ctlz_zero_check(i32 %n) {
+entry:
+ %tobool4 = icmp eq i32 %n, 0
+ br i1 %tobool4, label %while.end, label %while.body.preheader
+
+while.body.preheader: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %while.body
+ %i.06 = phi i32 [ %inc, %while.body ], [ 0, %while.body.preheader ]
+ %n.addr.05 = phi i32 [ %shr, %while.body ], [ %n, %while.body.preheader ]
+ %shr = ashr i32 %n.addr.05, 1
+ %inc = add nsw i32 %i.06, 1
+ %tobool = icmp eq i32 %shr, 0
+ br i1 %tobool, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %while.body
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %i.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %while.end.loopexit ]
+ ret i32 %i.0.lcssa
+}
+
+; Recognize CTLZ builtin pattern.
+; Here it will replace the loop -
+; assume the builtin is always profitable.
+;
+; int ctlz(int n)
+; {
+; int i = 0;
+; while(n >>= 1) {
+; i++;
+; }
+; return i;
+; }
+;
+; ALL: entry
+; ALL: %0 = ashr i32 %n, 1
+; ALL-NEXT: %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 false)
+; ALL-NEXT: %2 = sub i32 32, %1
+; ALL-NEXT: %3 = add i32 %2, 1
+; ALL: %i.0.lcssa = phi i32 [ %2, %while.cond ]
+; ALL: ret i32 %i.0.lcssa
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @ctlz(i32 %n) {
+entry:
+ br label %while.cond
+
+while.cond: ; preds = %while.cond, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %shr, %while.cond ]
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %while.cond ]
+ %shr = ashr i32 %n.addr.0, 1
+ %tobool = icmp eq i32 %shr, 0
+ %inc = add nsw i32 %i.0, 1
+ br i1 %tobool, label %while.end, label %while.cond
+
+while.end: ; preds = %while.cond
+ ret i32 %i.0
+}
+
+; Recognize CTLZ builtin pattern.
+; Here it will replace the loop -
+; assume the builtin is always profitable.
+;
+; int ctlz_add(int n, int i0)
+; {
+; int i = i0;
+; while(n >>= 1) {
+; i++;
+; }
+; return i;
+; }
+;
+; ALL: entry
+; ALL: %0 = ashr i32 %n, 1
+; ALL-NEXT: %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 false)
+; ALL-NEXT: %2 = sub i32 32, %1
+; ALL-NEXT: %3 = add i32 %2, 1
+; ALL-NEXT: %4 = add i32 %2, %i0
+; ALL: %i.0.lcssa = phi i32 [ %4, %while.cond ]
+; ALL: ret i32 %i.0.lcssa
+;
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @ctlz_add(i32 %n, i32 %i0) {
+entry:
+ br label %while.cond
+
+while.cond: ; preds = %while.cond, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %shr, %while.cond ]
+ %i.0 = phi i32 [ %i0, %entry ], [ %inc, %while.cond ]
+ %shr = ashr i32 %n.addr.0, 1
+ %tobool = icmp eq i32 %shr, 0
+ %inc = add nsw i32 %i.0, 1
+ br i1 %tobool, label %while.end, label %while.cond
+
+while.end: ; preds = %while.cond
+ ret i32 %i.0
+}
diff --git a/test/Transforms/LoopUnroll/not-rotated.ll b/test/Transforms/LoopUnroll/not-rotated.ll
index ffe80920d948..b4b88e096079 100644
--- a/test/Transforms/LoopUnroll/not-rotated.ll
+++ b/test/Transforms/LoopUnroll/not-rotated.ll
@@ -4,7 +4,7 @@
; properly handled by LoopUnroll, currently.
; RUN: opt -loop-unroll -verify-dom-info %s
-; REQUIRE: asserts
+; REQUIRES: asserts
define void @tinkywinky(i1 %patatino) {
entry:
diff --git a/test/Transforms/LoopVectorize/X86/svml-calls-finite.ll b/test/Transforms/LoopVectorize/X86/svml-calls-finite.ll
new file mode 100644
index 000000000000..5a4bfe5e6bdd
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/svml-calls-finite.ll
@@ -0,0 +1,187 @@
+; RUN: opt -vector-library=SVML -loop-vectorize -S < %s | FileCheck %s
+
+; Test to verify that when math headers are built with
+; __FINITE_MATH_ONLY__ enabled, causing the __<func>_finite
+; function versions to be used, vectorization can map these to vector versions.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare float @__expf_finite(float) #0
+
+; CHECK-LABEL: @exp_f32
+; CHECK: <4 x float> @__svml_expf4
+; CHECK: ret
+define void @exp_f32(float* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__expf_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
+ store float %call, float* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!1 = distinct !{!1, !2, !3}
+!2 = !{!"llvm.loop.vectorize.width", i32 4}
+!3 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__exp_finite(double) #0
+
+; CHECK-LABEL: @exp_f64
+; CHECK: <4 x double> @__svml_exp4
+; CHECK: ret
+define void @exp_f64(double* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__exp_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
+ store double %call, double* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !11
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!11 = distinct !{!11, !12, !13}
+!12 = !{!"llvm.loop.vectorize.width", i32 4}
+!13 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+
+
+declare float @__logf_finite(float) #0
+
+; CHECK-LABEL: @log_f32
+; CHECK: <4 x float> @__svml_logf4
+; CHECK: ret
+define void @log_f32(float* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__logf_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
+ store float %call, float* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!21 = distinct !{!21, !22, !23}
+!22 = !{!"llvm.loop.vectorize.width", i32 4}
+!23 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__log_finite(double) #0
+
+; CHECK-LABEL: @log_f64
+; CHECK: <4 x double> @__svml_log4
+; CHECK: ret
+define void @log_f64(double* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__log_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
+ store double %call, double* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!31 = distinct !{!31, !32, !33}
+!32 = !{!"llvm.loop.vectorize.width", i32 4}
+!33 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare float @__powf_finite(float, float) #0
+
+; CHECK-LABEL: @pow_f32
+; CHECK: <4 x float> @__svml_powf4
+; CHECK: ret
+define void @pow_f32(float* nocapture %varray, float* nocapture readonly %exp) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %arrayidx = getelementptr inbounds float, float* %exp, i64 %indvars.iv
+ %tmp1 = load float, float* %arrayidx, align 4
+ %tmp2 = tail call fast float @__powf_finite(float %conv, float %tmp1)
+ %arrayidx2 = getelementptr inbounds float, float* %varray, i64 %indvars.iv
+ store float %tmp2, float* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !41
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!41 = distinct !{!41, !42, !43}
+!42 = !{!"llvm.loop.vectorize.width", i32 4}
+!43 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__pow_finite(double, double) #0
+
+; CHECK-LABEL: @pow_f64
+; CHECK: <4 x double> @__svml_pow4
+; CHECK: ret
+define void @pow_f64(double* nocapture %varray, double* nocapture readonly %exp) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %arrayidx = getelementptr inbounds double, double* %exp, i64 %indvars.iv
+ %tmp1 = load double, double* %arrayidx, align 4
+ %tmp2 = tail call fast double @__pow_finite(double %conv, double %tmp1)
+ %arrayidx2 = getelementptr inbounds double, double* %varray, i64 %indvars.iv
+ store double %tmp2, double* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !51
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!51 = distinct !{!51, !52, !53}
+!52 = !{!"llvm.loop.vectorize.width", i32 4}
+!53 = !{!"llvm.loop.vectorize.enable", i1 true}
diff --git a/test/Transforms/LoopVectorize/induction.ll b/test/Transforms/LoopVectorize/induction.ll
index 6507166dd1f2..7e9e6b1cdc8e 100644
--- a/test/Transforms/LoopVectorize/induction.ll
+++ b/test/Transforms/LoopVectorize/induction.ll
@@ -849,3 +849,48 @@ for.end:
%tmp7 = phi i32 [ %tmp6, %for.inc ]
ret i32 %tmp7
}
+
+; Ensure that the shuffle vector for the first-order recurrence is inserted
+; correctly after all the phis. These new phis correspond to new IVs that are
+; generated by optimizing non-free truncs of IVs into IVs themselves.
+define i64 @trunc_with_first_order_recurrence() {
+; CHECK-LABEL: trunc_with_first_order_recurrence
+; CHECK-LABEL: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NEXT: %vec.phi = phi <2 x i64>
+; CHECK-NEXT: %vec.ind = phi <2 x i64> [ <i64 1, i64 2>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+; CHECK-NEXT: %vec.ind2 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next3, %vector.body ]
+; CHECK-NEXT: %vector.recur = phi <2 x i32> [ <i32 undef, i32 42>, %vector.ph ], [ %vec.ind5, %vector.body ]
+; CHECK-NEXT: %vec.ind5 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next6, %vector.body ]
+; CHECK-NEXT: %vec.ind7 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next8, %vector.body ]
+; CHECK-NEXT: shufflevector <2 x i32> %vector.recur, <2 x i32> %vec.ind5, <2 x i32> <i32 1, i32 2>
+entry:
+ br label %loop
+
+exit: ; preds = %loop
+ %.lcssa = phi i64 [ %c23, %loop ]
+ ret i64 %.lcssa
+
+loop: ; preds = %loop, %entry
+ %c5 = phi i64 [ %c23, %loop ], [ 0, %entry ]
+ %indvars.iv = phi i64 [ %indvars.iv.next, %loop ], [ 1, %entry ]
+ %x = phi i32 [ %c24, %loop ], [ 1, %entry ]
+ %y = phi i32 [ %c6, %loop ], [ 42, %entry ]
+ %c6 = trunc i64 %indvars.iv to i32
+ %c8 = mul i32 %x, %c6
+ %c9 = add i32 %c8, 42
+ %c10 = add i32 %y, %c6
+ %c11 = add i32 %c10, %c9
+ %c12 = sext i32 %c11 to i64
+ %c13 = add i64 %c5, %c12
+ %indvars.iv.tr = trunc i64 %indvars.iv to i32
+ %c14 = shl i32 %indvars.iv.tr, 1
+ %c15 = add i32 %c9, %c14
+ %c16 = sext i32 %c15 to i64
+ %c23 = add i64 %c13, %c16
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %c24 = add nuw nsw i32 %x, 1
+ %exitcond.i = icmp eq i64 %indvars.iv.next, 114
+ br i1 %exitcond.i, label %exit, label %loop
+
+}
diff --git a/test/Transforms/LoopVectorize/pr32859.ll b/test/Transforms/LoopVectorize/pr32859.ll
new file mode 100644
index 000000000000..31cb84699f71
--- /dev/null
+++ b/test/Transforms/LoopVectorize/pr32859.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -loop-vectorize -S | FileCheck %s
+
+; Out of LCSSA form we could have 'phi i32 [ loop-invariant, %for.inc.2.i ]',
+; but the IR Verifier requires a PHI to have one entry for each predecessor of
+; its parent basic block. The original PR14725 fix simply added 'undef' for the
+; extra predecessor BB, which is not correct. Instead, we copy the real value
+; from the other predecessor rather than introducing 'undef'.
+
+; CHECK-LABEL: for.cond.preheader:
+; CHECK: %e.0.ph = phi i32 [ 0, %if.end.2.i ], [ 0, %middle.block ]
+
+; Function Attrs: nounwind uwtable
+define void @main() #0 {
+entry:
+ br label %for.cond1.preheader.i
+
+for.cond1.preheader.i: ; preds = %if.end.2.i, %entry
+ %c.06.i = phi i32 [ 0, %entry ], [ %inc5.i, %if.end.2.i ]
+ %tobool.i = icmp ne i32 undef, 0
+ br label %if.end.2.i
+
+if.end.2.i: ; preds = %for.cond1.preheader.i
+ %inc5.i = add nsw i32 %c.06.i, 1
+ %cmp.i = icmp slt i32 %inc5.i, 16
+ br i1 %cmp.i, label %for.cond1.preheader.i, label %for.cond.preheader
+
+for.cond.preheader: ; preds = %if.end.2.i
+ %e.0.ph = phi i32 [ 0, %if.end.2.i ]
+ unreachable
+}
diff --git a/test/Transforms/NewGVN/pr32934.ll b/test/Transforms/NewGVN/pr32934.ll
new file mode 100644
index 000000000000..4bb7ea150437
--- /dev/null
+++ b/test/Transforms/NewGVN/pr32934.ll
@@ -0,0 +1,69 @@
+; REQUIRES: disabled
+; RUN: opt -S -newgvn %s | FileCheck %s
+
+; CHECK: define void @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %d = alloca i32, align 4
+; CHECK-NEXT: store i32 0, i32* null, align 4
+; CHECK-NEXT: br label %for.cond
+; CHECK: for.cond: ; preds = %if.end, %entry
+; CHECK-NEXT: %0 = load i32, i32* null, align 4
+; CHECK-NEXT: %cmp = icmp slt i32 %0, 1
+; CHECK-NEXT: br i1 %cmp, label %for.body, label %while.cond
+; CHECK: for.body: ; preds = %for.cond
+; CHECK-NEXT: %1 = load i32, i32* @a, align 4
+; CHECK-NEXT: store i32 %1, i32* %d, align 4
+; CHECK-NEXT: br label %L
+; CHECK: L: ; preds = %if.then, %for.body
+; CHECK-NEXT: %tobool = icmp ne i32 %1, 0
+; CHECK-NEXT: br i1 %tobool, label %if.then, label %if.end
+; CHECK: if.then: ; preds = %L
+; CHECK-NEXT: call void (i8*, ...) @printf(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @patatino, i32 0, i32 0))
+; CHECK-NEXT: br label %L
+; CHECK: if.end: ; preds = %L
+; CHECK-NEXT: br label %for.cond
+; CHECK: while.cond: ; preds = %while.body, %for.cond
+; CHECK-NEXT: br i1 undef, label %while.body, label %while.end
+; CHECK: while.body: ; preds = %while.cond
+; CHECK-NEXT: call void (i8*, ...) @printf(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @patatino, i32 0, i32 0))
+; CHECK-NEXT: br label %while.cond
+; CHECK: while.end:
+; CHECK-NEXT: %2 = load i32, i32* @a, align 4
+; CHECK-NEXT: store i32 %2, i32* undef, align 4
+; CHECK-NEXT: ret void
+
+@a = external global i32, align 4
+@patatino = external unnamed_addr constant [2 x i8], align 1
+define void @tinkywinky() {
+entry:
+ %d = alloca i32, align 4
+ store i32 0, i32* null, align 4
+ br label %for.cond
+for.cond:
+ %0 = load i32, i32* null, align 4
+ %cmp = icmp slt i32 %0, 1
+ br i1 %cmp, label %for.body, label %while.cond
+for.body:
+ %1 = load i32, i32* @a, align 4
+ store i32 %1, i32* %d, align 4
+ br label %L
+L:
+ %2 = load i32, i32* %d, align 4
+ %tobool = icmp ne i32 %2, 0
+ br i1 %tobool, label %if.then, label %if.end
+if.then:
+ call void (i8*, ...) @printf(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @patatino, i32 0, i32 0))
+ br label %L
+if.end:
+ br label %for.cond
+while.cond:
+ br i1 undef, label %while.body, label %while.end
+while.body:
+ call void (i8*, ...) @printf(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @patatino, i32 0, i32 0))
+ br label %while.cond
+while.end:
+ %3 = load i32, i32* @a, align 4
+ store i32 %3, i32* undef, align 4
+ ret void
+}
+declare void @printf(i8*, ...) #1
diff --git a/test/Transforms/NewGVN/pr32952.ll b/test/Transforms/NewGVN/pr32952.ll
new file mode 100644
index 000000000000..056b3a5105ec
--- /dev/null
+++ b/test/Transforms/NewGVN/pr32952.ll
@@ -0,0 +1,42 @@
+; PR32952: Don't erroneously consider two phi nodes congruent when they
+; have the same arguments but different incoming edges.
+; RUN: opt -newgvn -S %s | FileCheck %s
+
+@a = common global i16 0, align 2
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+
+define i32 @tinkywinky() {
+entry:
+ %0 = load i16, i16* @a, align 2
+ %conv = sext i16 %0 to i32
+ %neg = xor i32 %conv, -1
+ %conv1 = trunc i32 %neg to i16
+ %conv3 = zext i16 %conv1 to i32
+ %cmp = icmp slt i32 %conv, %conv3
+ br i1 %cmp, label %tinky, label %winky
+
+tinky:
+ store i16 2, i16* @a, align 2
+ br label %patatino
+
+winky:
+ br label %patatino
+
+patatino:
+; CHECK: %meh = phi i16 [ %0, %winky ], [ %conv1, %tinky ]
+; CHECK: %banana = phi i16 [ %0, %tinky ], [ %conv1, %winky ]
+ %meh = phi i16 [ %0, %winky ], [ %conv1, %tinky ]
+ %banana = phi i16 [ %0, %tinky ], [ %conv1, %winky ]
+ br label %end
+
+end:
+; CHECK: %promoted = zext i16 %banana to i32
+; CHECK: %other = zext i16 %meh to i32
+ %promoted = zext i16 %banana to i32
+ %other = zext i16 %meh to i32
+ %first = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %promoted)
+ %second = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %other)
+ ret i32 0
+}
+
+declare i32 @printf(i8*, ...)
diff --git a/test/Transforms/NewGVN/verify-memoryphi.ll b/test/Transforms/NewGVN/verify-memoryphi.ll
new file mode 100644
index 000000000000..57dbd18986d2
--- /dev/null
+++ b/test/Transforms/NewGVN/verify-memoryphi.ll
@@ -0,0 +1,29 @@
+; Skip dead MemoryPhis when performing memory congruency verification
+; in NewGVN.
+; RUN: opt -S -newgvn %s | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: define void @tinkywinky() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label %body, label %end
+; CHECK: body:
+; CHECK-NEXT: store i8 undef, i8* null
+; CHECK-NEXT: br label %end
+; CHECK: end:
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+
+define void @tinkywinky() {
+entry:
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* undef)
+ br i1 false, label %body, label %end
+
+body:
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* undef)
+ br label %end
+
+end:
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll b/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll
new file mode 100644
index 000000000000..edc8042a217d
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/AArch64/64-bit-vector.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic < %s | FileCheck %s
+; RUN: opt -S -slp-vectorizer -mtriple=aarch64-apple-ios -mcpu=cyclone < %s | FileCheck %s
+; Currently disabled for a few subtargets (e.g. Kryo):
+; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=kryo < %s | FileCheck --check-prefix=NO_SLP %s
+; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic -slp-min-reg-size=128 < %s | FileCheck --check-prefix=NO_SLP %s
+
+define void @f(float* %r, float* %w) {
+ %r0 = getelementptr inbounds float, float* %r, i64 0
+ %r1 = getelementptr inbounds float, float* %r, i64 1
+ %f0 = load float, float* %r0
+ %f1 = load float, float* %r1
+ %add0 = fadd float %f0, %f0
+; CHECK: fadd <2 x float>
+; NO_SLP: fadd float
+; NO_SLP: fadd float
+ %add1 = fadd float %f1, %f1
+ %w0 = getelementptr inbounds float, float* %w, i64 0
+ %w1 = getelementptr inbounds float, float* %w, i64 1
+ store float %add0, float* %w0
+ store float %add1, float* %w1
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll b/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
index e9b71963530c..962a6c3b57b3 100644
--- a/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
@@ -1,4 +1,5 @@
-; RUN: opt -S -slp-vectorizer -slp-threshold=-18 -dce -instcombine < %s | FileCheck %s
+; RUN: opt -S -slp-vectorizer -slp-threshold=-18 -dce -instcombine -pass-remarks-output=%t < %s | FileCheck %s
+; RUN: cat %t | FileCheck -check-prefix=YAML %s
target datalayout = "e-m:e-i32:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"
@@ -23,7 +24,25 @@ target triple = "aarch64--linux-gnu"
; CHECK: [[A:%[a-zA-Z0-9.]+]] = add nsw <4 x i32>
; CHECK: [[X:%[a-zA-Z0-9.]+]] = extractelement <4 x i32> [[A]]
; CHECK: sext i32 [[X]] to i64
-;
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedList
+; YAML-NEXT: Function: getelementptr_4x32
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'SLP vectorized with cost '
+; YAML-NEXT: - Cost: '11'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '5'
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedList
+; YAML-NEXT: Function: getelementptr_4x32
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'SLP vectorized with cost '
+; YAML-NEXT: - Cost: '16'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '3'
+
define i32 @getelementptr_4x32(i32* nocapture readonly %g, i32 %n, i32 %x, i32 %y, i32 %z) {
entry:
%cmp31 = icmp sgt i32 %n, 0
@@ -69,7 +88,25 @@ for.body:
; CHECK: [[A:%[a-zA-Z0-9.]+]] = add nsw <2 x i32>
; CHECK: [[X:%[a-zA-Z0-9.]+]] = extractelement <2 x i32> [[A]]
; CHECK: sext i32 [[X]] to i64
-;
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedList
+; YAML-NEXT: Function: getelementptr_2x32
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'SLP vectorized with cost '
+; YAML-NEXT: - Cost: '11'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '5'
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedList
+; YAML-NEXT: Function: getelementptr_2x32
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'SLP vectorized with cost '
+; YAML-NEXT: - Cost: '6'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '3'
+
define i32 @getelementptr_2x32(i32* nocapture readonly %g, i32 %n, i32 %x, i32 %y, i32 %z) {
entry:
%cmp31 = icmp sgt i32 %n, 0
diff --git a/test/Transforms/SLPVectorizer/AArch64/horizontal.ll b/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
index 8f8bf2648aa2..1a6a2fb890d3 100644
--- a/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
@@ -1,4 +1,5 @@
-; RUN: opt -slp-vectorizer -slp-threshold=-6 -S < %s | FileCheck %s
+; RUN: opt -slp-vectorizer -slp-threshold=-6 -S -pass-remarks-output=%t < %s | FileCheck %s
+; RUN: cat %t | FileCheck -check-prefix=YAML %s
; FIXME: The threshold is changed to keep this test case a bit smaller.
; The AArch64 cost model should not give such high costs to select statements.
@@ -10,6 +11,16 @@ target triple = "aarch64--linux"
; CHECK: load <4 x i32>
; CHECK: load <4 x i32>
; CHECK: select <4 x i1>
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedHorizontalReduction
+; YAML-NEXT: Function: test_select
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
+; YAML-NEXT: - Cost: '4'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '8'
+
define i32 @test_select(i32* noalias nocapture readonly %blk1, i32* noalias nocapture readonly %blk2, i32 %lx, i32 %h) {
entry:
%cmp.22 = icmp sgt i32 %h, 0
@@ -93,6 +104,16 @@ define i32 @reduction_with_br(i32* noalias nocapture readonly %blk1, i32* noalia
; CHECK: load <4 x i32>
; CHECK: load <4 x i32>
; CHECK: mul nsw <4 x i32>
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedHorizontalReduction
+; YAML-NEXT: Function: reduction_with_br
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
+; YAML-NEXT: - Cost: '1'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '3'
+
entry:
%cmp.16 = icmp sgt i32 %h, 0
br i1 %cmp.16, label %for.body.lr.ph, label %for.end
@@ -150,6 +171,16 @@ for.end: ; preds = %for.end.loopexit, %
; CHECK: load <8 x i8>
; CHECK: load <8 x i8>
; CHECK: select <8 x i1>
+
+; YAML: Pass: slp-vectorizer
+; YAML-NEXT: Name: VectorizedHorizontalReduction
+; YAML-NEXT: Function: test_unrolled_select
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
+; YAML-NEXT: - Cost: '-33'
+; YAML-NEXT: - String: ' and with tree size '
+; YAML-NEXT: - TreeSize: '10'
+
define i32 @test_unrolled_select(i8* noalias nocapture readonly %blk1, i8* noalias nocapture readonly %blk2, i32 %lx, i32 %h, i32 %lim) #0 {
entry:
%cmp.43 = icmp sgt i32 %h, 0
diff --git a/test/Transforms/SLPVectorizer/AArch64/remarks.ll b/test/Transforms/SLPVectorizer/AArch64/remarks.ll
new file mode 100644
index 000000000000..e8c37512594e
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/AArch64/remarks.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic -pass-remarks=slp-vectorizer -o /dev/null < %s 2>&1 | FileCheck %s
+
+define void @f(double* %r, double* %w) {
+ %r0 = getelementptr inbounds double, double* %r, i64 0
+ %r1 = getelementptr inbounds double, double* %r, i64 1
+ %f0 = load double, double* %r0
+ %f1 = load double, double* %r1
+ %add0 = fadd double %f0, %f0
+ %add1 = fadd double %f1, %f1
+ %w0 = getelementptr inbounds double, double* %w, i64 0
+ %w1 = getelementptr inbounds double, double* %w, i64 1
+; CHECK: remark: /tmp/s.c:5:10: Stores SLP vectorized with cost -4 and with tree size 3
+ store double %add0, double* %w0, !dbg !9
+ store double %add1, double* %w1
+ ret void
+}
+
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (trunk 281293) (llvm/trunk 281290)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "/tmp/s.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 4.0.0 (trunk 281293) (llvm/trunk 281290)"}
+!7 = distinct !DISubprogram(name: "baz", scope: !1, file: !1, line: 4, type: !8, isLocal: false, isDefinition: true, scopeLine: 4, isOptimized: true, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !2)
+!9 = !DILocation(line: 5, column: 10, scope: !7)
diff --git a/test/Transforms/SLPVectorizer/X86/arith-add.ll b/test/Transforms/SLPVectorizer/X86/arith-add.ll
new file mode 100644
index 000000000000..0266758b27d2
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-add.ll
@@ -0,0 +1,649 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+avx512bw -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8 = common global [64 x i8] zeroinitializer, align 64
+@b8 = common global [64 x i8] zeroinitializer, align 64
+@c8 = common global [64 x i8] zeroinitializer, align 64
+
+define void @add_v8i64() {
+; SSE-LABEL: @add_v8i64(
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP9:%.*]] = add <2 x i64> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = add <2 x i64> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = add <2 x i64> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = add <2 x i64> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @add_v8i64(
+; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP5:%.*]] = add <4 x i64> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = add <4 x i64> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @add_v8i64(
+; AVX512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = add <8 x i64> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+ %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+ %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+ %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+ %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+ %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+ %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+ %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+ %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+ %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+ %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+ %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+ %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+ %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+ %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+ %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+ %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+ %r0 = add i64 %a0, %b0
+ %r1 = add i64 %a1, %b1
+ %r2 = add i64 %a2, %b2
+ %r3 = add i64 %a3, %b3
+ %r4 = add i64 %a4, %b4
+ %r5 = add i64 %a5, %b5
+ %r6 = add i64 %a6, %b6
+ %r7 = add i64 %a7, %b7
+ store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+ store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+ store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+ store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+ store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+ store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+ store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+ store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+ ret void
+}
+
+define void @add_v16i32() {
+; SSE-LABEL: @add_v16i32(
+; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP9:%.*]] = add <4 x i32> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = add <4 x i32> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = add <4 x i32> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @add_v16i32(
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP5:%.*]] = add <8 x i32> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = add <8 x i32> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @add_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = add <16 x i32> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
+;
+ %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+ %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+ %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+ %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+ %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+ %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+ %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+ %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+ %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+ %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+ %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+ %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+ %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+ %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+ %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+ %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+ %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+ %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+ %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+ %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+ %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+ %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+ %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+ %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+ %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+ %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+ %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+ %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+ %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+ %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+ %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+ %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+ %r0 = add i32 %a0 , %b0
+ %r1 = add i32 %a1 , %b1
+ %r2 = add i32 %a2 , %b2
+ %r3 = add i32 %a3 , %b3
+ %r4 = add i32 %a4 , %b4
+ %r5 = add i32 %a5 , %b5
+ %r6 = add i32 %a6 , %b6
+ %r7 = add i32 %a7 , %b7
+ %r8 = add i32 %a8 , %b8
+ %r9 = add i32 %a9 , %b9
+ %r10 = add i32 %a10, %b10
+ %r11 = add i32 %a11, %b11
+ %r12 = add i32 %a12, %b12
+ %r13 = add i32 %a13, %b13
+ %r14 = add i32 %a14, %b14
+ %r15 = add i32 %a15, %b15
+ store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+ store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+ store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+ store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+ store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+ store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+ store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+ store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+ store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+ store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+ store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+ store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+ store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+ store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+ store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+ store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+ ret void
+}
+
+define void @add_v32i16() {
+; SSE-LABEL: @add_v32i16(
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP9:%.*]] = add <8 x i16> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = add <8 x i16> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = add <8 x i16> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = add <8 x i16> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @add_v32i16(
+; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = add <16 x i16> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @add_v32i16(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[TMP6:%.*]] = add <16 x i16> [[TMP2]], [[TMP4]]
+; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: ret void
+;
+ %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+ %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+ %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+ %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+ %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+ %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+ %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+ %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+ %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+ %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+ %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+ %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+ %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+ %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+ %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+ %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+ %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+ %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+ %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+ %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+ %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+ %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+ %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+ %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+ %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+ %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+ %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+ %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+ %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+ %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+ %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+ %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+ %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+ %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+ %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+ %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+ %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+ %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+ %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+ %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+ %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+ %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+ %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+ %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+ %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+ %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+ %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+ %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+ %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+ %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+ %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+ %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+ %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+ %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+ %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+ %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+ %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+ %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+ %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+ %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+ %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+ %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+ %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+ %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+ %r0 = add i16 %a0 , %b0
+ %r1 = add i16 %a1 , %b1
+ %r2 = add i16 %a2 , %b2
+ %r3 = add i16 %a3 , %b3
+ %r4 = add i16 %a4 , %b4
+ %r5 = add i16 %a5 , %b5
+ %r6 = add i16 %a6 , %b6
+ %r7 = add i16 %a7 , %b7
+ %r8 = add i16 %a8 , %b8
+ %r9 = add i16 %a9 , %b9
+ %r10 = add i16 %a10, %b10
+ %r11 = add i16 %a11, %b11
+ %r12 = add i16 %a12, %b12
+ %r13 = add i16 %a13, %b13
+ %r14 = add i16 %a14, %b14
+ %r15 = add i16 %a15, %b15
+ %r16 = add i16 %a16, %b16
+ %r17 = add i16 %a17, %b17
+ %r18 = add i16 %a18, %b18
+ %r19 = add i16 %a19, %b19
+ %r20 = add i16 %a20, %b20
+ %r21 = add i16 %a21, %b21
+ %r22 = add i16 %a22, %b22
+ %r23 = add i16 %a23, %b23
+ %r24 = add i16 %a24, %b24
+ %r25 = add i16 %a25, %b25
+ %r26 = add i16 %a26, %b26
+ %r27 = add i16 %a27, %b27
+ %r28 = add i16 %a28, %b28
+ %r29 = add i16 %a29, %b29
+ %r30 = add i16 %a30, %b30
+ %r31 = add i16 %a31, %b31
+ store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+ store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+ store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+ store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+ store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+ store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+ store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+ store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+ store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+ store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+ store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+ store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+ store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+ store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+ store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+ store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+ store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+ store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+ store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+ store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+ store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+ store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+ store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+ store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+ store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+ store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+ store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+ store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+ store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+ store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+ store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+ store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+ ret void
+}
+
+define void @add_v64i8() {
+; CHECK-LABEL: @add_v64i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP9:%.*]] = add <16 x i8> [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = add <16 x i8> [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = add <16 x i8> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = add <16 x i8> [[TMP4]], [[TMP8]]
+; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
+;
+ %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+ %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+ %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+ %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+ %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+ %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+ %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+ %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+ %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+ %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+ %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+ %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+ %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+ %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+ %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+ %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+ %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+ %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+ %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+ %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+ %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+ %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+ %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+ %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+ %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+ %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+ %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+ %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+ %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+ %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+ %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+ %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+ %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+ %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+ %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+ %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+ %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+ %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+ %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+ %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+ %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+ %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+ %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+ %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+ %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+ %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+ %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+ %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+ %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+ %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+ %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+ %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+ %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+ %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+ %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+ %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+ %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+ %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+ %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+ %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+ %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+ %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+ %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+ %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+ %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+ %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+ %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+ %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+ %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+ %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+ %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+ %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+ %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+ %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+ %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+ %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+ %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+ %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+ %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+ %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+ %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+ %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+ %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+ %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+ %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+ %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+ %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+ %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+ %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+ %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+ %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+ %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+ %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+ %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+ %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+ %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+ %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+ %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+ %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+ %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+ %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+ %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+ %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+ %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+ %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+ %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+ %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+ %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+ %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+ %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+ %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+ %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+ %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+ %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+ %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+ %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+ %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+ %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+ %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+ %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+ %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+ %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+ %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+ %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+ %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+ %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+ %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+ %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+ %r0 = add i8 %a0 , %b0
+ %r1 = add i8 %a1 , %b1
+ %r2 = add i8 %a2 , %b2
+ %r3 = add i8 %a3 , %b3
+ %r4 = add i8 %a4 , %b4
+ %r5 = add i8 %a5 , %b5
+ %r6 = add i8 %a6 , %b6
+ %r7 = add i8 %a7 , %b7
+ %r8 = add i8 %a8 , %b8
+ %r9 = add i8 %a9 , %b9
+ %r10 = add i8 %a10, %b10
+ %r11 = add i8 %a11, %b11
+ %r12 = add i8 %a12, %b12
+ %r13 = add i8 %a13, %b13
+ %r14 = add i8 %a14, %b14
+ %r15 = add i8 %a15, %b15
+ %r16 = add i8 %a16, %b16
+ %r17 = add i8 %a17, %b17
+ %r18 = add i8 %a18, %b18
+ %r19 = add i8 %a19, %b19
+ %r20 = add i8 %a20, %b20
+ %r21 = add i8 %a21, %b21
+ %r22 = add i8 %a22, %b22
+ %r23 = add i8 %a23, %b23
+ %r24 = add i8 %a24, %b24
+ %r25 = add i8 %a25, %b25
+ %r26 = add i8 %a26, %b26
+ %r27 = add i8 %a27, %b27
+ %r28 = add i8 %a28, %b28
+ %r29 = add i8 %a29, %b29
+ %r30 = add i8 %a30, %b30
+ %r31 = add i8 %a31, %b31
+ %r32 = add i8 %a32, %b32
+ %r33 = add i8 %a33, %b33
+ %r34 = add i8 %a34, %b34
+ %r35 = add i8 %a35, %b35
+ %r36 = add i8 %a36, %b36
+ %r37 = add i8 %a37, %b37
+ %r38 = add i8 %a38, %b38
+ %r39 = add i8 %a39, %b39
+ %r40 = add i8 %a40, %b40
+ %r41 = add i8 %a41, %b41
+ %r42 = add i8 %a42, %b42
+ %r43 = add i8 %a43, %b43
+ %r44 = add i8 %a44, %b44
+ %r45 = add i8 %a45, %b45
+ %r46 = add i8 %a46, %b46
+ %r47 = add i8 %a47, %b47
+ %r48 = add i8 %a48, %b48
+ %r49 = add i8 %a49, %b49
+ %r50 = add i8 %a50, %b50
+ %r51 = add i8 %a51, %b51
+ %r52 = add i8 %a52, %b52
+ %r53 = add i8 %a53, %b53
+ %r54 = add i8 %a54, %b54
+ %r55 = add i8 %a55, %b55
+ %r56 = add i8 %a56, %b56
+ %r57 = add i8 %a57, %b57
+ %r58 = add i8 %a58, %b58
+ %r59 = add i8 %a59, %b59
+ %r60 = add i8 %a60, %b60
+ %r61 = add i8 %a61, %b61
+ %r62 = add i8 %a62, %b62
+ %r63 = add i8 %a63, %b63
+ store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+ store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+ store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+ store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+ store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+ store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+ store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+ store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+ store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+ store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+ store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+ store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+ store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+ store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+ store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+ store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+ store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+ store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+ store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+ store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+ store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+ store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+ store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+ store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+ store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+ store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+ store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+ store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+ store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+ store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+ store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+ store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+ store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+ store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+ store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+ store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+ store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+ store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+ store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+ store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+ store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+ store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+ store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+ store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+ store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+ store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+ store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+ store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+ store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+ store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+ store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+ store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+ store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+ store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+ store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+ store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+ store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+ store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+ store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+ store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+ store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+ store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+ store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+ store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/arith-mul.ll b/test/Transforms/SLPVectorizer/X86/arith-mul.ll
new file mode 100644
index 000000000000..95875d7f01fd
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-mul.ll
@@ -0,0 +1,700 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8 = common global [64 x i8] zeroinitializer, align 64
+@b8 = common global [64 x i8] zeroinitializer, align 64
+@c8 = common global [64 x i8] zeroinitializer, align 64
+
+define void @mul_v8i64() {
+; SSE-LABEL: @mul_v8i64(
+; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; SSE-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; SSE-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; SSE-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; SSE-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; SSE-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; SSE-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; SSE-NEXT: [[R0:%.*]] = mul i64 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = mul i64 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = mul i64 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = mul i64 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = mul i64 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = mul i64 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = mul i64 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = mul i64 [[A7]], [[B7]]
+; SSE-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; SSE-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; SSE-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; SSE-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; SSE-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; SSE-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; SSE-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; SSE-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; SSE-NEXT: ret void
+;
+; AVX1-LABEL: @mul_v8i64(
+; AVX1-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; AVX1-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; AVX1-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; AVX1-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; AVX1-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; AVX1-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; AVX1-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; AVX1-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; AVX1-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; AVX1-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; AVX1-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; AVX1-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; AVX1-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; AVX1-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; AVX1-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; AVX1-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; AVX1-NEXT: [[R0:%.*]] = mul i64 [[A0]], [[B0]]
+; AVX1-NEXT: [[R1:%.*]] = mul i64 [[A1]], [[B1]]
+; AVX1-NEXT: [[R2:%.*]] = mul i64 [[A2]], [[B2]]
+; AVX1-NEXT: [[R3:%.*]] = mul i64 [[A3]], [[B3]]
+; AVX1-NEXT: [[R4:%.*]] = mul i64 [[A4]], [[B4]]
+; AVX1-NEXT: [[R5:%.*]] = mul i64 [[A5]], [[B5]]
+; AVX1-NEXT: [[R6:%.*]] = mul i64 [[A6]], [[B6]]
+; AVX1-NEXT: [[R7:%.*]] = mul i64 [[A7]], [[B7]]
+; AVX1-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; AVX1-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; AVX1-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; AVX1-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; AVX1-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; AVX1-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; AVX1-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; AVX1-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; AVX1-NEXT: ret void
+;
+; AVX2-LABEL: @mul_v8i64(
+; AVX2-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP5:%.*]] = mul <4 x i64> [[TMP1]], [[TMP3]]
+; AVX2-NEXT: [[TMP6:%.*]] = mul <4 x i64> [[TMP2]], [[TMP4]]
+; AVX2-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX2-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @mul_v8i64(
+; AVX512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = mul <8 x i64> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+ %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+ %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+ %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+ %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+ %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+ %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+ %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+ %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+ %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+ %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+ %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+ %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+ %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+ %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+ %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+ %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+ %r0 = mul i64 %a0, %b0
+ %r1 = mul i64 %a1, %b1
+ %r2 = mul i64 %a2, %b2
+ %r3 = mul i64 %a3, %b3
+ %r4 = mul i64 %a4, %b4
+ %r5 = mul i64 %a5, %b5
+ %r6 = mul i64 %a6, %b6
+ %r7 = mul i64 %a7, %b7
+ store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+ store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+ store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+ store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+ store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+ store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+ store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+ store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+ ret void
+}
+
+define void @mul_v16i32() {
+; SSE-LABEL: @mul_v16i32(
+; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP9:%.*]] = mul <4 x i32> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = mul <4 x i32> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = mul <4 x i32> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = mul <4 x i32> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @mul_v16i32(
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP5:%.*]] = mul <8 x i32> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @mul_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = mul <16 x i32> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
+;
+ %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+ %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+ %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+ %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+ %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+ %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+ %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+ %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+ %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+ %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+ %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+ %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+ %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+ %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+ %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+ %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+ %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+ %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+ %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+ %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+ %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+ %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+ %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+ %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+ %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+ %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+ %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+ %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+ %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+ %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+ %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+ %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+ %r0 = mul i32 %a0 , %b0
+ %r1 = mul i32 %a1 , %b1
+ %r2 = mul i32 %a2 , %b2
+ %r3 = mul i32 %a3 , %b3
+ %r4 = mul i32 %a4 , %b4
+ %r5 = mul i32 %a5 , %b5
+ %r6 = mul i32 %a6 , %b6
+ %r7 = mul i32 %a7 , %b7
+ %r8 = mul i32 %a8 , %b8
+ %r9 = mul i32 %a9 , %b9
+ %r10 = mul i32 %a10, %b10
+ %r11 = mul i32 %a11, %b11
+ %r12 = mul i32 %a12, %b12
+ %r13 = mul i32 %a13, %b13
+ %r14 = mul i32 %a14, %b14
+ %r15 = mul i32 %a15, %b15
+ store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+ store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+ store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+ store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+ store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+ store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+ store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+ store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+ store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+ store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+ store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+ store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+ store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+ store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+ store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+ store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+ ret void
+}
+
+define void @mul_v32i16() {
+; SSE-LABEL: @mul_v32i16(
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP9:%.*]] = mul <8 x i16> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = mul <8 x i16> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = mul <8 x i16> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = mul <8 x i16> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @mul_v32i16(
+; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP5:%.*]] = mul <16 x i16> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = mul <16 x i16> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @mul_v32i16(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP5:%.*]] = mul <16 x i16> [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[TMP6:%.*]] = mul <16 x i16> [[TMP2]], [[TMP4]]
+; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: ret void
+;
+ %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+ %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+ %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+ %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+ %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+ %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+ %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+ %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+ %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+ %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+ %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+ %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+ %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+ %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+ %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+ %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+ %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+ %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+ %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+ %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+ %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+ %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+ %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+ %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+ %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+ %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+ %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+ %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+ %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+ %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+ %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+ %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+ %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+ %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+ %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+ %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+ %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+ %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+ %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+ %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+ %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+ %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+ %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+ %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+ %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+ %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+ %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+ %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+ %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+ %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+ %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+ %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+ %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+ %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+ %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+ %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+ %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+ %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+ %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+ %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+ %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+ %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+ %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+ %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+ %r0 = mul i16 %a0 , %b0
+ %r1 = mul i16 %a1 , %b1
+ %r2 = mul i16 %a2 , %b2
+ %r3 = mul i16 %a3 , %b3
+ %r4 = mul i16 %a4 , %b4
+ %r5 = mul i16 %a5 , %b5
+ %r6 = mul i16 %a6 , %b6
+ %r7 = mul i16 %a7 , %b7
+ %r8 = mul i16 %a8 , %b8
+ %r9 = mul i16 %a9 , %b9
+ %r10 = mul i16 %a10, %b10
+ %r11 = mul i16 %a11, %b11
+ %r12 = mul i16 %a12, %b12
+ %r13 = mul i16 %a13, %b13
+ %r14 = mul i16 %a14, %b14
+ %r15 = mul i16 %a15, %b15
+ %r16 = mul i16 %a16, %b16
+ %r17 = mul i16 %a17, %b17
+ %r18 = mul i16 %a18, %b18
+ %r19 = mul i16 %a19, %b19
+ %r20 = mul i16 %a20, %b20
+ %r21 = mul i16 %a21, %b21
+ %r22 = mul i16 %a22, %b22
+ %r23 = mul i16 %a23, %b23
+ %r24 = mul i16 %a24, %b24
+ %r25 = mul i16 %a25, %b25
+ %r26 = mul i16 %a26, %b26
+ %r27 = mul i16 %a27, %b27
+ %r28 = mul i16 %a28, %b28
+ %r29 = mul i16 %a29, %b29
+ %r30 = mul i16 %a30, %b30
+ %r31 = mul i16 %a31, %b31
+ store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+ store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+ store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+ store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+ store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+ store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+ store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+ store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+ store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+ store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+ store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+ store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+ store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+ store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+ store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+ store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+ store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+ store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+ store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+ store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+ store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+ store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+ store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+ store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+ store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+ store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+ store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+ store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+ store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+ store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+ store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+ store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+ ret void
+}
+
+define void @mul_v64i8() {
+; CHECK-LABEL: @mul_v64i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP9:%.*]] = mul <16 x i8> [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul <16 x i8> [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i8> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul <16 x i8> [[TMP4]], [[TMP8]]
+; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
+;
+ %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+ %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+ %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+ %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+ %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+ %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+ %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+ %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+ %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+ %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+ %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+ %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+ %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+ %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+ %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+ %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+ %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+ %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+ %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+ %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+ %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+ %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+ %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+ %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+ %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+ %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+ %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+ %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+ %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+ %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+ %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+ %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+ %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+ %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+ %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+ %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+ %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+ %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+ %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+ %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+ %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+ %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+ %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+ %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+ %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+ %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+ %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+ %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+ %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+ %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+ %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+ %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+ %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+ %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+ %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+ %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+ %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+ %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+ %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+ %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+ %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+ %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+ %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+ %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+ %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+ %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+ %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+ %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+ %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+ %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+ %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+ %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+ %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+ %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+ %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+ %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+ %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+ %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+ %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+ %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+ %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+ %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+ %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+ %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+ %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+ %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+ %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+ %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+ %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+ %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+ %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+ %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+ %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+ %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+ %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+ %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+ %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+ %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+ %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+ %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+ %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+ %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+ %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+ %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+ %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+ %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+ %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+ %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+ %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+ %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+ %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+ %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+ %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+ %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+ %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+ %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+ %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+ %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+ %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+ %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+ %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+ %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+ %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+ %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+ %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+ %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+ %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+ %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+ %r0 = mul i8 %a0 , %b0
+ %r1 = mul i8 %a1 , %b1
+ %r2 = mul i8 %a2 , %b2
+ %r3 = mul i8 %a3 , %b3
+ %r4 = mul i8 %a4 , %b4
+ %r5 = mul i8 %a5 , %b5
+ %r6 = mul i8 %a6 , %b6
+ %r7 = mul i8 %a7 , %b7
+ %r8 = mul i8 %a8 , %b8
+ %r9 = mul i8 %a9 , %b9
+ %r10 = mul i8 %a10, %b10
+ %r11 = mul i8 %a11, %b11
+ %r12 = mul i8 %a12, %b12
+ %r13 = mul i8 %a13, %b13
+ %r14 = mul i8 %a14, %b14
+ %r15 = mul i8 %a15, %b15
+ %r16 = mul i8 %a16, %b16
+ %r17 = mul i8 %a17, %b17
+ %r18 = mul i8 %a18, %b18
+ %r19 = mul i8 %a19, %b19
+ %r20 = mul i8 %a20, %b20
+ %r21 = mul i8 %a21, %b21
+ %r22 = mul i8 %a22, %b22
+ %r23 = mul i8 %a23, %b23
+ %r24 = mul i8 %a24, %b24
+ %r25 = mul i8 %a25, %b25
+ %r26 = mul i8 %a26, %b26
+ %r27 = mul i8 %a27, %b27
+ %r28 = mul i8 %a28, %b28
+ %r29 = mul i8 %a29, %b29
+ %r30 = mul i8 %a30, %b30
+ %r31 = mul i8 %a31, %b31
+ %r32 = mul i8 %a32, %b32
+ %r33 = mul i8 %a33, %b33
+ %r34 = mul i8 %a34, %b34
+ %r35 = mul i8 %a35, %b35
+ %r36 = mul i8 %a36, %b36
+ %r37 = mul i8 %a37, %b37
+ %r38 = mul i8 %a38, %b38
+ %r39 = mul i8 %a39, %b39
+ %r40 = mul i8 %a40, %b40
+ %r41 = mul i8 %a41, %b41
+ %r42 = mul i8 %a42, %b42
+ %r43 = mul i8 %a43, %b43
+ %r44 = mul i8 %a44, %b44
+ %r45 = mul i8 %a45, %b45
+ %r46 = mul i8 %a46, %b46
+ %r47 = mul i8 %a47, %b47
+ %r48 = mul i8 %a48, %b48
+ %r49 = mul i8 %a49, %b49
+ %r50 = mul i8 %a50, %b50
+ %r51 = mul i8 %a51, %b51
+ %r52 = mul i8 %a52, %b52
+ %r53 = mul i8 %a53, %b53
+ %r54 = mul i8 %a54, %b54
+ %r55 = mul i8 %a55, %b55
+ %r56 = mul i8 %a56, %b56
+ %r57 = mul i8 %a57, %b57
+ %r58 = mul i8 %a58, %b58
+ %r59 = mul i8 %a59, %b59
+ %r60 = mul i8 %a60, %b60
+ %r61 = mul i8 %a61, %b61
+ %r62 = mul i8 %a62, %b62
+ %r63 = mul i8 %a63, %b63
+ store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+ store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+ store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+ store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+ store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+ store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+ store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+ store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+ store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+ store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+ store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+ store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+ store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+ store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+ store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+ store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+ store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+ store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+ store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+ store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+ store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+ store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+ store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+ store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+ store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+ store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+ store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+ store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+ store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+ store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+ store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+ store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+ store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+ store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+ store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+ store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+ store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+ store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+ store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+ store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+ store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+ store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+ store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+ store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+ store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+ store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+ store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+ store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+ store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+ store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+ store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+ store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+ store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+ store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+ store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+ store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+ store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+ store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+ store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+ store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+ store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+ store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+ store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+ store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/arith-sub.ll b/test/Transforms/SLPVectorizer/X86/arith-sub.ll
new file mode 100644
index 000000000000..85838369e226
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/arith-sub.ll
@@ -0,0 +1,649 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8 = common global [64 x i8] zeroinitializer, align 64
+@b8 = common global [64 x i8] zeroinitializer, align 64
+@c8 = common global [64 x i8] zeroinitializer, align 64
+
+define void @sub_v8i64() {
+; SSE-LABEL: @sub_v8i64(
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP9:%.*]] = sub <2 x i64> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = sub <2 x i64> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = sub <2 x i64> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = sub <2 x i64> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @sub_v8i64(
+; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX-NEXT: [[TMP5:%.*]] = sub <4 x i64> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = sub <4 x i64> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @sub_v8i64(
+; AVX512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = sub <8 x i64> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+ %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+ %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+ %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+ %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+ %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+ %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+ %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+ %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+ %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+ %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+ %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+ %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+ %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+ %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+ %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+ %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+ %r0 = sub i64 %a0, %b0
+ %r1 = sub i64 %a1, %b1
+ %r2 = sub i64 %a2, %b2
+ %r3 = sub i64 %a3, %b3
+ %r4 = sub i64 %a4, %b4
+ %r5 = sub i64 %a5, %b5
+ %r6 = sub i64 %a6, %b6
+ %r7 = sub i64 %a7, %b7
+ store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+ store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+ store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+ store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+ store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+ store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+ store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+ store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+ ret void
+}
+
+define void @sub_v16i32() {
+; SSE-LABEL: @sub_v16i32(
+; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP9:%.*]] = sub <4 x i32> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = sub <4 x i32> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = sub <4 x i32> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = sub <4 x i32> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @sub_v16i32(
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP5:%.*]] = sub <8 x i32> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = sub <8 x i32> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @sub_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = sub <16 x i32> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
+;
+ %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+ %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+ %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+ %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+ %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+ %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+ %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+ %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+ %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+ %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+ %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+ %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+ %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+ %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+ %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+ %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+ %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+ %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+ %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+ %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+ %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+ %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+ %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+ %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+ %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+ %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+ %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+ %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+ %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+ %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+ %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+ %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+ %r0 = sub i32 %a0 , %b0
+ %r1 = sub i32 %a1 , %b1
+ %r2 = sub i32 %a2 , %b2
+ %r3 = sub i32 %a3 , %b3
+ %r4 = sub i32 %a4 , %b4
+ %r5 = sub i32 %a5 , %b5
+ %r6 = sub i32 %a6 , %b6
+ %r7 = sub i32 %a7 , %b7
+ %r8 = sub i32 %a8 , %b8
+ %r9 = sub i32 %a9 , %b9
+ %r10 = sub i32 %a10, %b10
+ %r11 = sub i32 %a11, %b11
+ %r12 = sub i32 %a12, %b12
+ %r13 = sub i32 %a13, %b13
+ %r14 = sub i32 %a14, %b14
+ %r15 = sub i32 %a15, %b15
+ store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+ store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+ store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+ store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+ store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+ store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+ store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+ store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+ store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+ store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+ store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+ store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+ store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+ store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+ store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+ store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+ ret void
+}
+
+define void @sub_v32i16() {
+; SSE-LABEL: @sub_v32i16(
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @a16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([32 x i16]* @b16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP7:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP8:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP9:%.*]] = sub <8 x i16> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = sub <8 x i16> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = sub <8 x i16> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = sub <8 x i16> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([32 x i16]* @c16 to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP10]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP11]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP12]], <8 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24) to <8 x i16>*), align 2
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @sub_v32i16(
+; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP5:%.*]] = sub <16 x i16> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @sub_v32i16(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP5:%.*]] = sub <16 x i16> [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[TMP2]], [[TMP4]]
+; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: ret void
+;
+ %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+ %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+ %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+ %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+ %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+ %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+ %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+ %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+ %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+ %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+ %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+ %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+ %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+ %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+ %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+ %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+ %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+ %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+ %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+ %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+ %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+ %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+ %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+ %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+ %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+ %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+ %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+ %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+ %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+ %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+ %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+ %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+ %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+ %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+ %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+ %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+ %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+ %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+ %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+ %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+ %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+ %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+ %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+ %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+ %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+ %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+ %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+ %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+ %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+ %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+ %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+ %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+ %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+ %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+ %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+ %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+ %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+ %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+ %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+ %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+ %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+ %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+ %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+ %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+ %r0 = sub i16 %a0 , %b0
+ %r1 = sub i16 %a1 , %b1
+ %r2 = sub i16 %a2 , %b2
+ %r3 = sub i16 %a3 , %b3
+ %r4 = sub i16 %a4 , %b4
+ %r5 = sub i16 %a5 , %b5
+ %r6 = sub i16 %a6 , %b6
+ %r7 = sub i16 %a7 , %b7
+ %r8 = sub i16 %a8 , %b8
+ %r9 = sub i16 %a9 , %b9
+ %r10 = sub i16 %a10, %b10
+ %r11 = sub i16 %a11, %b11
+ %r12 = sub i16 %a12, %b12
+ %r13 = sub i16 %a13, %b13
+ %r14 = sub i16 %a14, %b14
+ %r15 = sub i16 %a15, %b15
+ %r16 = sub i16 %a16, %b16
+ %r17 = sub i16 %a17, %b17
+ %r18 = sub i16 %a18, %b18
+ %r19 = sub i16 %a19, %b19
+ %r20 = sub i16 %a20, %b20
+ %r21 = sub i16 %a21, %b21
+ %r22 = sub i16 %a22, %b22
+ %r23 = sub i16 %a23, %b23
+ %r24 = sub i16 %a24, %b24
+ %r25 = sub i16 %a25, %b25
+ %r26 = sub i16 %a26, %b26
+ %r27 = sub i16 %a27, %b27
+ %r28 = sub i16 %a28, %b28
+ %r29 = sub i16 %a29, %b29
+ %r30 = sub i16 %a30, %b30
+ %r31 = sub i16 %a31, %b31
+ store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+ store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+ store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+ store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+ store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+ store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+ store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+ store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+ store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+ store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+ store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+ store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+ store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+ store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+ store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+ store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+ store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+ store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+ store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+ store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+ store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+ store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+ store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+ store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+ store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+ store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+ store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+ store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+ store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+ store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+ store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+ store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+ ret void
+}
+
+define void @sub_v64i8() {
+; CHECK-LABEL: @sub_v64i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP9:%.*]] = sub <16 x i8> [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = sub <16 x i8> [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = sub <16 x i8> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub <16 x i8> [[TMP4]], [[TMP8]]
+; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
+;
+ %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+ %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+ %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+ %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+ %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+ %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+ %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+ %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+ %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+ %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+ %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+ %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+ %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+ %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+ %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+ %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+ %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+ %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+ %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+ %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+ %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+ %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+ %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+ %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+ %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+ %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+ %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+ %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+ %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+ %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+ %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+ %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+ %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+ %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+ %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+ %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+ %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+ %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+ %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+ %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+ %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+ %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+ %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+ %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+ %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+ %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+ %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+ %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+ %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+ %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+ %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+ %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+ %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+ %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+ %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+ %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+ %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+ %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+ %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+ %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+ %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+ %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+ %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+ %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+ %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+ %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+ %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+ %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+ %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+ %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+ %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+ %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+ %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+ %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+ %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+ %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+ %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+ %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+ %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+ %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+ %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+ %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+ %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+ %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+ %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+ %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+ %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+ %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+ %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+ %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+ %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+ %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+ %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+ %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+ %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+ %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+ %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+ %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+ %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+ %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+ %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+ %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+ %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+ %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+ %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+ %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+ %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+ %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+ %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+ %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+ %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+ %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+ %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+ %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+ %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+ %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+ %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+ %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+ %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+ %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+ %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+ %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+ %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+ %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+ %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+ %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+ %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+ %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+ %r0 = sub i8 %a0 , %b0
+ %r1 = sub i8 %a1 , %b1
+ %r2 = sub i8 %a2 , %b2
+ %r3 = sub i8 %a3 , %b3
+ %r4 = sub i8 %a4 , %b4
+ %r5 = sub i8 %a5 , %b5
+ %r6 = sub i8 %a6 , %b6
+ %r7 = sub i8 %a7 , %b7
+ %r8 = sub i8 %a8 , %b8
+ %r9 = sub i8 %a9 , %b9
+ %r10 = sub i8 %a10, %b10
+ %r11 = sub i8 %a11, %b11
+ %r12 = sub i8 %a12, %b12
+ %r13 = sub i8 %a13, %b13
+ %r14 = sub i8 %a14, %b14
+ %r15 = sub i8 %a15, %b15
+ %r16 = sub i8 %a16, %b16
+ %r17 = sub i8 %a17, %b17
+ %r18 = sub i8 %a18, %b18
+ %r19 = sub i8 %a19, %b19
+ %r20 = sub i8 %a20, %b20
+ %r21 = sub i8 %a21, %b21
+ %r22 = sub i8 %a22, %b22
+ %r23 = sub i8 %a23, %b23
+ %r24 = sub i8 %a24, %b24
+ %r25 = sub i8 %a25, %b25
+ %r26 = sub i8 %a26, %b26
+ %r27 = sub i8 %a27, %b27
+ %r28 = sub i8 %a28, %b28
+ %r29 = sub i8 %a29, %b29
+ %r30 = sub i8 %a30, %b30
+ %r31 = sub i8 %a31, %b31
+ %r32 = sub i8 %a32, %b32
+ %r33 = sub i8 %a33, %b33
+ %r34 = sub i8 %a34, %b34
+ %r35 = sub i8 %a35, %b35
+ %r36 = sub i8 %a36, %b36
+ %r37 = sub i8 %a37, %b37
+ %r38 = sub i8 %a38, %b38
+ %r39 = sub i8 %a39, %b39
+ %r40 = sub i8 %a40, %b40
+ %r41 = sub i8 %a41, %b41
+ %r42 = sub i8 %a42, %b42
+ %r43 = sub i8 %a43, %b43
+ %r44 = sub i8 %a44, %b44
+ %r45 = sub i8 %a45, %b45
+ %r46 = sub i8 %a46, %b46
+ %r47 = sub i8 %a47, %b47
+ %r48 = sub i8 %a48, %b48
+ %r49 = sub i8 %a49, %b49
+ %r50 = sub i8 %a50, %b50
+ %r51 = sub i8 %a51, %b51
+ %r52 = sub i8 %a52, %b52
+ %r53 = sub i8 %a53, %b53
+ %r54 = sub i8 %a54, %b54
+ %r55 = sub i8 %a55, %b55
+ %r56 = sub i8 %a56, %b56
+ %r57 = sub i8 %a57, %b57
+ %r58 = sub i8 %a58, %b58
+ %r59 = sub i8 %a59, %b59
+ %r60 = sub i8 %a60, %b60
+ %r61 = sub i8 %a61, %b61
+ %r62 = sub i8 %a62, %b62
+ %r63 = sub i8 %a63, %b63
+ store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+ store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+ store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+ store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+ store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+ store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+ store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+ store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+ store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+ store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+ store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+ store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+ store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+ store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+ store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+ store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+ store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+ store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+ store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+ store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+ store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+ store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+ store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+ store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+ store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+ store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+ store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+ store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+ store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+ store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+ store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+ store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+ store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+ store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+ store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+ store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+ store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+ store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+ store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+ store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+ store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+ store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+ store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+ store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+ store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+ store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+ store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+ store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+ store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+ store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+ store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+ store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+ store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+ store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+ store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+ store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+ store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+ store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+ store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+ store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+ store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+ store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+ store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+ store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/shift-ashr.ll b/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
new file mode 100644
index 000000000000..646f599ce340
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/shift-ashr.ll
@@ -0,0 +1,913 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver4 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=XOP
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8 = common global [64 x i8] zeroinitializer, align 64
+@b8 = common global [64 x i8] zeroinitializer, align 64
+@c8 = common global [64 x i8] zeroinitializer, align 64
+
+define void @ashr_v8i64() {
+; SSE-LABEL: @ashr_v8i64(
+; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; SSE-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; SSE-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; SSE-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; SSE-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; SSE-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; SSE-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; SSE-NEXT: [[R0:%.*]] = ashr i64 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = ashr i64 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = ashr i64 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = ashr i64 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = ashr i64 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = ashr i64 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = ashr i64 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = ashr i64 [[A7]], [[B7]]
+; SSE-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; SSE-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; SSE-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; SSE-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; SSE-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; SSE-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; SSE-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; SSE-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; SSE-NEXT: ret void
+;
+; AVX1-LABEL: @ashr_v8i64(
+; AVX1-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+; AVX1-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+; AVX1-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+; AVX1-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+; AVX1-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+; AVX1-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+; AVX1-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+; AVX1-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+; AVX1-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+; AVX1-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+; AVX1-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+; AVX1-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+; AVX1-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+; AVX1-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+; AVX1-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+; AVX1-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+; AVX1-NEXT: [[R0:%.*]] = ashr i64 [[A0]], [[B0]]
+; AVX1-NEXT: [[R1:%.*]] = ashr i64 [[A1]], [[B1]]
+; AVX1-NEXT: [[R2:%.*]] = ashr i64 [[A2]], [[B2]]
+; AVX1-NEXT: [[R3:%.*]] = ashr i64 [[A3]], [[B3]]
+; AVX1-NEXT: [[R4:%.*]] = ashr i64 [[A4]], [[B4]]
+; AVX1-NEXT: [[R5:%.*]] = ashr i64 [[A5]], [[B5]]
+; AVX1-NEXT: [[R6:%.*]] = ashr i64 [[A6]], [[B6]]
+; AVX1-NEXT: [[R7:%.*]] = ashr i64 [[A7]], [[B7]]
+; AVX1-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+; AVX1-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+; AVX1-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+; AVX1-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+; AVX1-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+; AVX1-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+; AVX1-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+; AVX1-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+; AVX1-NEXT: ret void
+;
+; AVX2-LABEL: @ashr_v8i64(
+; AVX2-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP5:%.*]] = ashr <4 x i64> [[TMP1]], [[TMP3]]
+; AVX2-NEXT: [[TMP6:%.*]] = ashr <4 x i64> [[TMP2]], [[TMP4]]
+; AVX2-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX2-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @ashr_v8i64(
+; AVX512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = ashr <8 x i64> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @ashr_v8i64(
+; XOP-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP5:%.*]] = ashr <4 x i64> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = ashr <4 x i64> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; XOP-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: ret void
+;
+ %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+ %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+ %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+ %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+ %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+ %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+ %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+ %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+ %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+ %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+ %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+ %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+ %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+ %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+ %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+ %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+ %r0 = ashr i64 %a0, %b0
+ %r1 = ashr i64 %a1, %b1
+ %r2 = ashr i64 %a2, %b2
+ %r3 = ashr i64 %a3, %b3
+ %r4 = ashr i64 %a4, %b4
+ %r5 = ashr i64 %a5, %b5
+ %r6 = ashr i64 %a6, %b6
+ %r7 = ashr i64 %a7, %b7
+ store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+ store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+ store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+ store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+ store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+ store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+ store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+ store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+ ret void
+}
+
+define void @ashr_v16i32() {
+; SSE-LABEL: @ashr_v16i32(
+; SSE-NEXT: [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
+; SSE-NEXT: [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
+; SSE-NEXT: [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
+; SSE-NEXT: [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
+; SSE-NEXT: [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
+; SSE-NEXT: [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
+; SSE-NEXT: [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
+; SSE-NEXT: [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
+; SSE-NEXT: [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
+; SSE-NEXT: [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
+; SSE-NEXT: [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+; SSE-NEXT: [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+; SSE-NEXT: [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+; SSE-NEXT: [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+; SSE-NEXT: [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+; SSE-NEXT: [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+; SSE-NEXT: [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
+; SSE-NEXT: [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
+; SSE-NEXT: [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
+; SSE-NEXT: [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
+; SSE-NEXT: [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
+; SSE-NEXT: [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
+; SSE-NEXT: [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
+; SSE-NEXT: [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
+; SSE-NEXT: [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
+; SSE-NEXT: [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
+; SSE-NEXT: [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+; SSE-NEXT: [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+; SSE-NEXT: [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+; SSE-NEXT: [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+; SSE-NEXT: [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+; SSE-NEXT: [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+; SSE-NEXT: [[R0:%.*]] = ashr i32 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = ashr i32 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = ashr i32 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = ashr i32 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = ashr i32 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = ashr i32 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = ashr i32 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = ashr i32 [[A7]], [[B7]]
+; SSE-NEXT: [[R8:%.*]] = ashr i32 [[A8]], [[B8]]
+; SSE-NEXT: [[R9:%.*]] = ashr i32 [[A9]], [[B9]]
+; SSE-NEXT: [[R10:%.*]] = ashr i32 [[A10]], [[B10]]
+; SSE-NEXT: [[R11:%.*]] = ashr i32 [[A11]], [[B11]]
+; SSE-NEXT: [[R12:%.*]] = ashr i32 [[A12]], [[B12]]
+; SSE-NEXT: [[R13:%.*]] = ashr i32 [[A13]], [[B13]]
+; SSE-NEXT: [[R14:%.*]] = ashr i32 [[A14]], [[B14]]
+; SSE-NEXT: [[R15:%.*]] = ashr i32 [[A15]], [[B15]]
+; SSE-NEXT: store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
+; SSE-NEXT: store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
+; SSE-NEXT: store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
+; SSE-NEXT: store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
+; SSE-NEXT: store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
+; SSE-NEXT: store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
+; SSE-NEXT: store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
+; SSE-NEXT: store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
+; SSE-NEXT: store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
+; SSE-NEXT: store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
+; SSE-NEXT: store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+; SSE-NEXT: store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+; SSE-NEXT: store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+; SSE-NEXT: store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+; SSE-NEXT: store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+; SSE-NEXT: store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+; SSE-NEXT: ret void
+;
+; AVX1-LABEL: @ashr_v16i32(
+; AVX1-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
+; AVX1-NEXT: [[TMP9:%.*]] = ashr <4 x i32> [[TMP1]], [[TMP5]]
+; AVX1-NEXT: [[TMP10:%.*]] = ashr <4 x i32> [[TMP2]], [[TMP6]]
+; AVX1-NEXT: [[TMP11:%.*]] = ashr <4 x i32> [[TMP3]], [[TMP7]]
+; AVX1-NEXT: [[TMP12:%.*]] = ashr <4 x i32> [[TMP4]], [[TMP8]]
+; AVX1-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
+; AVX1-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
+; AVX1-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
+; AVX1-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; AVX1-NEXT: ret void
+;
+; AVX2-LABEL: @ashr_v16i32(
+; AVX2-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX2-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX2-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX2-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX2-NEXT: [[TMP5:%.*]] = ashr <8 x i32> [[TMP1]], [[TMP3]]
+; AVX2-NEXT: [[TMP6:%.*]] = ashr <8 x i32> [[TMP2]], [[TMP4]]
+; AVX2-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX2-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @ashr_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = ashr <16 x i32> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @ashr_v16i32(
+; XOP-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP5:%.*]] = ashr <8 x i32> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = ashr <8 x i32> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; XOP-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: ret void
+;
+ %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+ %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+ %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+ %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+ %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+ %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+ %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+ %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+ %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+ %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+ %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+ %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+ %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+ %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+ %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+ %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+ %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+ %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+ %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+ %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+ %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+ %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+ %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+ %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+ %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+ %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+ %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+ %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+ %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+ %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+ %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+ %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+ %r0 = ashr i32 %a0 , %b0
+ %r1 = ashr i32 %a1 , %b1
+ %r2 = ashr i32 %a2 , %b2
+ %r3 = ashr i32 %a3 , %b3
+ %r4 = ashr i32 %a4 , %b4
+ %r5 = ashr i32 %a5 , %b5
+ %r6 = ashr i32 %a6 , %b6
+ %r7 = ashr i32 %a7 , %b7
+ %r8 = ashr i32 %a8 , %b8
+ %r9 = ashr i32 %a9 , %b9
+ %r10 = ashr i32 %a10, %b10
+ %r11 = ashr i32 %a11, %b11
+ %r12 = ashr i32 %a12, %b12
+ %r13 = ashr i32 %a13, %b13
+ %r14 = ashr i32 %a14, %b14
+ %r15 = ashr i32 %a15, %b15
+ store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+ store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+ store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+ store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+ store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+ store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+ store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+ store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+ store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+ store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+ store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+ store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+ store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+ store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+ store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+ store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+ ret void
+}
+
+define void @ashr_v32i16() {
+; SSE-LABEL: @ashr_v32i16(
+; SSE-NEXT: [[A0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0), align 2
+; SSE-NEXT: [[A1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1), align 2
+; SSE-NEXT: [[A2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2), align 2
+; SSE-NEXT: [[A3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3), align 2
+; SSE-NEXT: [[A4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4), align 2
+; SSE-NEXT: [[A5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5), align 2
+; SSE-NEXT: [[A6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6), align 2
+; SSE-NEXT: [[A7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7), align 2
+; SSE-NEXT: [[A8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8), align 2
+; SSE-NEXT: [[A9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9), align 2
+; SSE-NEXT: [[A10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+; SSE-NEXT: [[A11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+; SSE-NEXT: [[A12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+; SSE-NEXT: [[A13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+; SSE-NEXT: [[A14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+; SSE-NEXT: [[A15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+; SSE-NEXT: [[A16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+; SSE-NEXT: [[A17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+; SSE-NEXT: [[A18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+; SSE-NEXT: [[A19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+; SSE-NEXT: [[A20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+; SSE-NEXT: [[A21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+; SSE-NEXT: [[A22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+; SSE-NEXT: [[A23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+; SSE-NEXT: [[A24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+; SSE-NEXT: [[A25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+; SSE-NEXT: [[A26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+; SSE-NEXT: [[A27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+; SSE-NEXT: [[A28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+; SSE-NEXT: [[A29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+; SSE-NEXT: [[A30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+; SSE-NEXT: [[A31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+; SSE-NEXT: [[B0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0), align 2
+; SSE-NEXT: [[B1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1), align 2
+; SSE-NEXT: [[B2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2), align 2
+; SSE-NEXT: [[B3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3), align 2
+; SSE-NEXT: [[B4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4), align 2
+; SSE-NEXT: [[B5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5), align 2
+; SSE-NEXT: [[B6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6), align 2
+; SSE-NEXT: [[B7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7), align 2
+; SSE-NEXT: [[B8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8), align 2
+; SSE-NEXT: [[B9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9), align 2
+; SSE-NEXT: [[B10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+; SSE-NEXT: [[B11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+; SSE-NEXT: [[B12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+; SSE-NEXT: [[B13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+; SSE-NEXT: [[B14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+; SSE-NEXT: [[B15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+; SSE-NEXT: [[B16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+; SSE-NEXT: [[B17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+; SSE-NEXT: [[B18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+; SSE-NEXT: [[B19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+; SSE-NEXT: [[B20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+; SSE-NEXT: [[B21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+; SSE-NEXT: [[B22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+; SSE-NEXT: [[B23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+; SSE-NEXT: [[B24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+; SSE-NEXT: [[B25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+; SSE-NEXT: [[B26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+; SSE-NEXT: [[B27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+; SSE-NEXT: [[B28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+; SSE-NEXT: [[B29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+; SSE-NEXT: [[B30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+; SSE-NEXT: [[B31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+; SSE-NEXT: [[R0:%.*]] = ashr i16 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = ashr i16 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = ashr i16 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = ashr i16 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = ashr i16 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = ashr i16 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = ashr i16 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = ashr i16 [[A7]], [[B7]]
+; SSE-NEXT: [[R8:%.*]] = ashr i16 [[A8]], [[B8]]
+; SSE-NEXT: [[R9:%.*]] = ashr i16 [[A9]], [[B9]]
+; SSE-NEXT: [[R10:%.*]] = ashr i16 [[A10]], [[B10]]
+; SSE-NEXT: [[R11:%.*]] = ashr i16 [[A11]], [[B11]]
+; SSE-NEXT: [[R12:%.*]] = ashr i16 [[A12]], [[B12]]
+; SSE-NEXT: [[R13:%.*]] = ashr i16 [[A13]], [[B13]]
+; SSE-NEXT: [[R14:%.*]] = ashr i16 [[A14]], [[B14]]
+; SSE-NEXT: [[R15:%.*]] = ashr i16 [[A15]], [[B15]]
+; SSE-NEXT: [[R16:%.*]] = ashr i16 [[A16]], [[B16]]
+; SSE-NEXT: [[R17:%.*]] = ashr i16 [[A17]], [[B17]]
+; SSE-NEXT: [[R18:%.*]] = ashr i16 [[A18]], [[B18]]
+; SSE-NEXT: [[R19:%.*]] = ashr i16 [[A19]], [[B19]]
+; SSE-NEXT: [[R20:%.*]] = ashr i16 [[A20]], [[B20]]
+; SSE-NEXT: [[R21:%.*]] = ashr i16 [[A21]], [[B21]]
+; SSE-NEXT: [[R22:%.*]] = ashr i16 [[A22]], [[B22]]
+; SSE-NEXT: [[R23:%.*]] = ashr i16 [[A23]], [[B23]]
+; SSE-NEXT: [[R24:%.*]] = ashr i16 [[A24]], [[B24]]
+; SSE-NEXT: [[R25:%.*]] = ashr i16 [[A25]], [[B25]]
+; SSE-NEXT: [[R26:%.*]] = ashr i16 [[A26]], [[B26]]
+; SSE-NEXT: [[R27:%.*]] = ashr i16 [[A27]], [[B27]]
+; SSE-NEXT: [[R28:%.*]] = ashr i16 [[A28]], [[B28]]
+; SSE-NEXT: [[R29:%.*]] = ashr i16 [[A29]], [[B29]]
+; SSE-NEXT: [[R30:%.*]] = ashr i16 [[A30]], [[B30]]
+; SSE-NEXT: [[R31:%.*]] = ashr i16 [[A31]], [[B31]]
+; SSE-NEXT: store i16 [[R0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0), align 2
+; SSE-NEXT: store i16 [[R1]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1), align 2
+; SSE-NEXT: store i16 [[R2]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2), align 2
+; SSE-NEXT: store i16 [[R3]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3), align 2
+; SSE-NEXT: store i16 [[R4]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4), align 2
+; SSE-NEXT: store i16 [[R5]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5), align 2
+; SSE-NEXT: store i16 [[R6]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6), align 2
+; SSE-NEXT: store i16 [[R7]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7), align 2
+; SSE-NEXT: store i16 [[R8]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8), align 2
+; SSE-NEXT: store i16 [[R9]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9), align 2
+; SSE-NEXT: store i16 [[R10]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+; SSE-NEXT: store i16 [[R11]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+; SSE-NEXT: store i16 [[R12]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+; SSE-NEXT: store i16 [[R13]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+; SSE-NEXT: store i16 [[R14]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+; SSE-NEXT: store i16 [[R15]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+; SSE-NEXT: store i16 [[R16]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+; SSE-NEXT: store i16 [[R17]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+; SSE-NEXT: store i16 [[R18]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+; SSE-NEXT: store i16 [[R19]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+; SSE-NEXT: store i16 [[R20]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+; SSE-NEXT: store i16 [[R21]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+; SSE-NEXT: store i16 [[R22]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+; SSE-NEXT: store i16 [[R23]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+; SSE-NEXT: store i16 [[R24]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+; SSE-NEXT: store i16 [[R25]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+; SSE-NEXT: store i16 [[R26]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+; SSE-NEXT: store i16 [[R27]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+; SSE-NEXT: store i16 [[R28]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+; SSE-NEXT: store i16 [[R29]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+; SSE-NEXT: store i16 [[R30]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+; SSE-NEXT: store i16 [[R31]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @ashr_v32i16(
+; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP5:%.*]] = ashr <16 x i16> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = ashr <16 x i16> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @ashr_v32i16(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP5:%.*]] = ashr <16 x i16> [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[TMP6:%.*]] = ashr <16 x i16> [[TMP2]], [[TMP4]]
+; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @ashr_v32i16(
+; XOP-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP5:%.*]] = ashr <16 x i16> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = ashr <16 x i16> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; XOP-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: ret void
+;
+ %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+ %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+ %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+ %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+ %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+ %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+ %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+ %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+ %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+ %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+ %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+ %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+ %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+ %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+ %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+ %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+ %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+ %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+ %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+ %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+ %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+ %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+ %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+ %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+ %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+ %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+ %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+ %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+ %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+ %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+ %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+ %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+ %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+ %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+ %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+ %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+ %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+ %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+ %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+ %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+ %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+ %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+ %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+ %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+ %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+ %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+ %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+ %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+ %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+ %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+ %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+ %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+ %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+ %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+ %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+ %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+ %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+ %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+ %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+ %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+ %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+ %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+ %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+ %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+ %r0 = ashr i16 %a0 , %b0
+ %r1 = ashr i16 %a1 , %b1
+ %r2 = ashr i16 %a2 , %b2
+ %r3 = ashr i16 %a3 , %b3
+ %r4 = ashr i16 %a4 , %b4
+ %r5 = ashr i16 %a5 , %b5
+ %r6 = ashr i16 %a6 , %b6
+ %r7 = ashr i16 %a7 , %b7
+ %r8 = ashr i16 %a8 , %b8
+ %r9 = ashr i16 %a9 , %b9
+ %r10 = ashr i16 %a10, %b10
+ %r11 = ashr i16 %a11, %b11
+ %r12 = ashr i16 %a12, %b12
+ %r13 = ashr i16 %a13, %b13
+ %r14 = ashr i16 %a14, %b14
+ %r15 = ashr i16 %a15, %b15
+ %r16 = ashr i16 %a16, %b16
+ %r17 = ashr i16 %a17, %b17
+ %r18 = ashr i16 %a18, %b18
+ %r19 = ashr i16 %a19, %b19
+ %r20 = ashr i16 %a20, %b20
+ %r21 = ashr i16 %a21, %b21
+ %r22 = ashr i16 %a22, %b22
+ %r23 = ashr i16 %a23, %b23
+ %r24 = ashr i16 %a24, %b24
+ %r25 = ashr i16 %a25, %b25
+ %r26 = ashr i16 %a26, %b26
+ %r27 = ashr i16 %a27, %b27
+ %r28 = ashr i16 %a28, %b28
+ %r29 = ashr i16 %a29, %b29
+ %r30 = ashr i16 %a30, %b30
+ %r31 = ashr i16 %a31, %b31
+ store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+ store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+ store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+ store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+ store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+ store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+ store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+ store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+ store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+ store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+ store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+ store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+ store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+ store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+ store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+ store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+ store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+ store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+ store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+ store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+ store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+ store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+ store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+ store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+ store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+ store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+ store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+ store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+ store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+ store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+ store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+ store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+ ret void
+}
+
+define void @ashr_v64i8() {
+; CHECK-LABEL: @ashr_v64i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP9:%.*]] = ashr <16 x i8> [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = ashr <16 x i8> [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = ashr <16 x i8> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = ashr <16 x i8> [[TMP4]], [[TMP8]]
+; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
+;
+ %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+ %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+ %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+ %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+ %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+ %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+ %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+ %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+ %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+ %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+ %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+ %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+ %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+ %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+ %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+ %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+ %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+ %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+ %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+ %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+ %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+ %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+ %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+ %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+ %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+ %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+ %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+ %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+ %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+ %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+ %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+ %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+ %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+ %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+ %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+ %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+ %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+ %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+ %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+ %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+ %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+ %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+ %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+ %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+ %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+ %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+ %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+ %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+ %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+ %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+ %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+ %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+ %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+ %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+ %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+ %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+ %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+ %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+ %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+ %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+ %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+ %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+ %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+ %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+ %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+ %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+ %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+ %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+ %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+ %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+ %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+ %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+ %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+ %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+ %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+ %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+ %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+ %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+ %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+ %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+ %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+ %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+ %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+ %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+ %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+ %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+ %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+ %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+ %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+ %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+ %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+ %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+ %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+ %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+ %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+ %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+ %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+ %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+ %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+ %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+ %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+ %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+ %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+ %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+ %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+ %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+ %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+ %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+ %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+ %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+ %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+ %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+ %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+ %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+ %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+ %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+ %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+ %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+ %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+ %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+ %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+ %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+ %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+ %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+ %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+ %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+ %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+ %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+ %r0 = ashr i8 %a0 , %b0
+ %r1 = ashr i8 %a1 , %b1
+ %r2 = ashr i8 %a2 , %b2
+ %r3 = ashr i8 %a3 , %b3
+ %r4 = ashr i8 %a4 , %b4
+ %r5 = ashr i8 %a5 , %b5
+ %r6 = ashr i8 %a6 , %b6
+ %r7 = ashr i8 %a7 , %b7
+ %r8 = ashr i8 %a8 , %b8
+ %r9 = ashr i8 %a9 , %b9
+ %r10 = ashr i8 %a10, %b10
+ %r11 = ashr i8 %a11, %b11
+ %r12 = ashr i8 %a12, %b12
+ %r13 = ashr i8 %a13, %b13
+ %r14 = ashr i8 %a14, %b14
+ %r15 = ashr i8 %a15, %b15
+ %r16 = ashr i8 %a16, %b16
+ %r17 = ashr i8 %a17, %b17
+ %r18 = ashr i8 %a18, %b18
+ %r19 = ashr i8 %a19, %b19
+ %r20 = ashr i8 %a20, %b20
+ %r21 = ashr i8 %a21, %b21
+ %r22 = ashr i8 %a22, %b22
+ %r23 = ashr i8 %a23, %b23
+ %r24 = ashr i8 %a24, %b24
+ %r25 = ashr i8 %a25, %b25
+ %r26 = ashr i8 %a26, %b26
+ %r27 = ashr i8 %a27, %b27
+ %r28 = ashr i8 %a28, %b28
+ %r29 = ashr i8 %a29, %b29
+ %r30 = ashr i8 %a30, %b30
+ %r31 = ashr i8 %a31, %b31
+ %r32 = ashr i8 %a32, %b32
+ %r33 = ashr i8 %a33, %b33
+ %r34 = ashr i8 %a34, %b34
+ %r35 = ashr i8 %a35, %b35
+ %r36 = ashr i8 %a36, %b36
+ %r37 = ashr i8 %a37, %b37
+ %r38 = ashr i8 %a38, %b38
+ %r39 = ashr i8 %a39, %b39
+ %r40 = ashr i8 %a40, %b40
+ %r41 = ashr i8 %a41, %b41
+ %r42 = ashr i8 %a42, %b42
+ %r43 = ashr i8 %a43, %b43
+ %r44 = ashr i8 %a44, %b44
+ %r45 = ashr i8 %a45, %b45
+ %r46 = ashr i8 %a46, %b46
+ %r47 = ashr i8 %a47, %b47
+ %r48 = ashr i8 %a48, %b48
+ %r49 = ashr i8 %a49, %b49
+ %r50 = ashr i8 %a50, %b50
+ %r51 = ashr i8 %a51, %b51
+ %r52 = ashr i8 %a52, %b52
+ %r53 = ashr i8 %a53, %b53
+ %r54 = ashr i8 %a54, %b54
+ %r55 = ashr i8 %a55, %b55
+ %r56 = ashr i8 %a56, %b56
+ %r57 = ashr i8 %a57, %b57
+ %r58 = ashr i8 %a58, %b58
+ %r59 = ashr i8 %a59, %b59
+ %r60 = ashr i8 %a60, %b60
+ %r61 = ashr i8 %a61, %b61
+ %r62 = ashr i8 %a62, %b62
+ %r63 = ashr i8 %a63, %b63
+ store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+ store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+ store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+ store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+ store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+ store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+ store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+ store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+ store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+ store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+ store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+ store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+ store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+ store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+ store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+ store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+ store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+ store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+ store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+ store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+ store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+ store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+ store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+ store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+ store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+ store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+ store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+ store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+ store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+ store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+ store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+ store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+ store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+ store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+ store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+ store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+ store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+ store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+ store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+ store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+ store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+ store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+ store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+ store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+ store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+ store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+ store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+ store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+ store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+ store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+ store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+ store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+ store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+ store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+ store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+ store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+ store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+ store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+ store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+ store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+ store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+ store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+ store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+ store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/shift-lshr.ll b/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
new file mode 100644
index 000000000000..6fd78e7c9699
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/shift-lshr.ll
@@ -0,0 +1,862 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver4 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=XOP
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8 = common global [64 x i8] zeroinitializer, align 64
+@b8 = common global [64 x i8] zeroinitializer, align 64
+@c8 = common global [64 x i8] zeroinitializer, align 64
+
+define void @lshr_v8i64() {
+; SSE-LABEL: @lshr_v8i64(
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP9:%.*]] = lshr <2 x i64> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = lshr <2 x i64> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = lshr <2 x i64> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = lshr <2 x i64> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: ret void
+;
+; AVX1-LABEL: @lshr_v8i64(
+; AVX1-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP9:%.*]] = lshr <2 x i64> [[TMP1]], [[TMP5]]
+; AVX1-NEXT: [[TMP10:%.*]] = lshr <2 x i64> [[TMP2]], [[TMP6]]
+; AVX1-NEXT: [[TMP11:%.*]] = lshr <2 x i64> [[TMP3]], [[TMP7]]
+; AVX1-NEXT: [[TMP12:%.*]] = lshr <2 x i64> [[TMP4]], [[TMP8]]
+; AVX1-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
+; AVX1-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
+; AVX1-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
+; AVX1-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
+; AVX1-NEXT: ret void
+;
+; AVX2-LABEL: @lshr_v8i64(
+; AVX2-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP5:%.*]] = lshr <4 x i64> [[TMP1]], [[TMP3]]
+; AVX2-NEXT: [[TMP6:%.*]] = lshr <4 x i64> [[TMP2]], [[TMP4]]
+; AVX2-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX2-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @lshr_v8i64(
+; AVX512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = lshr <8 x i64> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @lshr_v8i64(
+; XOP-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP5:%.*]] = lshr <4 x i64> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = lshr <4 x i64> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; XOP-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: ret void
+;
+ %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+ %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+ %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+ %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+ %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+ %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+ %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+ %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+ %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+ %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+ %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+ %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+ %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+ %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+ %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+ %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+ %r0 = lshr i64 %a0, %b0
+ %r1 = lshr i64 %a1, %b1
+ %r2 = lshr i64 %a2, %b2
+ %r3 = lshr i64 %a3, %b3
+ %r4 = lshr i64 %a4, %b4
+ %r5 = lshr i64 %a5, %b5
+ %r6 = lshr i64 %a6, %b6
+ %r7 = lshr i64 %a7, %b7
+ store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+ store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+ store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+ store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+ store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+ store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+ store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+ store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+ ret void
+}
+
+define void @lshr_v16i32() {
+; SSE-LABEL: @lshr_v16i32(
+; SSE-NEXT: [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
+; SSE-NEXT: [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
+; SSE-NEXT: [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
+; SSE-NEXT: [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
+; SSE-NEXT: [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
+; SSE-NEXT: [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
+; SSE-NEXT: [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
+; SSE-NEXT: [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
+; SSE-NEXT: [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
+; SSE-NEXT: [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
+; SSE-NEXT: [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+; SSE-NEXT: [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+; SSE-NEXT: [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+; SSE-NEXT: [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+; SSE-NEXT: [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+; SSE-NEXT: [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+; SSE-NEXT: [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
+; SSE-NEXT: [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
+; SSE-NEXT: [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
+; SSE-NEXT: [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
+; SSE-NEXT: [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
+; SSE-NEXT: [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
+; SSE-NEXT: [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
+; SSE-NEXT: [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
+; SSE-NEXT: [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
+; SSE-NEXT: [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
+; SSE-NEXT: [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+; SSE-NEXT: [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+; SSE-NEXT: [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+; SSE-NEXT: [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+; SSE-NEXT: [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+; SSE-NEXT: [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+; SSE-NEXT: [[R0:%.*]] = lshr i32 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = lshr i32 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = lshr i32 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = lshr i32 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = lshr i32 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = lshr i32 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = lshr i32 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = lshr i32 [[A7]], [[B7]]
+; SSE-NEXT: [[R8:%.*]] = lshr i32 [[A8]], [[B8]]
+; SSE-NEXT: [[R9:%.*]] = lshr i32 [[A9]], [[B9]]
+; SSE-NEXT: [[R10:%.*]] = lshr i32 [[A10]], [[B10]]
+; SSE-NEXT: [[R11:%.*]] = lshr i32 [[A11]], [[B11]]
+; SSE-NEXT: [[R12:%.*]] = lshr i32 [[A12]], [[B12]]
+; SSE-NEXT: [[R13:%.*]] = lshr i32 [[A13]], [[B13]]
+; SSE-NEXT: [[R14:%.*]] = lshr i32 [[A14]], [[B14]]
+; SSE-NEXT: [[R15:%.*]] = lshr i32 [[A15]], [[B15]]
+; SSE-NEXT: store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
+; SSE-NEXT: store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
+; SSE-NEXT: store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
+; SSE-NEXT: store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
+; SSE-NEXT: store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
+; SSE-NEXT: store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
+; SSE-NEXT: store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
+; SSE-NEXT: store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
+; SSE-NEXT: store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
+; SSE-NEXT: store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
+; SSE-NEXT: store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+; SSE-NEXT: store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+; SSE-NEXT: store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+; SSE-NEXT: store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+; SSE-NEXT: store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+; SSE-NEXT: store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @lshr_v16i32(
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP5:%.*]] = lshr <8 x i32> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = lshr <8 x i32> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @lshr_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = lshr <16 x i32> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @lshr_v16i32(
+; XOP-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP5:%.*]] = lshr <8 x i32> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = lshr <8 x i32> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; XOP-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: ret void
+;
+ %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+ %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+ %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+ %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+ %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+ %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+ %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+ %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+ %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+ %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+ %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+ %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+ %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+ %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+ %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+ %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+ %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+ %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+ %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+ %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+ %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+ %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+ %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+ %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+ %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+ %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+ %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+ %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+ %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+ %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+ %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+ %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+ %r0 = lshr i32 %a0 , %b0
+ %r1 = lshr i32 %a1 , %b1
+ %r2 = lshr i32 %a2 , %b2
+ %r3 = lshr i32 %a3 , %b3
+ %r4 = lshr i32 %a4 , %b4
+ %r5 = lshr i32 %a5 , %b5
+ %r6 = lshr i32 %a6 , %b6
+ %r7 = lshr i32 %a7 , %b7
+ %r8 = lshr i32 %a8 , %b8
+ %r9 = lshr i32 %a9 , %b9
+ %r10 = lshr i32 %a10, %b10
+ %r11 = lshr i32 %a11, %b11
+ %r12 = lshr i32 %a12, %b12
+ %r13 = lshr i32 %a13, %b13
+ %r14 = lshr i32 %a14, %b14
+ %r15 = lshr i32 %a15, %b15
+ store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+ store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+ store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+ store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+ store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+ store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+ store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+ store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+ store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+ store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+ store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+ store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+ store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+ store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+ store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+ store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+ ret void
+}
+
+define void @lshr_v32i16() {
+; SSE-LABEL: @lshr_v32i16(
+; SSE-NEXT: [[A0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0), align 2
+; SSE-NEXT: [[A1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1), align 2
+; SSE-NEXT: [[A2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2), align 2
+; SSE-NEXT: [[A3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3), align 2
+; SSE-NEXT: [[A4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4), align 2
+; SSE-NEXT: [[A5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5), align 2
+; SSE-NEXT: [[A6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6), align 2
+; SSE-NEXT: [[A7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7), align 2
+; SSE-NEXT: [[A8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8), align 2
+; SSE-NEXT: [[A9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9), align 2
+; SSE-NEXT: [[A10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+; SSE-NEXT: [[A11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+; SSE-NEXT: [[A12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+; SSE-NEXT: [[A13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+; SSE-NEXT: [[A14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+; SSE-NEXT: [[A15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+; SSE-NEXT: [[A16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+; SSE-NEXT: [[A17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+; SSE-NEXT: [[A18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+; SSE-NEXT: [[A19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+; SSE-NEXT: [[A20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+; SSE-NEXT: [[A21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+; SSE-NEXT: [[A22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+; SSE-NEXT: [[A23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+; SSE-NEXT: [[A24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+; SSE-NEXT: [[A25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+; SSE-NEXT: [[A26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+; SSE-NEXT: [[A27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+; SSE-NEXT: [[A28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+; SSE-NEXT: [[A29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+; SSE-NEXT: [[A30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+; SSE-NEXT: [[A31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+; SSE-NEXT: [[B0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0), align 2
+; SSE-NEXT: [[B1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1), align 2
+; SSE-NEXT: [[B2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2), align 2
+; SSE-NEXT: [[B3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3), align 2
+; SSE-NEXT: [[B4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4), align 2
+; SSE-NEXT: [[B5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5), align 2
+; SSE-NEXT: [[B6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6), align 2
+; SSE-NEXT: [[B7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7), align 2
+; SSE-NEXT: [[B8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8), align 2
+; SSE-NEXT: [[B9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9), align 2
+; SSE-NEXT: [[B10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+; SSE-NEXT: [[B11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+; SSE-NEXT: [[B12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+; SSE-NEXT: [[B13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+; SSE-NEXT: [[B14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+; SSE-NEXT: [[B15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+; SSE-NEXT: [[B16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+; SSE-NEXT: [[B17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+; SSE-NEXT: [[B18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+; SSE-NEXT: [[B19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+; SSE-NEXT: [[B20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+; SSE-NEXT: [[B21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+; SSE-NEXT: [[B22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+; SSE-NEXT: [[B23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+; SSE-NEXT: [[B24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+; SSE-NEXT: [[B25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+; SSE-NEXT: [[B26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+; SSE-NEXT: [[B27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+; SSE-NEXT: [[B28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+; SSE-NEXT: [[B29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+; SSE-NEXT: [[B30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+; SSE-NEXT: [[B31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+; SSE-NEXT: [[R0:%.*]] = lshr i16 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = lshr i16 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = lshr i16 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = lshr i16 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = lshr i16 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = lshr i16 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = lshr i16 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = lshr i16 [[A7]], [[B7]]
+; SSE-NEXT: [[R8:%.*]] = lshr i16 [[A8]], [[B8]]
+; SSE-NEXT: [[R9:%.*]] = lshr i16 [[A9]], [[B9]]
+; SSE-NEXT: [[R10:%.*]] = lshr i16 [[A10]], [[B10]]
+; SSE-NEXT: [[R11:%.*]] = lshr i16 [[A11]], [[B11]]
+; SSE-NEXT: [[R12:%.*]] = lshr i16 [[A12]], [[B12]]
+; SSE-NEXT: [[R13:%.*]] = lshr i16 [[A13]], [[B13]]
+; SSE-NEXT: [[R14:%.*]] = lshr i16 [[A14]], [[B14]]
+; SSE-NEXT: [[R15:%.*]] = lshr i16 [[A15]], [[B15]]
+; SSE-NEXT: [[R16:%.*]] = lshr i16 [[A16]], [[B16]]
+; SSE-NEXT: [[R17:%.*]] = lshr i16 [[A17]], [[B17]]
+; SSE-NEXT: [[R18:%.*]] = lshr i16 [[A18]], [[B18]]
+; SSE-NEXT: [[R19:%.*]] = lshr i16 [[A19]], [[B19]]
+; SSE-NEXT: [[R20:%.*]] = lshr i16 [[A20]], [[B20]]
+; SSE-NEXT: [[R21:%.*]] = lshr i16 [[A21]], [[B21]]
+; SSE-NEXT: [[R22:%.*]] = lshr i16 [[A22]], [[B22]]
+; SSE-NEXT: [[R23:%.*]] = lshr i16 [[A23]], [[B23]]
+; SSE-NEXT: [[R24:%.*]] = lshr i16 [[A24]], [[B24]]
+; SSE-NEXT: [[R25:%.*]] = lshr i16 [[A25]], [[B25]]
+; SSE-NEXT: [[R26:%.*]] = lshr i16 [[A26]], [[B26]]
+; SSE-NEXT: [[R27:%.*]] = lshr i16 [[A27]], [[B27]]
+; SSE-NEXT: [[R28:%.*]] = lshr i16 [[A28]], [[B28]]
+; SSE-NEXT: [[R29:%.*]] = lshr i16 [[A29]], [[B29]]
+; SSE-NEXT: [[R30:%.*]] = lshr i16 [[A30]], [[B30]]
+; SSE-NEXT: [[R31:%.*]] = lshr i16 [[A31]], [[B31]]
+; SSE-NEXT: store i16 [[R0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0), align 2
+; SSE-NEXT: store i16 [[R1]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1), align 2
+; SSE-NEXT: store i16 [[R2]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2), align 2
+; SSE-NEXT: store i16 [[R3]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3), align 2
+; SSE-NEXT: store i16 [[R4]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4), align 2
+; SSE-NEXT: store i16 [[R5]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5), align 2
+; SSE-NEXT: store i16 [[R6]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6), align 2
+; SSE-NEXT: store i16 [[R7]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7), align 2
+; SSE-NEXT: store i16 [[R8]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8), align 2
+; SSE-NEXT: store i16 [[R9]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9), align 2
+; SSE-NEXT: store i16 [[R10]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+; SSE-NEXT: store i16 [[R11]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+; SSE-NEXT: store i16 [[R12]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+; SSE-NEXT: store i16 [[R13]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+; SSE-NEXT: store i16 [[R14]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+; SSE-NEXT: store i16 [[R15]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+; SSE-NEXT: store i16 [[R16]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+; SSE-NEXT: store i16 [[R17]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+; SSE-NEXT: store i16 [[R18]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+; SSE-NEXT: store i16 [[R19]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+; SSE-NEXT: store i16 [[R20]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+; SSE-NEXT: store i16 [[R21]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+; SSE-NEXT: store i16 [[R22]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+; SSE-NEXT: store i16 [[R23]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+; SSE-NEXT: store i16 [[R24]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+; SSE-NEXT: store i16 [[R25]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+; SSE-NEXT: store i16 [[R26]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+; SSE-NEXT: store i16 [[R27]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+; SSE-NEXT: store i16 [[R28]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+; SSE-NEXT: store i16 [[R29]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+; SSE-NEXT: store i16 [[R30]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+; SSE-NEXT: store i16 [[R31]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @lshr_v32i16(
+; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP5:%.*]] = lshr <16 x i16> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = lshr <16 x i16> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @lshr_v32i16(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP5:%.*]] = lshr <16 x i16> [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[TMP6:%.*]] = lshr <16 x i16> [[TMP2]], [[TMP4]]
+; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @lshr_v32i16(
+; XOP-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP5:%.*]] = lshr <16 x i16> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = lshr <16 x i16> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; XOP-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: ret void
+;
+ %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+ %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+ %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+ %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+ %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+ %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+ %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+ %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+ %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+ %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+ %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+ %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+ %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+ %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+ %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+ %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+ %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+ %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+ %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+ %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+ %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+ %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+ %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+ %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+ %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+ %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+ %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+ %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+ %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+ %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+ %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+ %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+ %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+ %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+ %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+ %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+ %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+ %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+ %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+ %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+ %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+ %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+ %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+ %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+ %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+ %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+ %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+ %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+ %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+ %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+ %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+ %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+ %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+ %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+ %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+ %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+ %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+ %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+ %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+ %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+ %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+ %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+ %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+ %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+ %r0 = lshr i16 %a0 , %b0
+ %r1 = lshr i16 %a1 , %b1
+ %r2 = lshr i16 %a2 , %b2
+ %r3 = lshr i16 %a3 , %b3
+ %r4 = lshr i16 %a4 , %b4
+ %r5 = lshr i16 %a5 , %b5
+ %r6 = lshr i16 %a6 , %b6
+ %r7 = lshr i16 %a7 , %b7
+ %r8 = lshr i16 %a8 , %b8
+ %r9 = lshr i16 %a9 , %b9
+ %r10 = lshr i16 %a10, %b10
+ %r11 = lshr i16 %a11, %b11
+ %r12 = lshr i16 %a12, %b12
+ %r13 = lshr i16 %a13, %b13
+ %r14 = lshr i16 %a14, %b14
+ %r15 = lshr i16 %a15, %b15
+ %r16 = lshr i16 %a16, %b16
+ %r17 = lshr i16 %a17, %b17
+ %r18 = lshr i16 %a18, %b18
+ %r19 = lshr i16 %a19, %b19
+ %r20 = lshr i16 %a20, %b20
+ %r21 = lshr i16 %a21, %b21
+ %r22 = lshr i16 %a22, %b22
+ %r23 = lshr i16 %a23, %b23
+ %r24 = lshr i16 %a24, %b24
+ %r25 = lshr i16 %a25, %b25
+ %r26 = lshr i16 %a26, %b26
+ %r27 = lshr i16 %a27, %b27
+ %r28 = lshr i16 %a28, %b28
+ %r29 = lshr i16 %a29, %b29
+ %r30 = lshr i16 %a30, %b30
+ %r31 = lshr i16 %a31, %b31
+ store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+ store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+ store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+ store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+ store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+ store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+ store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+ store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+ store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+ store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+ store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+ store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+ store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+ store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+ store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+ store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+ store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+ store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+ store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+ store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+ store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+ store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+ store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+ store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+ store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+ store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+ store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+ store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+ store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+ store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+ store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+ store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+ ret void
+}
+
+define void @lshr_v64i8() {
+; CHECK-LABEL: @lshr_v64i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP9:%.*]] = lshr <16 x i8> [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = lshr <16 x i8> [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = lshr <16 x i8> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = lshr <16 x i8> [[TMP4]], [[TMP8]]
+; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
+;
+ %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+ %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+ %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+ %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+ %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+ %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+ %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+ %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+ %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+ %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+ %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+ %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+ %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+ %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+ %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+ %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+ %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+ %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+ %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+ %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+ %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+ %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+ %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+ %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+ %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+ %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+ %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+ %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+ %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+ %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+ %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+ %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+ %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+ %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+ %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+ %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+ %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+ %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+ %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+ %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+ %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+ %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+ %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+ %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+ %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+ %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+ %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+ %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+ %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+ %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+ %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+ %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+ %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+ %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+ %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+ %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+ %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+ %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+ %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+ %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+ %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+ %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+ %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+ %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+ %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+ %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+ %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+ %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+ %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+ %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+ %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+ %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+ %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+ %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+ %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+ %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+ %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+ %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+ %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+ %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+ %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+ %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+ %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+ %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+ %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+ %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+ %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+ %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+ %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+ %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+ %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+ %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+ %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+ %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+ %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+ %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+ %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+ %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+ %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+ %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+ %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+ %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+ %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+ %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+ %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+ %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+ %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+ %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+ %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+ %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+ %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+ %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+ %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+ %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+ %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+ %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+ %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+ %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+ %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+ %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+ %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+ %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+ %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+ %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+ %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+ %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+ %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+ %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+ %r0 = lshr i8 %a0 , %b0
+ %r1 = lshr i8 %a1 , %b1
+ %r2 = lshr i8 %a2 , %b2
+ %r3 = lshr i8 %a3 , %b3
+ %r4 = lshr i8 %a4 , %b4
+ %r5 = lshr i8 %a5 , %b5
+ %r6 = lshr i8 %a6 , %b6
+ %r7 = lshr i8 %a7 , %b7
+ %r8 = lshr i8 %a8 , %b8
+ %r9 = lshr i8 %a9 , %b9
+ %r10 = lshr i8 %a10, %b10
+ %r11 = lshr i8 %a11, %b11
+ %r12 = lshr i8 %a12, %b12
+ %r13 = lshr i8 %a13, %b13
+ %r14 = lshr i8 %a14, %b14
+ %r15 = lshr i8 %a15, %b15
+ %r16 = lshr i8 %a16, %b16
+ %r17 = lshr i8 %a17, %b17
+ %r18 = lshr i8 %a18, %b18
+ %r19 = lshr i8 %a19, %b19
+ %r20 = lshr i8 %a20, %b20
+ %r21 = lshr i8 %a21, %b21
+ %r22 = lshr i8 %a22, %b22
+ %r23 = lshr i8 %a23, %b23
+ %r24 = lshr i8 %a24, %b24
+ %r25 = lshr i8 %a25, %b25
+ %r26 = lshr i8 %a26, %b26
+ %r27 = lshr i8 %a27, %b27
+ %r28 = lshr i8 %a28, %b28
+ %r29 = lshr i8 %a29, %b29
+ %r30 = lshr i8 %a30, %b30
+ %r31 = lshr i8 %a31, %b31
+ %r32 = lshr i8 %a32, %b32
+ %r33 = lshr i8 %a33, %b33
+ %r34 = lshr i8 %a34, %b34
+ %r35 = lshr i8 %a35, %b35
+ %r36 = lshr i8 %a36, %b36
+ %r37 = lshr i8 %a37, %b37
+ %r38 = lshr i8 %a38, %b38
+ %r39 = lshr i8 %a39, %b39
+ %r40 = lshr i8 %a40, %b40
+ %r41 = lshr i8 %a41, %b41
+ %r42 = lshr i8 %a42, %b42
+ %r43 = lshr i8 %a43, %b43
+ %r44 = lshr i8 %a44, %b44
+ %r45 = lshr i8 %a45, %b45
+ %r46 = lshr i8 %a46, %b46
+ %r47 = lshr i8 %a47, %b47
+ %r48 = lshr i8 %a48, %b48
+ %r49 = lshr i8 %a49, %b49
+ %r50 = lshr i8 %a50, %b50
+ %r51 = lshr i8 %a51, %b51
+ %r52 = lshr i8 %a52, %b52
+ %r53 = lshr i8 %a53, %b53
+ %r54 = lshr i8 %a54, %b54
+ %r55 = lshr i8 %a55, %b55
+ %r56 = lshr i8 %a56, %b56
+ %r57 = lshr i8 %a57, %b57
+ %r58 = lshr i8 %a58, %b58
+ %r59 = lshr i8 %a59, %b59
+ %r60 = lshr i8 %a60, %b60
+ %r61 = lshr i8 %a61, %b61
+ %r62 = lshr i8 %a62, %b62
+ %r63 = lshr i8 %a63, %b63
+ store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+ store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+ store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+ store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+ store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+ store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+ store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+ store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+ store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+ store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+ store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+ store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+ store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+ store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+ store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+ store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+ store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+ store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+ store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+ store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+ store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+ store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+ store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+ store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+ store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+ store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+ store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+ store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+ store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+ store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+ store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+ store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+ store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+ store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+ store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+ store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+ store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+ store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+ store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+ store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+ store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+ store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+ store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+ store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+ store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+ store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+ store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+ store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+ store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+ store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+ store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+ store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+ store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+ store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+ store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+ store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+ store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+ store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+ store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+ store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+ store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+ store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+ store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+ store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/shift-shl.ll b/test/Transforms/SLPVectorizer/X86/shift-shl.ll
new file mode 100644
index 000000000000..70de82bdea5f
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/shift-shl.ll
@@ -0,0 +1,814 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver4 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=XOP
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@a8 = common global [64 x i8] zeroinitializer, align 64
+@b8 = common global [64 x i8] zeroinitializer, align 64
+@c8 = common global [64 x i8] zeroinitializer, align 64
+
+define void @shl_v8i64() {
+; SSE-LABEL: @shl_v8i64(
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: [[TMP9:%.*]] = shl <2 x i64> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = shl <2 x i64> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = shl <2 x i64> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = shl <2 x i64> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
+; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
+; SSE-NEXT: ret void
+;
+; AVX1-LABEL: @shl_v8i64(
+; AVX1-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @a64 to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @b64 to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6) to <2 x i64>*), align 8
+; AVX1-NEXT: [[TMP9:%.*]] = shl <2 x i64> [[TMP1]], [[TMP5]]
+; AVX1-NEXT: [[TMP10:%.*]] = shl <2 x i64> [[TMP2]], [[TMP6]]
+; AVX1-NEXT: [[TMP11:%.*]] = shl <2 x i64> [[TMP3]], [[TMP7]]
+; AVX1-NEXT: [[TMP12:%.*]] = shl <2 x i64> [[TMP4]], [[TMP8]]
+; AVX1-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
+; AVX1-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <2 x i64>*), align 8
+; AVX1-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <2 x i64>*), align 8
+; AVX1-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <2 x i64>*), align 8
+; AVX1-NEXT: ret void
+;
+; AVX2-LABEL: @shl_v8i64(
+; AVX2-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: [[TMP5:%.*]] = shl <4 x i64> [[TMP1]], [[TMP3]]
+; AVX2-NEXT: [[TMP6:%.*]] = shl <4 x i64> [[TMP2]], [[TMP4]]
+; AVX2-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX2-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @shl_v8i64(
+; AVX512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = shl <8 x i64> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @shl_v8i64(
+; XOP-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: [[TMP5:%.*]] = shl <4 x i64> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = shl <4 x i64> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; XOP-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; XOP-NEXT: ret void
+;
+ %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
+ %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
+ %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
+ %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
+ %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
+ %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
+ %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
+ %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
+ %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
+ %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
+ %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
+ %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
+ %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
+ %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
+ %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
+ %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
+ %r0 = shl i64 %a0, %b0
+ %r1 = shl i64 %a1, %b1
+ %r2 = shl i64 %a2, %b2
+ %r3 = shl i64 %a3, %b3
+ %r4 = shl i64 %a4, %b4
+ %r5 = shl i64 %a5, %b5
+ %r6 = shl i64 %a6, %b6
+ %r7 = shl i64 %a7, %b7
+ store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
+ store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
+ store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
+ store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
+ store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
+ store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
+ store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
+ store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
+ ret void
+}
+
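+; Variable shift-left of 16 x i32. The checks below expect SSE to use four
+; <4 x i32> shl ops, AVX/XOP two <8 x i32> ops, and AVX512 one <16 x i32> op.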
+define void @shl_v16i32() {
+; SSE-LABEL: @shl_v16i32(
+; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: [[TMP9:%.*]] = shl <4 x i32> [[TMP1]], [[TMP5]]
+; SSE-NEXT: [[TMP10:%.*]] = shl <4 x i32> [[TMP2]], [[TMP6]]
+; SSE-NEXT: [[TMP11:%.*]] = shl <4 x i32> [[TMP3]], [[TMP7]]
+; SSE-NEXT: [[TMP12:%.*]] = shl <4 x i32> [[TMP4]], [[TMP8]]
+; SSE-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <4 x i32>*), align 4
+; SSE-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @shl_v16i32(
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP5:%.*]] = shl <8 x i32> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = shl <8 x i32> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @shl_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = shl <16 x i32> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @shl_v16i32(
+; XOP-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: [[TMP5:%.*]] = shl <8 x i32> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = shl <8 x i32> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; XOP-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; XOP-NEXT: ret void
+;
+ %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
+ %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
+ %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
+ %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
+ %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
+ %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
+ %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
+ %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
+ %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
+ %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
+ %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
+ %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
+ %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
+ %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
+ %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
+ %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
+ %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
+ %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
+ %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
+ %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
+ %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
+ %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
+ %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
+ %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
+ %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
+ %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
+ %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
+ %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
+ %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
+ %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
+ %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
+ %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
+ %r0 = shl i32 %a0 , %b0
+ %r1 = shl i32 %a1 , %b1
+ %r2 = shl i32 %a2 , %b2
+ %r3 = shl i32 %a3 , %b3
+ %r4 = shl i32 %a4 , %b4
+ %r5 = shl i32 %a5 , %b5
+ %r6 = shl i32 %a6 , %b6
+ %r7 = shl i32 %a7 , %b7
+ %r8 = shl i32 %a8 , %b8
+ %r9 = shl i32 %a9 , %b9
+ %r10 = shl i32 %a10, %b10
+ %r11 = shl i32 %a11, %b11
+ %r12 = shl i32 %a12, %b12
+ %r13 = shl i32 %a13, %b13
+ %r14 = shl i32 %a14, %b14
+ %r15 = shl i32 %a15, %b15
+ store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
+ store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
+ store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
+ store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
+ store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
+ store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
+ store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
+ store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
+ store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
+ store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
+ store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
+ store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
+ store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
+ store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
+ store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
+ store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
+ ret void
+}
+
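+; Variable shift-left of 32 x i16. The checks below expect the i16 shifts to
+; remain scalar for SSE, while AVX, AVX512 and XOP are each expected to use
+; two <16 x i16> shl ops.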
+define void @shl_v32i16() {
+; SSE-LABEL: @shl_v32i16(
+; SSE-NEXT: [[A0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0), align 2
+; SSE-NEXT: [[A1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1), align 2
+; SSE-NEXT: [[A2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2), align 2
+; SSE-NEXT: [[A3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3), align 2
+; SSE-NEXT: [[A4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4), align 2
+; SSE-NEXT: [[A5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5), align 2
+; SSE-NEXT: [[A6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6), align 2
+; SSE-NEXT: [[A7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7), align 2
+; SSE-NEXT: [[A8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8), align 2
+; SSE-NEXT: [[A9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9), align 2
+; SSE-NEXT: [[A10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+; SSE-NEXT: [[A11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+; SSE-NEXT: [[A12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+; SSE-NEXT: [[A13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+; SSE-NEXT: [[A14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+; SSE-NEXT: [[A15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+; SSE-NEXT: [[A16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+; SSE-NEXT: [[A17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+; SSE-NEXT: [[A18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+; SSE-NEXT: [[A19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+; SSE-NEXT: [[A20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+; SSE-NEXT: [[A21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+; SSE-NEXT: [[A22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+; SSE-NEXT: [[A23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+; SSE-NEXT: [[A24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+; SSE-NEXT: [[A25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+; SSE-NEXT: [[A26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+; SSE-NEXT: [[A27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+; SSE-NEXT: [[A28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+; SSE-NEXT: [[A29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+; SSE-NEXT: [[A30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+; SSE-NEXT: [[A31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+; SSE-NEXT: [[B0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0), align 2
+; SSE-NEXT: [[B1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1), align 2
+; SSE-NEXT: [[B2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2), align 2
+; SSE-NEXT: [[B3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3), align 2
+; SSE-NEXT: [[B4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4), align 2
+; SSE-NEXT: [[B5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5), align 2
+; SSE-NEXT: [[B6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6), align 2
+; SSE-NEXT: [[B7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7), align 2
+; SSE-NEXT: [[B8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8), align 2
+; SSE-NEXT: [[B9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9), align 2
+; SSE-NEXT: [[B10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+; SSE-NEXT: [[B11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+; SSE-NEXT: [[B12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+; SSE-NEXT: [[B13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+; SSE-NEXT: [[B14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+; SSE-NEXT: [[B15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+; SSE-NEXT: [[B16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+; SSE-NEXT: [[B17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+; SSE-NEXT: [[B18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+; SSE-NEXT: [[B19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+; SSE-NEXT: [[B20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+; SSE-NEXT: [[B21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+; SSE-NEXT: [[B22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+; SSE-NEXT: [[B23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+; SSE-NEXT: [[B24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+; SSE-NEXT: [[B25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+; SSE-NEXT: [[B26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+; SSE-NEXT: [[B27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+; SSE-NEXT: [[B28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+; SSE-NEXT: [[B29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+; SSE-NEXT: [[B30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+; SSE-NEXT: [[B31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+; SSE-NEXT: [[R0:%.*]] = shl i16 [[A0]], [[B0]]
+; SSE-NEXT: [[R1:%.*]] = shl i16 [[A1]], [[B1]]
+; SSE-NEXT: [[R2:%.*]] = shl i16 [[A2]], [[B2]]
+; SSE-NEXT: [[R3:%.*]] = shl i16 [[A3]], [[B3]]
+; SSE-NEXT: [[R4:%.*]] = shl i16 [[A4]], [[B4]]
+; SSE-NEXT: [[R5:%.*]] = shl i16 [[A5]], [[B5]]
+; SSE-NEXT: [[R6:%.*]] = shl i16 [[A6]], [[B6]]
+; SSE-NEXT: [[R7:%.*]] = shl i16 [[A7]], [[B7]]
+; SSE-NEXT: [[R8:%.*]] = shl i16 [[A8]], [[B8]]
+; SSE-NEXT: [[R9:%.*]] = shl i16 [[A9]], [[B9]]
+; SSE-NEXT: [[R10:%.*]] = shl i16 [[A10]], [[B10]]
+; SSE-NEXT: [[R11:%.*]] = shl i16 [[A11]], [[B11]]
+; SSE-NEXT: [[R12:%.*]] = shl i16 [[A12]], [[B12]]
+; SSE-NEXT: [[R13:%.*]] = shl i16 [[A13]], [[B13]]
+; SSE-NEXT: [[R14:%.*]] = shl i16 [[A14]], [[B14]]
+; SSE-NEXT: [[R15:%.*]] = shl i16 [[A15]], [[B15]]
+; SSE-NEXT: [[R16:%.*]] = shl i16 [[A16]], [[B16]]
+; SSE-NEXT: [[R17:%.*]] = shl i16 [[A17]], [[B17]]
+; SSE-NEXT: [[R18:%.*]] = shl i16 [[A18]], [[B18]]
+; SSE-NEXT: [[R19:%.*]] = shl i16 [[A19]], [[B19]]
+; SSE-NEXT: [[R20:%.*]] = shl i16 [[A20]], [[B20]]
+; SSE-NEXT: [[R21:%.*]] = shl i16 [[A21]], [[B21]]
+; SSE-NEXT: [[R22:%.*]] = shl i16 [[A22]], [[B22]]
+; SSE-NEXT: [[R23:%.*]] = shl i16 [[A23]], [[B23]]
+; SSE-NEXT: [[R24:%.*]] = shl i16 [[A24]], [[B24]]
+; SSE-NEXT: [[R25:%.*]] = shl i16 [[A25]], [[B25]]
+; SSE-NEXT: [[R26:%.*]] = shl i16 [[A26]], [[B26]]
+; SSE-NEXT: [[R27:%.*]] = shl i16 [[A27]], [[B27]]
+; SSE-NEXT: [[R28:%.*]] = shl i16 [[A28]], [[B28]]
+; SSE-NEXT: [[R29:%.*]] = shl i16 [[A29]], [[B29]]
+; SSE-NEXT: [[R30:%.*]] = shl i16 [[A30]], [[B30]]
+; SSE-NEXT: [[R31:%.*]] = shl i16 [[A31]], [[B31]]
+; SSE-NEXT: store i16 [[R0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0), align 2
+; SSE-NEXT: store i16 [[R1]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1), align 2
+; SSE-NEXT: store i16 [[R2]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2), align 2
+; SSE-NEXT: store i16 [[R3]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3), align 2
+; SSE-NEXT: store i16 [[R4]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4), align 2
+; SSE-NEXT: store i16 [[R5]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5), align 2
+; SSE-NEXT: store i16 [[R6]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6), align 2
+; SSE-NEXT: store i16 [[R7]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7), align 2
+; SSE-NEXT: store i16 [[R8]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8), align 2
+; SSE-NEXT: store i16 [[R9]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9), align 2
+; SSE-NEXT: store i16 [[R10]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+; SSE-NEXT: store i16 [[R11]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+; SSE-NEXT: store i16 [[R12]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+; SSE-NEXT: store i16 [[R13]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+; SSE-NEXT: store i16 [[R14]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+; SSE-NEXT: store i16 [[R15]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+; SSE-NEXT: store i16 [[R16]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+; SSE-NEXT: store i16 [[R17]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+; SSE-NEXT: store i16 [[R18]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+; SSE-NEXT: store i16 [[R19]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+; SSE-NEXT: store i16 [[R20]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+; SSE-NEXT: store i16 [[R21]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+; SSE-NEXT: store i16 [[R22]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+; SSE-NEXT: store i16 [[R23]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+; SSE-NEXT: store i16 [[R24]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+; SSE-NEXT: store i16 [[R25]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+; SSE-NEXT: store i16 [[R26]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+; SSE-NEXT: store i16 [[R27]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+; SSE-NEXT: store i16 [[R28]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+; SSE-NEXT: store i16 [[R29]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+; SSE-NEXT: store i16 [[R30]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+; SSE-NEXT: store i16 [[R31]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @shl_v32i16(
+; AVX-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: [[TMP5:%.*]] = shl <16 x i16> [[TMP1]], [[TMP3]]
+; AVX-NEXT: [[TMP6:%.*]] = shl <16 x i16> [[TMP2]], [[TMP4]]
+; AVX-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @shl_v32i16(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: [[TMP5:%.*]] = shl <16 x i16> [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[TMP6:%.*]] = shl <16 x i16> [[TMP2]], [[TMP4]]
+; AVX512-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; AVX512-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; AVX512-NEXT: ret void
+;
+; XOP-LABEL: @shl_v32i16(
+; XOP-NEXT: [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @a16 to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP3:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([32 x i16]* @b16 to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP4:%.*]] = load <16 x i16>, <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: [[TMP5:%.*]] = shl <16 x i16> [[TMP1]], [[TMP3]]
+; XOP-NEXT: [[TMP6:%.*]] = shl <16 x i16> [[TMP2]], [[TMP4]]
+; XOP-NEXT: store <16 x i16> [[TMP5]], <16 x i16>* bitcast ([32 x i16]* @c16 to <16 x i16>*), align 2
+; XOP-NEXT: store <16 x i16> [[TMP6]], <16 x i16>* bitcast (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16) to <16 x i16>*), align 2
+; XOP-NEXT: ret void
+;
+ %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
+ %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
+ %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
+ %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
+ %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
+ %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
+ %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
+ %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
+ %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
+ %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
+ %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
+ %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
+ %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
+ %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
+ %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
+ %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
+ %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
+ %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
+ %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
+ %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
+ %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
+ %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
+ %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
+ %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
+ %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
+ %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
+ %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
+ %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
+ %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
+ %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
+ %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
+ %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
+ %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
+ %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
+ %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
+ %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
+ %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
+ %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
+ %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
+ %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
+ %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
+ %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
+ %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
+ %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
+ %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
+ %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
+ %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
+ %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
+ %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
+ %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
+ %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
+ %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
+ %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
+ %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
+ %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
+ %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
+ %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
+ %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
+ %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
+ %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
+ %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
+ %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
+ %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
+ %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
+ %r0 = shl i16 %a0 , %b0
+ %r1 = shl i16 %a1 , %b1
+ %r2 = shl i16 %a2 , %b2
+ %r3 = shl i16 %a3 , %b3
+ %r4 = shl i16 %a4 , %b4
+ %r5 = shl i16 %a5 , %b5
+ %r6 = shl i16 %a6 , %b6
+ %r7 = shl i16 %a7 , %b7
+ %r8 = shl i16 %a8 , %b8
+ %r9 = shl i16 %a9 , %b9
+ %r10 = shl i16 %a10, %b10
+ %r11 = shl i16 %a11, %b11
+ %r12 = shl i16 %a12, %b12
+ %r13 = shl i16 %a13, %b13
+ %r14 = shl i16 %a14, %b14
+ %r15 = shl i16 %a15, %b15
+ %r16 = shl i16 %a16, %b16
+ %r17 = shl i16 %a17, %b17
+ %r18 = shl i16 %a18, %b18
+ %r19 = shl i16 %a19, %b19
+ %r20 = shl i16 %a20, %b20
+ %r21 = shl i16 %a21, %b21
+ %r22 = shl i16 %a22, %b22
+ %r23 = shl i16 %a23, %b23
+ %r24 = shl i16 %a24, %b24
+ %r25 = shl i16 %a25, %b25
+ %r26 = shl i16 %a26, %b26
+ %r27 = shl i16 %a27, %b27
+ %r28 = shl i16 %a28, %b28
+ %r29 = shl i16 %a29, %b29
+ %r30 = shl i16 %a30, %b30
+ %r31 = shl i16 %a31, %b31
+ store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
+ store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
+ store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
+ store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
+ store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
+ store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
+ store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
+ store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
+ store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
+ store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
+ store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
+ store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
+ store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
+ store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
+ store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
+ store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
+ store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
+ store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
+ store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
+ store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
+ store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
+ store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
+ store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
+ store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
+ store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
+ store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
+ store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
+ store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
+ store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
+ store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
+ store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
+ store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
+ ret void
+}
+
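+; Variable shift-left of 64 x i8. All configurations share one set of checks
+; and are expected to split the work into four <16 x i8> shl ops.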
+define void @shl_v64i8() {
+; CHECK-LABEL: @shl_v64i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @a8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([64 x i8]* @b8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP9:%.*]] = shl <16 x i8> [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = shl <16 x i8> [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = shl <16 x i8> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = shl <16 x i8> [[TMP4]], [[TMP8]]
+; CHECK-NEXT: store <16 x i8> [[TMP9]], <16 x i8>* bitcast ([64 x i8]* @c8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP10]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP11]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32) to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP12]], <16 x i8>* bitcast (i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
+;
+ %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
+ %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
+ %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
+ %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
+ %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
+ %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
+ %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
+ %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
+ %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
+ %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
+ %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
+ %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
+ %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
+ %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
+ %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
+ %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
+ %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
+ %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
+ %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
+ %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
+ %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
+ %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
+ %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
+ %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
+ %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
+ %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
+ %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
+ %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
+ %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
+ %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
+ %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
+ %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
+ %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
+ %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
+ %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
+ %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
+ %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
+ %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
+ %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
+ %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
+ %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
+ %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
+ %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
+ %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
+ %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
+ %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
+ %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
+ %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
+ %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
+ %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
+ %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
+ %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
+ %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
+ %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
+ %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
+ %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
+ %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
+ %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
+ %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
+ %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
+ %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
+ %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
+ %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
+ %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
+ %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
+ %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
+ %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
+ %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
+ %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
+ %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
+ %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
+ %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
+ %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
+ %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
+ %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
+ %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
+ %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
+ %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
+ %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
+ %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
+ %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
+ %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
+ %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
+ %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
+ %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
+ %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
+ %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
+ %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
+ %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
+ %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
+ %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
+ %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
+ %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
+ %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
+ %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
+ %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
+ %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
+ %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
+ %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
+ %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
+ %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
+ %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
+ %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
+ %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
+ %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
+ %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
+ %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
+ %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
+ %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
+ %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
+ %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
+ %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
+ %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
+ %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
+ %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
+ %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
+ %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
+ %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
+ %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
+ %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
+ %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
+ %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
+ %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
+ %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
+ %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
+ %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
+ %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
+ %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
+ %r0 = shl i8 %a0 , %b0
+ %r1 = shl i8 %a1 , %b1
+ %r2 = shl i8 %a2 , %b2
+ %r3 = shl i8 %a3 , %b3
+ %r4 = shl i8 %a4 , %b4
+ %r5 = shl i8 %a5 , %b5
+ %r6 = shl i8 %a6 , %b6
+ %r7 = shl i8 %a7 , %b7
+ %r8 = shl i8 %a8 , %b8
+ %r9 = shl i8 %a9 , %b9
+ %r10 = shl i8 %a10, %b10
+ %r11 = shl i8 %a11, %b11
+ %r12 = shl i8 %a12, %b12
+ %r13 = shl i8 %a13, %b13
+ %r14 = shl i8 %a14, %b14
+ %r15 = shl i8 %a15, %b15
+ %r16 = shl i8 %a16, %b16
+ %r17 = shl i8 %a17, %b17
+ %r18 = shl i8 %a18, %b18
+ %r19 = shl i8 %a19, %b19
+ %r20 = shl i8 %a20, %b20
+ %r21 = shl i8 %a21, %b21
+ %r22 = shl i8 %a22, %b22
+ %r23 = shl i8 %a23, %b23
+ %r24 = shl i8 %a24, %b24
+ %r25 = shl i8 %a25, %b25
+ %r26 = shl i8 %a26, %b26
+ %r27 = shl i8 %a27, %b27
+ %r28 = shl i8 %a28, %b28
+ %r29 = shl i8 %a29, %b29
+ %r30 = shl i8 %a30, %b30
+ %r31 = shl i8 %a31, %b31
+ %r32 = shl i8 %a32, %b32
+ %r33 = shl i8 %a33, %b33
+ %r34 = shl i8 %a34, %b34
+ %r35 = shl i8 %a35, %b35
+ %r36 = shl i8 %a36, %b36
+ %r37 = shl i8 %a37, %b37
+ %r38 = shl i8 %a38, %b38
+ %r39 = shl i8 %a39, %b39
+ %r40 = shl i8 %a40, %b40
+ %r41 = shl i8 %a41, %b41
+ %r42 = shl i8 %a42, %b42
+ %r43 = shl i8 %a43, %b43
+ %r44 = shl i8 %a44, %b44
+ %r45 = shl i8 %a45, %b45
+ %r46 = shl i8 %a46, %b46
+ %r47 = shl i8 %a47, %b47
+ %r48 = shl i8 %a48, %b48
+ %r49 = shl i8 %a49, %b49
+ %r50 = shl i8 %a50, %b50
+ %r51 = shl i8 %a51, %b51
+ %r52 = shl i8 %a52, %b52
+ %r53 = shl i8 %a53, %b53
+ %r54 = shl i8 %a54, %b54
+ %r55 = shl i8 %a55, %b55
+ %r56 = shl i8 %a56, %b56
+ %r57 = shl i8 %a57, %b57
+ %r58 = shl i8 %a58, %b58
+ %r59 = shl i8 %a59, %b59
+ %r60 = shl i8 %a60, %b60
+ %r61 = shl i8 %a61, %b61
+ %r62 = shl i8 %a62, %b62
+ %r63 = shl i8 %a63, %b63
+ store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
+ store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
+ store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
+ store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
+ store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
+ store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
+ store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
+ store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
+ store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
+ store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
+ store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
+ store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
+ store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
+ store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
+ store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
+ store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
+ store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
+ store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
+ store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
+ store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
+ store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
+ store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
+ store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
+ store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
+ store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
+ store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
+ store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
+ store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
+ store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
+ store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
+ store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
+ store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
+ store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
+ store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
+ store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
+ store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
+ store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
+ store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
+ store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
+ store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
+ store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
+ store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
+ store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
+ store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
+ store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
+ store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
+ store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
+ store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
+ store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
+ store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
+ store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
+ store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
+ store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
+ store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
+ store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
+ store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
+ store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
+ store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
+ store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
+ store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
+ store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
+ store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
+ store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
+ store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
+ ret void
+}
diff --git a/test/Transforms/SimpleLoopUnswitch/trivial-unswitch.ll b/test/Transforms/SimpleLoopUnswitch/trivial-unswitch.ll
index 42b4f3dea75b..3ac3c5138ae7 100644
--- a/test/Transforms/SimpleLoopUnswitch/trivial-unswitch.ll
+++ b/test/Transforms/SimpleLoopUnswitch/trivial-unswitch.ll
@@ -183,3 +183,202 @@ loop_exit3:
; CHECK: [[UNREACHABLE]]:
; CHECK-NEXT: unreachable
}
+
+; This test contains a trivially unswitchable branch with an LCSSA phi node in
+; a loop exit block.
+define i32 @test5(i1 %cond1, i32 %x, i32 %y) {
+; CHECK-LABEL: @test5(
+entry:
+ br label %loop_begin
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %{{.*}}, label %entry.split, label %loop_exit
+;
+; CHECK: entry.split:
+; CHECK-NEXT: br label %loop_begin
+
+loop_begin:
+ br i1 %cond1, label %latch, label %loop_exit
+; CHECK: loop_begin:
+; CHECK-NEXT: br label %latch
+
+latch:
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+; CHECK: latch:
+; CHECK-NEXT: call
+; CHECK-NEXT: br label %loop_begin
+
+loop_exit:
+ %result1 = phi i32 [ %x, %loop_begin ]
+ %result2 = phi i32 [ %y, %loop_begin ]
+ %result = add i32 %result1, %result2
+ ret i32 %result
+; CHECK: loop_exit:
+; CHECK-NEXT: %[[R1:.*]] = phi i32 [ %x, %entry ]
+; CHECK-NEXT: %[[R2:.*]] = phi i32 [ %y, %entry ]
+; CHECK-NEXT: %[[R:.*]] = add i32 %[[R1]], %[[R2]]
+; CHECK-NEXT: ret i32 %[[R]]
+}
+
+; This test contains a trivially unswitchable branch with a real phi node in LCSSA
+; position in a shared exit block where a different path through the loop
+; produces a non-invariant input to the PHI node.
+define i32 @test6(i32* %var, i1 %cond1, i1 %cond2, i32 %x, i32 %y) {
+; CHECK-LABEL: @test6(
+entry:
+ br label %loop_begin
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %{{.*}}, label %entry.split, label %loop_exit.split
+;
+; CHECK: entry.split:
+; CHECK-NEXT: br label %loop_begin
+
+loop_begin:
+ br i1 %cond1, label %continue, label %loop_exit
+; CHECK: loop_begin:
+; CHECK-NEXT: br label %continue
+
+continue:
+ %var_val = load i32, i32* %var
+ br i1 %cond2, label %latch, label %loop_exit
+; CHECK: continue:
+; CHECK-NEXT: load
+; CHECK-NEXT: br i1 %cond2, label %latch, label %loop_exit
+
+latch:
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+; CHECK: latch:
+; CHECK-NEXT: call
+; CHECK-NEXT: br label %loop_begin
+
+loop_exit:
+ %result1 = phi i32 [ %x, %loop_begin ], [ %var_val, %continue ]
+ %result2 = phi i32 [ %var_val, %continue ], [ %y, %loop_begin ]
+ %result = add i32 %result1, %result2
+ ret i32 %result
+; CHECK: loop_exit:
+; CHECK-NEXT: %[[R1:.*]] = phi i32 [ %var_val, %continue ]
+; CHECK-NEXT: %[[R2:.*]] = phi i32 [ %var_val, %continue ]
+; CHECK-NEXT: br label %loop_exit.split
+;
+; CHECK: loop_exit.split:
+; CHECK-NEXT: %[[R1S:.*]] = phi i32 [ %x, %entry ], [ %[[R1]], %loop_exit ]
+; CHECK-NEXT: %[[R2S:.*]] = phi i32 [ %y, %entry ], [ %[[R2]], %loop_exit ]
+; CHECK-NEXT: %[[R:.*]] = add i32 %[[R1S]], %[[R2S]]
+; CHECK-NEXT: ret i32 %[[R]]
+}
+
+; This test contains a trivially unswitchable switch with an LCSSA phi node in
+; a loop exit block.
+define i32 @test7(i32 %cond1, i32 %x, i32 %y) {
+; CHECK-LABEL: @test7(
+entry:
+ br label %loop_begin
+; CHECK-NEXT: entry:
+; CHECK-NEXT: switch i32 %cond1, label %entry.split [
+; CHECK-NEXT: i32 0, label %loop_exit
+; CHECK-NEXT: i32 1, label %loop_exit
+; CHECK-NEXT: ]
+;
+; CHECK: entry.split:
+; CHECK-NEXT: br label %loop_begin
+
+loop_begin:
+ switch i32 %cond1, label %latch [
+ i32 0, label %loop_exit
+ i32 1, label %loop_exit
+ ]
+; CHECK: loop_begin:
+; CHECK-NEXT: br label %latch
+
+latch:
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+; CHECK: latch:
+; CHECK-NEXT: call
+; CHECK-NEXT: br label %loop_begin
+
+loop_exit:
+ %result1 = phi i32 [ %x, %loop_begin ], [ %x, %loop_begin ]
+ %result2 = phi i32 [ %y, %loop_begin ], [ %y, %loop_begin ]
+ %result = add i32 %result1, %result2
+ ret i32 %result
+; CHECK: loop_exit:
+; CHECK-NEXT: %[[R1:.*]] = phi i32 [ %x, %entry ], [ %x, %entry ]
+; CHECK-NEXT: %[[R2:.*]] = phi i32 [ %y, %entry ], [ %y, %entry ]
+; CHECK-NEXT: %[[R:.*]] = add i32 %[[R1]], %[[R2]]
+; CHECK-NEXT: ret i32 %[[R]]
+}
+
+; This test contains a trivially unswitchable switch with a real phi node in
+; LCSSA position in a shared exit block where a different path through the loop
+; produces a non-invariant input to the PHI node.
+define i32 @test8(i32* %var, i32 %cond1, i32 %cond2, i32 %x, i32 %y) {
+; CHECK-LABEL: @test8(
+entry:
+ br label %loop_begin
+; CHECK-NEXT: entry:
+; CHECK-NEXT: switch i32 %cond1, label %entry.split [
+; CHECK-NEXT: i32 0, label %loop_exit.split
+; CHECK-NEXT: i32 1, label %loop_exit2
+; CHECK-NEXT: i32 2, label %loop_exit.split
+; CHECK-NEXT: ]
+;
+; CHECK: entry.split:
+; CHECK-NEXT: br label %loop_begin
+
+loop_begin:
+ switch i32 %cond1, label %continue [
+ i32 0, label %loop_exit
+ i32 1, label %loop_exit2
+ i32 2, label %loop_exit
+ ]
+; CHECK: loop_begin:
+; CHECK-NEXT: br label %continue
+
+continue:
+ %var_val = load i32, i32* %var
+ switch i32 %cond2, label %latch [
+ i32 0, label %loop_exit
+ ]
+; CHECK: continue:
+; CHECK-NEXT: load
+; CHECK-NEXT: switch i32 %cond2, label %latch [
+; CHECK-NEXT: i32 0, label %loop_exit
+; CHECK-NEXT: ]
+
+latch:
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+; CHECK: latch:
+; CHECK-NEXT: call
+; CHECK-NEXT: br label %loop_begin
+
+loop_exit:
+ %result1.1 = phi i32 [ %x, %loop_begin ], [ %x, %loop_begin ], [ %var_val, %continue ]
+ %result1.2 = phi i32 [ %var_val, %continue ], [ %y, %loop_begin ], [ %y, %loop_begin ]
+ %result1 = add i32 %result1.1, %result1.2
+ ret i32 %result1
+; CHECK: loop_exit:
+; CHECK-NEXT: %[[R1:.*]] = phi i32 [ %var_val, %continue ]
+; CHECK-NEXT: %[[R2:.*]] = phi i32 [ %var_val, %continue ]
+; CHECK-NEXT: br label %loop_exit.split
+;
+; CHECK: loop_exit.split:
+; CHECK-NEXT: %[[R1S:.*]] = phi i32 [ %x, %entry ], [ %x, %entry ], [ %[[R1]], %loop_exit ]
+; CHECK-NEXT: %[[R2S:.*]] = phi i32 [ %y, %entry ], [ %y, %entry ], [ %[[R2]], %loop_exit ]
+; CHECK-NEXT: %[[R:.*]] = add i32 %[[R1S]], %[[R2S]]
+; CHECK-NEXT: ret i32 %[[R]]
+
+loop_exit2:
+ %result2.1 = phi i32 [ %x, %loop_begin ]
+ %result2.2 = phi i32 [ %y, %loop_begin ]
+ %result2 = add i32 %result2.1, %result2.2
+ ret i32 %result2
+; CHECK: loop_exit2:
+; CHECK-NEXT: %[[R1:.*]] = phi i32 [ %x, %entry ]
+; CHECK-NEXT: %[[R2:.*]] = phi i32 [ %y, %entry ]
+; CHECK-NEXT: %[[R:.*]] = add i32 %[[R1]], %[[R2]]
+; CHECK-NEXT: ret i32 %[[R]]
+}
diff --git a/test/Transforms/SpeculativeExecution/spec-other.ll b/test/Transforms/SpeculativeExecution/spec-other.ll
deleted file mode 100644
index 65e14b69e9e6..000000000000
--- a/test/Transforms/SpeculativeExecution/spec-other.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: opt < %s -S -speculative-execution \
-; RUN: -spec-exec-max-speculation-cost 4 -spec-exec-max-not-hoisted 3 \
-; RUN: | FileCheck %s
-
-; CHECK-LABEL: @ifThen_extractvalue(
-; CHECK: extractvalue
-; CHECK: br i1 true
-define void @ifThen_extractvalue() {
- br i1 true, label %a, label %b
-
-a:
- %x = extractvalue { i32, i32 } undef, 0
- br label %b
-
-b:
- ret void
-}
-
-; CHECK-LABEL: @ifThen_insertvalue(
-; CHECK: insertvalue
-; CHECK: br i1 true
-define void @ifThen_insertvalue() {
- br i1 true, label %a, label %b
-
-a:
- %x = insertvalue { i32, i32 } undef, i32 undef, 0
- br label %b
-
-b:
- ret void
-}
-
diff --git a/test/Transforms/SpeculativeExecution/spec-vector.ll b/test/Transforms/SpeculativeExecution/spec-vector.ll
deleted file mode 100644
index 9c64f1fb1005..000000000000
--- a/test/Transforms/SpeculativeExecution/spec-vector.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: opt < %s -S -speculative-execution \
-; RUN: -spec-exec-max-speculation-cost 4 -spec-exec-max-not-hoisted 3 \
-; RUN: | FileCheck %s
-
-; CHECK-LABEL: @ifThen_extractelement_constindex(
-; CHECK: extractelement
-; CHECK: br i1 true
-define void @ifThen_extractelement_constindex() {
- br i1 true, label %a, label %b
-
-a:
- %x = extractelement <4 x i32> undef, i32 0
- br label %b
-
-b:
- ret void
-}
-
-; CHECK-LABEL: @ifThen_extractelement_varindex(
-; CHECK: extractelement
-; CHECK: br i1 true
-define void @ifThen_extractelement_varindex(i32 %idx) {
- br i1 true, label %a, label %b
-
-a:
- %x = extractelement <4 x i32> undef, i32 %idx
- br label %b
-
-b:
- ret void
-}
-
-; CHECK-LABEL: @ifThen_insertelement_constindex(
-; CHECK: insertelement
-; CHECK: br i1 true
-define void @ifThen_insertelement_constindex() {
- br i1 true, label %a, label %b
-
-a:
- %x = insertelement <4 x i32> undef, i32 undef, i32 0
- br label %b
-
-b:
- ret void
-}
-
-; CHECK-LABEL: @ifThen_insertelement_varindex(
-; CHECK: insertelement
-; CHECK: br i1 true
-define void @ifThen_insertelement_varindex(i32 %idx) {
- br i1 true, label %a, label %b
-
-a:
- %x = insertelement <4 x i32> undef, i32 undef, i32 %idx
- br label %b
-
-b:
- ret void
-}
-
-; CHECK-LABEL: @ifThen_shufflevector(
-; CHECK: shufflevector
-; CHECK: br i1 true
-define void @ifThen_shufflevector() {
- br i1 true, label %a, label %b
-
-a:
- %x = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> undef
- br label %b
-
-b:
- ret void
-}
diff --git a/test/Transforms/Util/split-bit-piece.ll b/test/Transforms/Util/split-bit-piece.ll
index 3d7bcac73ca3..5a374e839926 100644
--- a/test/Transforms/Util/split-bit-piece.ll
+++ b/test/Transforms/Util/split-bit-piece.ll
@@ -3,43 +3,85 @@
; if it only describes part of the variable.
; RUN: opt -S -sroa %s | FileCheck %s
-; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
+; Built from:
+; struct foo { bool b; long i; };
+; void f(bool b, bool expr, foo g) {
+; }
+; And modifying the frag dbg.declare to use a fragmented DIExpression (with offset: 0, size: 4)
+; to test the dbg.declare+fragment case here.
-; Function Attrs: nounwind uwtable
-define hidden void @_ZN6__tsan9FastState14SetHistorySizeEi(i32 %hs) #1 align 2 {
+; Expect two fragments:
+; * first starting at bit 0, 8 bits (for the bool)
+; * second starting at bit 32, 32 bits (for the long)
+; (this happens to create/demonstrate a gap from bits [7, 32))
+
+; But also check that a complex expression is not used for a lone bool
+; parameter. It can reference the register it's in directly without masking off
+; high bits or anything.
+
+; CHECK: call void @llvm.dbg.value(metadata i8 %g.coerce0, i64 0, metadata ![[VAR_STRUCT:[0-9]+]], metadata ![[EXPR_STRUCT1:[0-9]+]])
+; CHECK: call void @llvm.dbg.value(metadata i64 %g.coerce1, i64 0, metadata ![[VAR_STRUCT]], metadata ![[EXPR_STRUCT2:[0-9]+]])
+; CHECK: call void @llvm.dbg.value(metadata i1 %b, i64 0, metadata ![[VAR_BOOL:[0-9]+]], metadata ![[EXPR_BOOL:[0-9]+]])
+; CHECK: call void @llvm.dbg.value(metadata i1 %frag, i64 0, metadata ![[FRAG_BOOL:[0-9]+]], metadata ![[FRAG_BOOL:[0-9]+]])
+; CHECK: ![[EXPR_STRUCT1]] = !DIExpression(DW_OP_LLVM_fragment, 0, 8)
+; CHECK: ![[EXPR_STRUCT2]] = !DIExpression(DW_OP_LLVM_fragment, 32, 64)
+; CHECK: ![[EXPR_BOOL]] = !DIExpression()
+; CHECK: ![[FRAG_BOOL]] = !DIExpression(DW_OP_LLVM_fragment, 0, 1)
+
+%struct.foo = type { i8, i64 }
+
+; Function Attrs: noinline nounwind uwtable
+define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) #0 !dbg !6 {
entry:
- %hs.addr = alloca i32, align 4
- %v1 = alloca i64, align 8
- %v2 = alloca i64, align 8
- store i32 %hs, i32* %hs.addr, align 4
-; CHECK: call void @llvm.dbg.value(metadata i32 %hs, i64 0, metadata !{{[0-9]+}}, metadata ![[EXPR:[0-9]+]])
-; CHECK: ![[EXPR]] = !DIExpression(DW_OP_LLVM_fragment, 0
- call void @llvm.dbg.declare(metadata i64* %v1, metadata !9, metadata !12), !dbg !13
- %0 = load i32, i32* %hs.addr, align 4
- %conv = sext i32 %0 to i64
- store i64 %conv, i64* %v1, align 8
- %1 = load i64, i64* %v2, align 8
- unreachable
+ %g = alloca %struct.foo, align 8
+ %b.addr = alloca i8, align 1
+ %frag.addr = alloca i8, align 1
+ %0 = bitcast %struct.foo* %g to { i8, i64 }*
+ %1 = getelementptr inbounds { i8, i64 }, { i8, i64 }* %0, i32 0, i32 0
+ store i8 %g.coerce0, i8* %1, align 8
+ %2 = getelementptr inbounds { i8, i64 }, { i8, i64 }* %0, i32 0, i32 1
+ store i64 %g.coerce1, i64* %2, align 8
+ %frombool = zext i1 %b to i8
+ store i8 %frombool, i8* %b.addr, align 1
+ call void @llvm.dbg.declare(metadata i8* %b.addr, metadata !15, metadata !16), !dbg !17
+ %frombool1 = zext i1 %frag to i8
+ store i8 %frombool1, i8* %frag.addr, align 1
+ call void @llvm.dbg.declare(metadata i8* %frag.addr, metadata !18, metadata !23), !dbg !19
+ call void @llvm.dbg.declare(metadata %struct.foo* %g, metadata !20, metadata !16), !dbg !21
+ ret void, !dbg !22
}
-attributes #0 = { nounwind readnone }
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone speculatable }
!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!7}
-!llvm.ident = !{!8}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 3.8.0 (trunk 256979) (llvm/trunk 257107)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, retainedTypes: !2)
-!1 = !DIFile(filename: "tsan_shadow_test.cc", directory: "/tmp")
-!2 = !{!3, !5}
-!3 = !DICompositeType(tag: DW_TAG_class_type, name: "FastState", file: !4, line: 91, size: 64, align: 64, identifier: "_ZTSN6__tsan9FastStateE")
-!4 = !DIFile(filename: "/mnt/extra/llvm/projects/compiler-rt/lib/tsan/rtl/tsan_rtl.h", directory: "/tmp")
-!5 = distinct !DIDerivedType(tag: DW_TAG_typedef, name: "u64", line: 78, baseType: !6)
-!6 = !DIBasicType(name: "long long unsigned int", size: 64, align: 64, encoding: DW_ATE_unsigned)
-!7 = !{i32 2, !"Debug Info Version", i32 3}
-!8 = !{!"clang version 3.8.0 (trunk 256979) (llvm/trunk 257107)"}
-!9 = !DILocalVariable(name: "v1", scope: !10, file: !4, line: 136, type: !5)
-!10 = distinct !DILexicalBlock(scope: !11, file: !4, line: 136, column: 5)
-!11 = distinct !DISubprogram(name: "SetHistorySize", linkageName: "_ZN6__tsan9FastState14SetHistorySizeEi", scope: !3, file: !4, line: 135, isLocal: false, isDefinition: true, scopeLine: 135, flags: DIFlagPrototyped, isOptimized: false, unit: !0)
-!12 = !DIExpression()
-!13 = !DILocation(line: 136, column: 5, scope: !10)
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 303077) (llvm/trunk 303098)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "foo.cpp", directory: "/usr/local/google/home/blaikie/dev/scratch")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 5.0.0 (trunk 303077) (llvm/trunk 303098)"}
+!6 = distinct !DISubprogram(name: "f", linkageName: "_Z1fbb3foo", scope: !1, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{null, !9, !9, !10}
+!9 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
+!10 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "foo", file: !1, line: 1, size: 128, elements: !11, identifier: "_ZTS3foo")
+!11 = !{!12, !13}
+!12 = !DIDerivedType(tag: DW_TAG_member, name: "b", scope: !10, file: !1, line: 1, baseType: !9, size: 8)
+!13 = !DIDerivedType(tag: DW_TAG_member, name: "i", scope: !10, file: !1, line: 1, baseType: !14, size: 64, offset: 64)
+!14 = !DIBasicType(name: "long int", size: 64, encoding: DW_ATE_signed)
+!15 = !DILocalVariable(name: "b", arg: 1, scope: !6, file: !1, line: 2, type: !9)
+!16 = !DIExpression()
+!17 = !DILocation(line: 2, column: 13, scope: !6)
+!18 = !DILocalVariable(name: "frag", arg: 2, scope: !6, file: !1, line: 2, type: !9)
+!19 = !DILocation(line: 2, column: 21, scope: !6)
+!20 = !DILocalVariable(name: "g", arg: 3, scope: !6, file: !1, line: 2, type: !10)
+!21 = !DILocation(line: 2, column: 31, scope: !6)
+!22 = !DILocation(line: 3, column: 1, scope: !6)
+!23 = !DIExpression(DW_OP_LLVM_fragment, 0, 4)
diff --git a/test/Verifier/metadata-function-dbg.ll b/test/Verifier/metadata-function-dbg.ll
index 24989ed7aa2e..6db40943ec38 100644
--- a/test/Verifier/metadata-function-dbg.ll
+++ b/test/Verifier/metadata-function-dbg.ll
@@ -3,12 +3,18 @@
; CHECK: function declaration may not have a !dbg attachment
declare !dbg !4 void @f1()
-define void @f2() !dbg !4 {
+; CHECK: function must have a single !dbg attachment
+define void @f2() !dbg !4 !dbg !4 {
unreachable
}
-; CHECK: function must have a single !dbg attachment
-define void @f3() !dbg !4 !dbg !4 {
+; CHECK: DISubprogram attached to more than one function
+define void @f3() !dbg !4 {
+ unreachable
+}
+
+; CHECK: DISubprogram attached to more than one function
+define void @f4() !dbg !4 {
unreachable
}
@@ -16,7 +22,7 @@ define void @f3() !dbg !4 !dbg !4 {
; CHECK: function !dbg attachment must be a subprogram
; CHECK-NEXT: void ()* @bar
; CHECK-NEXT: !{{[0-9]+}} = !{}
-define void @bar() !dbg !6 {
+define void @bar() !dbg !3 {
unreachable
}
@@ -26,5 +32,5 @@ define void @bar() !dbg !6 {
!llvm.dbg.cu = !{!1}
!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2)
!2 = !DIFile(filename: "t.c", directory: "/path/to/dir")
+!3 = !{}
!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !2, unit: !1)
-!6 = !{}
diff --git a/test/tools/llvm-pdbdump/Inputs/FilterTest.cpp b/test/tools/llvm-pdbdump/Inputs/FilterTest.cpp
index bcf9360d4a9b..4dd5581e2fca 100644
--- a/test/tools/llvm-pdbdump/Inputs/FilterTest.cpp
+++ b/test/tools/llvm-pdbdump/Inputs/FilterTest.cpp
@@ -10,6 +10,8 @@ public:
void MemberFunc() {}
+ int foo() const { return IntMemberVar; }
+
private:
int IntMemberVar;
double DoubleMemberVar;
@@ -18,10 +20,26 @@ private:
int IntGlobalVar;
double DoubleGlobalVar;
typedef int GlobalTypedef;
+char OneByte;
+char TwoBytes[2];
+char ThreeBytes[3];
+
enum GlobalEnum {
GlobalEnumVal1
} GlobalEnumVar;
+int CFunc() {
+ return (int)OneByte * 2;
+}
+int BFunc() {
+ return 42;
+}
+int AFunc() {
+ static FilterTestClass FC;
+
+ return (CFunc() + BFunc()) * IntGlobalVar + FC.foo();
+}
+
int main(int argc, char **argv) {
FilterTestClass TestClass;
GlobalTypedef v1;
diff --git a/test/tools/llvm-pdbdump/Inputs/FilterTest.pdb b/test/tools/llvm-pdbdump/Inputs/FilterTest.pdb
index 5f01ec701b81..ce7e017f9151 100644
--- a/test/tools/llvm-pdbdump/Inputs/FilterTest.pdb
+++ b/test/tools/llvm-pdbdump/Inputs/FilterTest.pdb
Binary files differ
diff --git a/test/tools/llvm-pdbdump/regex-filter.test b/test/tools/llvm-pdbdump/regex-filter.test
index d2f500e88c33..36c3da33e2e4 100644
--- a/test/tools/llvm-pdbdump/regex-filter.test
+++ b/test/tools/llvm-pdbdump/regex-filter.test
@@ -1,4 +1,4 @@
-; RUN: llvm-pdbdump pretty -symbols -globals -types %p/Inputs/FilterTest.pdb \
+; RUN: llvm-pdbdump pretty -module-syms -globals -types %p/Inputs/FilterTest.pdb \
; RUN: | FileCheck --check-prefix=NO_FILTER %s
; RUN: llvm-pdbdump pretty -types -exclude-types="GlobalTypedef|NestedTypedef" \
@@ -11,15 +11,15 @@
; RUN: llvm-pdbdump pretty -classes -typedefs %p/Inputs/FilterTest.pdb \
; RUN: | FileCheck --check-prefix=EXCLUDE_ENUMS %s
-; RUN: llvm-pdbdump pretty -types -symbols -globals -exclude-symbols="MemberVar|GlobalVar" \
+; RUN: llvm-pdbdump pretty -types -module-syms -globals -exclude-symbols="MemberVar|GlobalVar" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=EXCLUDE_VARS %s
; RUN: llvm-pdbdump pretty -types -exclude-types="FilterTestClass" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=EXCLUDE_WHOLE_CLASS %s
-; RUN: llvm-pdbdump pretty -symbols -globals -exclude-compilands="FilterTest.obj" \
+; RUN: llvm-pdbdump pretty -module-syms -globals -exclude-compilands="FilterTest.obj" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=EXCLUDE_COMPILAND %s
; RUN: llvm-pdbdump pretty -types -include-types="FilterTestClass" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=INCLUDE_ONLY_TYPES %s
-; RUN: llvm-pdbdump pretty -types -symbols -globals -include-symbols="[[:<:]](IntGlobalVar|DoubleGlobalVar)[[:>:]]" \
+; RUN: llvm-pdbdump pretty -types -module-syms -globals -include-symbols="[[:<:]](IntGlobalVar|DoubleGlobalVar)[[:>:]]" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=INCLUDE_ONLY_VARS %s
; NO_FILTER: ---TYPES---
diff --git a/test/tools/llvm-pdbdump/symbol-filters.test b/test/tools/llvm-pdbdump/symbol-filters.test
new file mode 100644
index 000000000000..d12d2aa8be0f
--- /dev/null
+++ b/test/tools/llvm-pdbdump/symbol-filters.test
@@ -0,0 +1,74 @@
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=data %p/Inputs/FilterTest.pdb \
+; RUN: | FileCheck --check-prefix=ONLY_DATA %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=thunks %p/Inputs/FilterTest.pdb \
+; RUN: | FileCheck --check-prefix=ONLY_THUNKS %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=funcs %p/Inputs/FilterTest.pdb \
+; RUN: | FileCheck --check-prefix=ONLY_FUNCS %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=funcs -sym-types=data \
+; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=TWO_TYPES %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=data \
+; RUN: -symbol-order=name %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=NAME_SORT_DATA %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=data \
+; RUN: -symbol-order=size %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=SIZE_SORT_DATA %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=funcs \
+; RUN: -symbol-order=name %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=NAME_SORT_FUNCS %s
+
+; RUN: llvm-pdbdump pretty -globals -module-syms -sym-types=funcs \
+; RUN: -symbol-order=size %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=SIZE_SORT_FUNCS %s
+
+; ONLY_DATA-NOT: func
+; ONLY_DATA-NOT: thunk
+; ONLY_DATA-DAG: data {{.*}} static char OneByte
+; ONLY_DATA-DAG: data {{.*}} static double DoubleGlobalVar
+; ONLY_DATA-DAG: data {{.*}} static char TwoBytes[2]
+; ONLY_DATA-DAG: data {{.*}} static char ThreeBytes[3]
+; ONLY_DATA-DAG: data {{.*}} static int IntGlobalVar
+; ONLY_DATA-DAG: data {{.*}} static GlobalEnum GlobalEnumVar
+
+; ONLY_FUNCS-NOT: data
+; ONLY_FUNCS-NOT: thunk
+; ONLY_FUNCS: func {{.*}} int __cdecl main(int argc, char** argv)
+; ONLY_FUNCS: func {{.*}} int __cdecl CFunc()
+; ONLY_FUNCS: func {{.*}} int __cdecl BFunc()
+; ONLY_FUNCS: func {{.*}} int __cdecl AFunc()
+; ONLY_FUNCS: func {{.*}} int FilterTestClass::foo()
+
+; ONLY_THUNKS-NOT: func
+; ONLY_THUNKS-NOT: data
+; ONLY_THUNKS-DAG: thunk {{.*}} (TrampIncremental)
+
+; TWO_TYPES-NOT: thunk
+; TWO_TYPES-DAG: func {{.*}} int __cdecl main(int argc, char** argv)
+; TWO_TYPES-DAG: data {{.*}} static double DoubleGlobalVar
+
+; NAME_SORT_DATA: data {{.*}} static double DoubleGlobalVar
+; NAME_SORT_DATA: data {{.*}} static GlobalEnum GlobalEnumVar
+; NAME_SORT_DATA: data {{.*}} static int IntGlobalVar
+; NAME_SORT_DATA: data {{.*}} static char OneByte
+; NAME_SORT_DATA: data {{.*}} static char ThreeBytes[3]
+; NAME_SORT_DATA: data {{.*}} static char TwoBytes[2]
+
+; SIZE_SORT_DATA: data {{.*}}sizeof=8{{.*}}double DoubleGlobalVar
+; SIZE_SORT_DATA-DAG: data {{.*}}sizeof=4{{.*}}GlobalEnum GlobalEnumVar
+; SIZE_SORT_DATA-DAG: data {{.*}}sizeof=4{{.*}}int IntGlobalVar
+; SIZE_SORT_DATA: data {{.*}}sizeof=3{{.*}}char ThreeBytes[3]
+; SIZE_SORT_DATA: data {{.*}}sizeof=2{{.*}}char TwoBytes[2]
+; SIZE_SORT_DATA: data {{.*}}sizeof=1{{.*}}char OneByte
+
+; NAME_SORT_FUNCS: func {{.*}}sizeof= 40{{.*}}AFunc
+; NAME_SORT_FUNCS: func {{.*}}sizeof= 10{{.*}}BFunc
+; NAME_SORT_FUNCS: func {{.*}}sizeof= 14{{.*}}CFunc
+; NAME_SORT_FUNCS: func {{.*}}sizeof= 16{{.*}}FilterTestClass::foo
+; NAME_SORT_FUNCS: func {{.*}}sizeof= 7{{.*}}main
+
+; SIZE_SORT_FUNCS: func {{.*}}sizeof= 40{{.*}}AFunc
+; SIZE_SORT_FUNCS: func {{.*}}sizeof= 16{{.*}}FilterTestClass::foo
+; SIZE_SORT_FUNCS: func {{.*}}sizeof= 14{{.*}}CFunc
+; SIZE_SORT_FUNCS: func {{.*}}sizeof= 10{{.*}}BFunc
+; SIZE_SORT_FUNCS: func {{.*}}sizeof= 7{{.*}}main
diff --git a/test/tools/llvm-profdata/sample-profile-basic.test b/test/tools/llvm-profdata/sample-profile-basic.test
index 211d8c5bbd84..3ba42c20f2e8 100644
--- a/test/tools/llvm-profdata/sample-profile-basic.test
+++ b/test/tools/llvm-profdata/sample-profile-basic.test
@@ -25,9 +25,10 @@ RUN: diff %t-binary %t-text
counters have doubled.
RUN: llvm-profdata merge --sample %p/Inputs/sample-profile.proftext -o %t-binprof
RUN: llvm-profdata merge --sample --text %p/Inputs/sample-profile.proftext %t-binprof -o - | FileCheck %s --check-prefix=MERGE1
-MERGE1-DAG: main:368038:0
-MERGE1-DAG: 9: 4128 _Z3fooi:1262 _Z3bari:2942
-MERGE1-DAG: _Z3fooi:15422:1220
+MERGE1: main:368038:0
+MERGE1: 9: 4128 _Z3fooi:1262 _Z3bari:2942
+MERGE1: _Z3bari:40602:2874
+MERGE1: _Z3fooi:15422:1220
5- Detect invalid text encoding (e.g. instrumentation profile text format).
RUN: not llvm-profdata show --sample %p/Inputs/foo3bar3-1.proftext 2>&1 | FileCheck %s --check-prefix=BADTEXT
diff --git a/test/tools/llvm-readobj/wasm-invalid.test b/test/tools/llvm-readobj/wasm-invalid.test
new file mode 100644
index 000000000000..d500d582ca03
--- /dev/null
+++ b/test/tools/llvm-readobj/wasm-invalid.test
@@ -0,0 +1,7 @@
+# RUN: yaml2obj %s | not llvm-readobj -t - 2>&1 | FileCheck %s
+
+--- !WASM
+FileHeader:
+ Version: 0x0000000c
+
+# CHECK: Error reading file: <stdin>: Bad version number